Example #1
def plane_smooth(cube,cubedim=0,parallel=True,numcores=None,**kwargs):
    """
    parallel-map the smooth function

    Parameters
    ----------
    parallel: bool
        Default True.  Set to False to run serially (e.g., for debugging).
    numcores: int
        Passed to parallel_map (None means use all available cores).
    """
    if not smoothOK:
        # smoothOK is a module-level flag set at import time, indicating
        # whether the smoothing tools imported successfully
        return

    if cubedim != 0:
        cube = cube.swapaxes(0,cubedim)

    cubelist = [cube[ii,:,:] for ii in range(cube.shape[0])]

    Psmooth = lambda C: smooth(C,**kwargs)

    if parallel:
        smoothcube = array(parallel_map(Psmooth,cubelist,numcores=numcores))
    else:
        smoothcube = array(list(map(Psmooth,cubelist)))
    
    if cubedim != 0:
        smoothcube = smoothcube.swapaxes(0,cubedim)

    return smoothcube
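For contrast, a self-contained version of the same slice-and-map pattern, using multiprocessing.Pool in place of pyspeckit's parallel_map and scipy's gaussian_filter as an example smoother (both substitutions are mine; note that Pool.map needs a picklable, module-level worker, so a lambda like Psmooth above would not survive the trip -- pyspeckit's own parallel_map avoids this by forking):

from multiprocessing import Pool

import numpy as np
from scipy.ndimage import gaussian_filter

def _smooth_plane(plane):
    # smooth one 2D spatial plane; sigma=2 is an arbitrary example width
    return gaussian_filter(plane, sigma=2)

if __name__ == '__main__':
    cube = np.random.randn(16, 32, 32)        # (spectral, y, x)
    with Pool(processes=4) as pool:
        smoothcube = np.array(pool.map(_smooth_plane, list(cube)))
    assert smoothcube.shape == cube.shape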
Example #2
def spectral_smooth(cube, smooth_factor, downsample=True, parallel=True,
                    numcores=None, **kwargs):
    """
    Smooth the cube along the spectral direction
    """

    yy,xx = numpy.indices(cube.shape[1:])  # only used by the commented-out naive loop below

    if downsample:
        newshape = cube[::smooth_factor,:,:].shape
    else:
        newshape = cube.shape
    
    # need to make the cube "flat" along dims 1&2 for iteration in the "map"
    flatshape = (cube.shape[0],cube.shape[1]*cube.shape[2])

    Ssmooth = lambda x: pyspeckit.smooth.smooth(x, smooth_factor, downsample=downsample, **kwargs)
    if parallel:
        newcube = numpy.array(parallel_map(Ssmooth, cube.reshape(flatshape).T, numcores=numcores)).T.reshape(newshape)
    else:
        newcube = numpy.array(list(map(Ssmooth, cube.reshape(flatshape).T))).T.reshape(newshape)

    #naive, non-optimal version
    # for (x,y) in zip(xx.flat,yy.flat):
    #     newcube[:,y,x] = pyspeckit.smooth.smooth(cube[:,y,x], smooth_factor,
    #             downsample=downsample, **kwargs)

    return newcube
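The reshape/transpose is the load-bearing trick here: flattening the two spatial axes turns the cube into one spectrum per row, which map can iterate over, and the inverse transpose/reshape reassembles the result. A self-contained sketch of just that round trip, with a toy boxcar standing in for pyspeckit.smooth.smooth:

import numpy as np

def boxcar(spec, width=4):
    # toy downsampling smoother standing in for pyspeckit.smooth.smooth
    return spec.reshape(-1, width).mean(axis=1)

cube = np.random.randn(16, 8, 8)                   # (nspec, ny, nx)
flat = cube.reshape(cube.shape[0], -1).T           # (ny*nx, nspec): one spectrum per row
newcube = np.array([boxcar(s) for s in flat]).T.reshape(4, 8, 8)
assert newcube.shape == (16 // 4, 8, 8)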
Example #3
    def momenteach(self, verbose=True, verbose_level=1, multicore=0, **kwargs):
        """
        Return a cube of the moments of each pixel

        Parameters
        ----------
        multicore: int
            If > 0, use multiprocessing via parallel_map to run on multiple cores.
        """

        if not hasattr(self.mapplot, 'plane'):
            self.mapplot.makeplane()

        yy, xx = np.indices(self.mapplot.plane.shape)
        if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
            OK = (~self.mapplot.plane.mask) * self.maskmap
        else:
            OK = np.isfinite(self.mapplot.plane) * self.maskmap
        valid_pixels = list(zip(xx[OK], yy[OK]))

        # run the moment process to find out how many elements are in a moment
        _temp_moment = self.get_spectrum(yy[OK][0],
                                         xx[OK][0]).moments(**kwargs)

        self.momentcube = np.zeros((len(_temp_moment), ) +
                                   self.mapplot.plane.shape)

        t0 = time.time()

        def moment_a_pixel(iixy):
            ii, x, y = iixy
            sp = self.get_spectrum(x, y)
            self.momentcube[:, y, x] = sp.moments(**kwargs)
            if verbose:
                if ii % 10**(3 - verbose_level) == 0:
                    print "Finished moment %i.  Elapsed time is %0.1f seconds" % (
                        ii, time.time() - t0)

            return ((x, y), self.momentcube[:, y, x])

        if multicore > 0:
            sequence = [(ii, x, y)
                        for ii, (x, y) in tuple(enumerate(valid_pixels))]
            result = parallel_map(moment_a_pixel, sequence, numcores=multicore)
            merged_result = [
                core_result for core_result in result
                if core_result is not None
            ]
            for mr in merged_result:
                for TEMP in mr:
                    ((x, y), moments) = TEMP
                    self.momentcube[:, y, x] = moments
        else:
            for ii, (x, y) in enumerate(valid_pixels):
                moment_a_pixel((ii, x, y))

        if verbose:
            # report the total count: `ii` is not defined when the
            # multicore branch ran
            print("Finished all %i moments.  Elapsed time was %0.1f seconds" %
                  (len(valid_pixels), time.time() - t0))
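A caveat worth spelling out: when multicore > 0, the in-place write to self.momentcube inside moment_a_pixel happens in a worker process and never reaches the parent; it is the returned ((x, y), moments) tuples, merged afterwards, that populate the cube. A self-contained sketch of that return-and-merge pattern (multiprocessing.Pool standing in for parallel_map):

from multiprocessing import Pool

import numpy as np

def moment_a_pixel(xy):
    x, y = xy
    return (x, y), np.array([x + y, x - y])   # toy two-element "moments"

if __name__ == '__main__':
    momentcube = np.zeros((2, 4, 4))
    pixels = [(x, y) for x in range(4) for y in range(4)]
    with Pool(processes=2) as pool:
        results = pool.map(moment_a_pixel, pixels)
    for (x, y), moments in results:            # merge in the parent process
        momentcube[:, y, x] = moments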
Example #4
def baseline_cube(cube,
                  polyorder=None,
                  cubemask=None,
                  splineorder=None,
                  numcores=None,
                  sampling=1):
    """
    Given a cube, fit a polynomial to each spectrum.

    Original version from pyspeckit; it should be included into "spectral-cube".

    Parameters
    ----------
    cube: np.ndarray
        An ndarray with ndim = 3, and the first dimension is the spectral axis
    polyorder: int
        Order of the polynomial to fit and subtract
    cubemask: boolean ndarray
        Mask to apply to cube.  Values that are True will be ignored when
        fitting.
    numcores : None or int
        Number of cores to use for parallelization.  If None, will be set to
        the number of available cores.
    """
    x = np.arange(cube.shape[0], dtype=cube.dtype)
    #polyfitfunc = lambda y: np.polyfit(x, y, polyorder)
    blfunc = blfunc_generator(x=x,
                              splineorder=splineorder,
                              polyorder=polyorder,
                              sampling=sampling)

    reshaped_cube = cube.reshape(cube.shape[0],
                                 cube.shape[1] * cube.shape[2]).T

    if cubemask is None:
        log.debug("No mask defined.")
        fit_cube = reshaped_cube
    else:
        if cubemask.dtype != 'bool':
            raise TypeError("Cube mask *must* be a boolean array.")
        if cubemask.shape != cube.shape:
            raise ValueError("Mask shape does not match cube shape")
        log.debug("Masking cube with shape {0} "
                  "with mask of shape {1}".format(cube.shape, cubemask.shape))
        masked_cube = cube.copy()
        masked_cube[cubemask] = np.nan
        fit_cube = masked_cube.reshape(cube.shape[0],
                                       cube.shape[1] * cube.shape[2]).T

    baselined = np.array(
        parallel_map(blfunc, list(zip(fit_cube, reshaped_cube)),
                     numcores=numcores))
    blcube = baselined.T.reshape(cube.shape)
    return blcube
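blfunc_generator is not shown in this excerpt. A hypothetical reconstruction, consistent with the inline blfunc of Example #5 below (the spline branch is omitted, and the real pyspeckit version may differ in details):

import numpy as np

def blfunc_generator(x=None, polyorder=None, splineorder=None, sampling=1):
    def blfunc(args):
        yfit, yreal = args
        # masked values were replaced with NaN upstream
        mask = np.isfinite(yfit)
        if mask.sum() <= polyorder:
            return yreal * 0
        polypars = np.polyfit(x[mask][::sampling], yfit[mask][::sampling],
                              polyorder)
        return yreal - np.polyval(polypars, x)
    return blfunc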
Example #5
def baseline_cube(cube, polyorder, cubemask=None):
    """
    Given a cube, fit a polynomial to each spectrum

    Parameters
    ----------
    cube: np.ndarray
        An ndarray with ndim = 3, and the first dimension is the spectral axis
    polyorder: int
        Order of the polynomial to fit and subtract
    cubemask: boolean ndarray
        Mask to apply to cube.  Values that are True will be ignored when
        fitting.
    """
    x = np.arange(cube.shape[0])

    #polyfitfunc = lambda y: np.polyfit(x, y, polyorder)
    def blfunc(args):
        yfit, yreal = args
        if hasattr(yfit, 'mask'):
            mask = ~yfit.mask
        else:
            mask = yfit == yfit  # NaN-safe finite mask: NaN != NaN

        if mask.sum() < polyorder:
            return x * 0
        else:
            polypars = np.polyfit(x[mask], yfit[mask], polyorder)
            return yreal - np.polyval(polypars, x)

    reshaped_cube = cube.reshape(cube.shape[0],
                                 cube.shape[1] * cube.shape[2]).T

    if cubemask is None:
        fit_cube = reshaped_cube
    else:
        if cubemask.dtype != 'bool':
            raise TypeError("Cube mask *must* be a boolean array.")
        masked_cube = cube.copy()
        masked_cube[cubemask] = np.nan
        fit_cube = masked_cube.reshape(cube.shape[0],
                                       cube.shape[1] * cube.shape[2]).T

    baselined = np.array(parallel_map(blfunc, list(zip(fit_cube, reshaped_cube))))
    blcube = baselined.T.reshape(cube.shape)
    return blcube
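A quick usage sketch for the function above, on synthetic data (assumes parallel_map is in scope, e.g. imported from pyspeckit.parallel_map):

import numpy as np

cube = np.random.randn(32, 8, 8) + np.linspace(0, 5, 32)[:, None, None]
blcube = baseline_cube(cube, polyorder=1)
# the linear ramp should be gone; residuals scatter around zero
print(np.abs(blcube.mean(axis=0)).max())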
Example #6
    def fiteach(self,
                errspec=None,
                errmap=None,
                guesses=(),
                verbose=True,
                verbose_level=1,
                quiet=True,
                signal_cut=3,
                usemomentcube=False,
                blank_value=0,
                integral=True,
                direct=False,
                absorption=False,
                use_nearest_as_guess=False,
                start_from_point=(0, 0),
                multicore=0,
                continuum_map=None,
                **fitkwargs):
        """
        Fit a spectrum to each valid pixel in the cube

        For guesses, priority is *use_nearest_as_guess*, *usemomentcube*,
        *guesses*, None

        Parameters
        ----------
        use_nearest_as_guess: bool
            Unless the fitted point is the first, it will find the nearest
            other point with a successful fit and use its best-fit parameters
            as the guess
        start_from_point: tuple(int,int)
            Either start from the center or from a point defined by a tuple.
            Work outward from that starting point.  
        guesses: tuple or ndarray[naxis=3]
            Either a tuple/list of guesses with len(guesses) = npars or a cube
            of guesses with shape [npars, ny, nx]
        signal_cut: float
            Minimum signal-to-noise ratio to "cut" on (i.e., if peak in a given
            spectrum has s/n less than this value, ignore it)
        blank_value: float
            Value to replace non-fitted locations with.  A good alternative is
            numpy.nan
        verbose: bool 
        verbose_level: int
            Controls how much is output.
            0,1 - only changes frequency of updates in loop
            2 - print out messages when skipping pixels
            3 - print out messages when fitting pixels
            4 - specfit will be verbose 
        multicore: int
            If > 0, use multiprocessing via parallel_map to run on multiple cores.
        continuum_map: np.ndarray
            Same shape as error map.  Subtract this from data before estimating noise.

        """

        if not hasattr(self.mapplot, 'plane'):
            self.mapplot.makeplane()

        yy, xx = np.indices(self.mapplot.plane.shape)
        if isinstance(self.mapplot.plane, np.ma.core.MaskedArray):
            OK = (~self.mapplot.plane.mask) * self.maskmap
        else:
            OK = np.isfinite(self.mapplot.plane) * self.maskmap

        # NaN guesses rule out the model too
        if hasattr(guesses,
                   'shape') and guesses.shape[1:] == self.cube.shape[1:]:
            bad = np.isnan(guesses).any(axis=0)
            OK *= (~bad)

        distance = ((xx)**2 + (yy)**2)**0.5
        if start_from_point == 'center':
            # integer division: np.roll requires integer shifts
            start_from_point = (xx.max() // 2, yy.max() // 2)
        d_from_start = np.roll(np.roll(distance, start_from_point[0], 0),
                               start_from_point[1], 1)
        sort_distance = np.argsort(d_from_start.flat)

        valid_pixels = list(zip(xx.flat[sort_distance][OK.flat[sort_distance]],
                                yy.flat[sort_distance][OK.flat[sort_distance]]))

        if verbose_level > 0:
            print "Number of valid pixels: %i" % len(valid_pixels)

        if usemomentcube:
            npars = self.momentcube.shape[0]
        else:
            npars = len(guesses)
            if npars == 0:
                raise ValueError("Parameter guesses are required.")

        self.parcube = np.zeros((npars, ) + self.mapplot.plane.shape)
        self.errcube = np.zeros((npars, ) + self.mapplot.plane.shape)
        if integral:
            self.integralmap = np.zeros((2, ) + self.mapplot.plane.shape)

        # newly needed as of March 27, 2012.  Don't know why.
        if 'fittype' in fitkwargs: self.specfit.fittype = fitkwargs['fittype']
        self.specfit.fitter = self.specfit.Registry.multifitters[
            self.specfit.fittype]

        # array to store whether pixels have fits
        self.has_fit = np.zeros(self.mapplot.plane.shape, dtype='bool')

        global counter
        counter = 0

        t0 = time.time()

        def fit_a_pixel(iixy):
            global counter
            ii, x, y = iixy
            sp = self.get_spectrum(x, y)

            # very annoying - cannot use min/max without checking type
            # maybe can use np.asarray here?
            if hasattr(sp.data, 'mask'):
                sp.data[sp.data.mask] = np.nan
                sp.error[sp.data.mask] = np.nan
                sp.data = np.array(sp.data)
                sp.error = np.array(sp.error)

            if errspec is not None:
                sp.error = errspec
            elif errmap is not None:
                sp.error = np.ones(sp.data.shape) * errmap[y, x]
            else:
                if verbose_level > 1 and ii == 0:
                    print("WARNING: using data std() as error.")
                sp.error[:] = sp.data[sp.data == sp.data].std()
            if sp.error is not None and signal_cut > 0:
                if continuum_map is not None:
                    snr = (sp.data - continuum_map[y, x]) / sp.error
                else:
                    snr = sp.data / sp.error
                if absorption:
                    max_sn = np.nanmax(-1 * snr)
                else:
                    max_sn = np.nanmax(snr)
                if max_sn < signal_cut:
                    if verbose_level > 1:
                        print("Skipped %4i,%4i (s/n=%0.2g)" % (x, y, max_sn))
                    return
                elif np.isnan(max_sn):
                    if verbose_level > 1:
                        print("Skipped %4i,%4i (s/n is nan; max(data)=%0.2g, min(error)=%0.2g)" %
                              (x, y, np.nanmax(sp.data), np.nanmin(sp.error)))
                    return
                if verbose_level > 2:
                    print("Fitting %4i,%4i (s/n=%0.2g)" % (x, y, max_sn))
            else:
                max_sn = None
            sp.specfit.Registry = self.Registry  # copy over fitter registry

            if use_nearest_as_guess and self.has_fit.sum() > 0:
                if verbose_level > 1 and ii == 0 or verbose_level > 4:
                    print("Using nearest fit as guess")
                d = np.roll(np.roll(distance, x, 0), y, 1)
                # If there's no fit, set its distance to be unreasonably large
                nearest_ind = np.argmin(d + 1e10 * (~self.has_fit))
                nearest_x, nearest_y = xx.flat[nearest_ind], yy.flat[
                    nearest_ind]
                gg = self.parcube[:, nearest_y, nearest_x]
            elif usemomentcube:
                if verbose_level > 1 and ii == 0: print "Using moment cube"
                gg = self.momentcube[:, y, x]
            elif hasattr(guesses,
                         'shape') and guesses.shape[1:] == self.cube.shape[1:]:
                if verbose_level > 1 and ii == 0:
                    print("Using input guess cube")
                gg = guesses[:, y, x]
            else:
                if verbose_level > 1 and ii == 0: print "Using input guess"
                gg = guesses

            if np.all(np.isfinite(gg)):
                try:
                    sp.specfit(guesses=gg,
                               quiet=verbose_level <= 3,
                               verbose=verbose_level > 3,
                               **fitkwargs)
                except Exception as ex:
                    print "Fit number %i at %i,%i failed on error " % (ii, x,
                                                                       y), ex
                    print "Guesses were: ", gg
                    print "Fitkwargs were: ", fitkwargs
                    if isinstance(ex, KeyboardInterrupt):
                        raise ex
                self.parcube[:, y, x] = sp.specfit.modelpars
                self.errcube[:, y, x] = sp.specfit.modelerrs
                if integral:
                    self.integralmap[:, y, x] = sp.specfit.integral(
                        direct=direct, return_error=True)
                self.has_fit[y, x] = True
            else:
                self.has_fit[y, x] = False
                self.parcube[:, y, x] = blank_value
                self.errcube[:, y, x] = blank_value
                if integral: self.integralmap[:, y, x] = blank_value

            if blank_value != 0:
                self.parcube[self.parcube == 0] = blank_value
                self.errcube[self.parcube == 0] = blank_value

            counter += 1
            if verbose:
                # max(), not min(): lower verbose_level should mean fewer updates
                if ii % (max(10**(3 - verbose_level), 1)) == 0:
                    snmsg = " s/n=%5.1f" % (
                        max_sn) if max_sn is not None else ""
                    npix = len(valid_pixels)
                    pct = 100 * counter / float(npix)
                    print("Finished fit %6i of %6i at (%4i,%4i)%s. "
                          "Elapsed time is %0.1f seconds.  %%%01.f" %
                          (counter, npix, x, y, snmsg, time.time() - t0, pct))

            if integral:
                return ((x, y), sp.specfit.modelpars, sp.specfit.modelerrs,
                        self.integralmap[:, y, x])
            else:
                return ((x, y), sp.specfit.modelpars, sp.specfit.modelerrs)

        #### BEGIN TEST BLOCK ####
        # This test block is to make sure you don't run a 30 hour fitting
        # session that's just going to crash at the end.
        # try a first fit for exception-catching
        try0 = fit_a_pixel((0, valid_pixels[0][0], valid_pixels[0][1]))
        assert len(try0[1]) == len(guesses) == len(self.parcube) == len(
            self.errcube)
        assert len(try0[2]) == len(guesses) == len(self.parcube) == len(
            self.errcube)

        # This is a secondary test... I'm not sure it's necessary, but it
        # replicates what's inside the fit_a_pixel code and so should be a
        # useful sanity check
        x, y = valid_pixels[0]
        sp = self.get_spectrum(x, y)
        sp.specfit.Registry = self.Registry  # copy over fitter registry
        # this reproduced code is needed because the functional wrapping
        # required for the multicore case prevents gg from being set earlier
        if usemomentcube:
            gg = self.momentcube[:, y, x]
        elif hasattr(guesses,
                     'shape') and guesses.shape[1:] == self.cube.shape[1:]:
            gg = guesses[:, y, x]
        else:
            gg = guesses

        # This is NOT in a try/except block because we want to raise the
        # exception here if an exception is going to happen
        sp.specfit(guesses=gg, **fitkwargs)
        #### END TEST BLOCK ####

        if multicore > 0:
            sequence = [(ii, x, y)
                        for ii, (x, y) in tuple(enumerate(valid_pixels))]
            result = parallel_map(fit_a_pixel, sequence, numcores=multicore)
            self._result = result  # backup - don't want to lose data in the case of a failure
            # a lot of ugly hacking to deal with the way parallel_map returns
            # its results needs TWO levels of None-filtering, because any
            # individual result can be None (I guess?) but apparently (and this
            # part I don't believe) any individual *fit* result can be None as
            # well (apparently the x,y pairs can also be None?)
            merged_result = [
                core_result for core_result in result
                if core_result is not None
            ]
            # for some reason, every other time I run this code, merged_result
            # ends up with a different intrinsic shape.  This is an attempt to
            # force it to maintain a sensible shape.
            try:
                if integral:
                    ((x, y), m1, m2, intgl) = merged_result[0]
                else:
                    ((x, y), m1, m2) = merged_result[0]
            except ValueError:
                if verbose_level > 1:
                    print("ERROR: merged_result[0] is %s, which has the wrong shape" %
                          str(merged_result[0]))
                merged_result = itertools.chain.from_iterable(merged_result)
            for TEMP in merged_result:
                if TEMP is None:
                    # this shouldn't be possible, but it appears to happen
                    # anyway.  parallel_map is great, up to a limit that was
                    # reached long before this level of complexity
                    continue
                try:
                    if integral:
                        ((x, y), modelpars, modelerrs, intgl) = TEMP
                    else:
                        ((x, y), modelpars, modelerrs) = TEMP
                except TypeError:
                    # implies that TEMP does not have the shape ((a,b),c,d)
                    # as above, shouldn't be possible, but it happens...
                    continue
                if ((len(modelpars) != len(modelerrs))
                        or (len(modelpars) != len(self.parcube))):
                    raise ValueError(
                        "There was a serious problem; modelpar and error shape don't match that of the parameter cubes"
                    )
                if np.any(np.isnan(modelpars)) or np.any(np.isnan(modelerrs)):
                    self.parcube[:, y, x] = np.nan
                    self.errcube[:, y, x] = np.nan
                    self.has_fit[y, x] = False
                else:
                    self.parcube[:, y, x] = modelpars
                    self.errcube[:, y, x] = modelerrs
                    self.has_fit[y, x] = max(modelpars) > 0
                if integral:
                    self.integralmap[:, y, x] = intgl
        else:
            for ii, (x, y) in enumerate(valid_pixels):
                fit_a_pixel((ii, x, y))

        # March 27, 2014: This is EXTREMELY confusing.  This isn't in a loop...
        # make sure the fitter / fittype are set for the cube
        # this has to be done within the loop because skipped-over spectra
        # don't ever get their fittypes set
        self.specfit.fitter = sp.specfit.fitter
        self.specfit.fittype = sp.specfit.fittype
        self.specfit.parinfo = sp.specfit.parinfo

        if verbose:
            print "Finished final fit %i.  Elapsed time was %0.1f seconds" % (
                ii, time.time() - t0)
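A typical call sketch for fiteach; the file name and guesses below are placeholders, not taken from the source:

import pyspeckit

pcube = pyspeckit.Cube('cube.fits')
# three-parameter Gaussian guesses: amplitude, center, width
pcube.fiteach(fittype='gaussian', guesses=[1.0, 55.0, 2.0],
              signal_cut=3, multicore=4)
# results land in pcube.parcube / pcube.errcube, shape (npars, ny, nx)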
Example #7
# Note: the defaults (dend, pcube_merge_high, catalog) refer to
# module-level objects in the source script.
def fit_all_positions(dendrogram=dend,
                      pcube=pcube_merge_high,
                      catalog=catalog,
                      order=1,
                      second_ratio=False,
                      ncores=1,
                      positions=None,
                      outfilename=None):
    if positions is None:
        # Reverse order: start from the smallest trees
        # Positions are y,x
        positions = get_all_indices(dendrogram)[::-1]

    log.info("Fitting {0} positions.".format(len(positions)))

    if outfilename is not None:
        fitted_positions, parvalues, parerrors = read_pars(outfilename)
        outfile = True
    else:
        fitted_positions = []
        # these must exist in both branches; get_fitvals appends to them
        parvalues, parerrors = [], []
        outfile = None

    lock = multiprocessing.Lock()

    def get_fitvals(p,
                    plot=False,
                    order=order,
                    second_ratio=second_ratio,
                    outfile=outfile,
                    lock=lock):
        if tuple(p) in fitted_positions:
            return

        log.debug("Fitting position {0}".format(p))
        result = fit_position(p,
                              dendrogram=dendrogram,
                              catalog=catalog,
                              pcube=pcube,
                              plot=False,
                              order=order,
                              second_ratio=second_ratio)

        fitted_positions.append(tuple(p))
        if result is None:
            parvalues.append(None)
            parerrors.append(None)
            if outfile is not None:
                with lock:
                    with open(outfilename, 'a') as outfile:
                        outfile.write("{0}, {1}, {2}, {3}\n".format(
                            p[0], p[1], None, None))
                        outfile.flush()
            return
        else:
            parvalues.append(result.specfit.parinfo.values)
            parerrors.append(result.specfit.parinfo.errors)
            if outfile is not None:
                with lock:
                    with open(outfilename, 'a') as outfile:
                        outfile.write("{0}, {1}, {2}, {3}\n".format(
                            p[0], p[1], result.specfit.parinfo.values,
                            result.specfit.parinfo.errors))
                        outfile.flush()
            return result.specfit.parinfo.values, result.specfit.parinfo.errors

    if ncores == 1:
        results = [
            get_fitvals(p, plot=False, order=order, second_ratio=second_ratio)
            for p in ProgressBar(positions)
        ]
    else:
        results = parallel_map(get_fitvals, positions, numcores=ncores)

    bad_positions = [p for p, r in zip(positions, results) if r is None]
    positions2 = [p for p, r in zip(positions, results) if r is not None]
    results2 = [r for r in results if r is not None]

    return positions2, results2, bad_positions
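Note that with ncores > 1 and a process-based parallel_map, each worker appends to its own copy of fitted_positions/parvalues/parerrors; the Lock-guarded file is the only record actually shared across workers. The locked-append pattern, distilled:

import multiprocessing

lock = multiprocessing.Lock()

def append_line(outfilename, p, values, errors, lock=lock):
    # one writer at a time, so concurrent workers cannot interleave lines
    with lock:
        with open(outfilename, 'a') as fh:
            fh.write("{0}, {1}, {2}, {3}\n".format(p[0], p[1], values, errors))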
Example #8
def deblend(para,
            specCubeRef,
            vmin=4.0,
            vmax=11.0,
            f_spcsamp=None,
            tau_wgt=0.1,
            n_cpu=None):
    '''
    Deblend hyperfine structures in a cube based on fitted models, i.e., reconstruct the fitted model from Gaussian
    lines, with optical depths accounted for (e.g., similar to CO (J = 0-1))

    :param para: <ndarray>
        The fitted parameters in the order of vel, width, tex, and tau for each velocity slab.
        (Note: the size of the z axis of para must therefore be a multiple of 4)

    :param specCubeRef: <SpectralCube.Cube>
        The reference cube from which the deblended cube is constructed

    :param vmin: <float>
        The lower velocity limit on the deblended cube, in km/s

    :param vmax: <float>
        The upper velocity limit on the deblended cube, in km/s

    :param f_spcsamp: <int>
        The scaling factor for the spectral sampling relative to the reference cube
        (e.g., f_spcsamp = 2 gives you twice the spectral resolution)

    :param tau_wgt:
        The scaling factor for the input tau
        (e.g., tau_wgt = 0.1 better represents the true optical depth of an NH3 (1,1)
         hyperfine group than the "fitted tau")

    :param n_cpu: <int>
        The number of cpus to use. If None, defaults to all the cpus available minus one.

    :return mcube: <SpectralCube.Cube>
        The deblended cube
    '''

    # open the reference cube file
    cube = specCubeRef
    cube = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')

    # trim the cube to the specified velocity range
    cube = cube.spectral_slab(vmin * u.km / u.s, vmax * u.km / u.s)

    # generate an empty SpectralCube to house the deblended cube
    if f_spcsamp is None:
        deblend = np.zeros(cube.shape)
        hdr = cube.wcs.to_header()
        wcs_new = cube.wcs
    else:
        deblend = np.zeros(
            (cube.shape[0] * int(f_spcsamp), cube.shape[1], cube.shape[2]))
        wcs_new = cube.wcs.deepcopy()
        # adjust the spectral reference value
        wcs_new.wcs.crpix[2] = wcs_new.wcs.crpix[2] * int(f_spcsamp)
        # adjust the spaxel size
        wcs_new.wcs.cdelt[2] = wcs_new.wcs.cdelt[2] / int(f_spcsamp)
        hdr = wcs_new.to_header()

    # retain the beam information
    hdr['BMAJ'] = cube.header['BMAJ']
    hdr['BMIN'] = cube.header['BMIN']
    hdr['BPA'] = cube.header['BPA']

    mcube = SpectralCube(deblend, wcs_new, header=hdr)

    # convert to a unit that the ammonia hf model can handle (i.e., Hz) without having to create a
    # pyspeckit.spectrum.units.SpectroscopicAxis object (which is rather slow for model building in comparison)
    mcube = mcube.with_spectral_unit(u.Hz, velocity_convention='radio')
    xarr = mcube.spectral_axis

    yy, xx = np.indices(para.shape[1:])
    # a pixel is valid as long as it has at least one finite value
    isvalid = np.any(np.isfinite(para), axis=0)
    valid_pixels = zip(xx[isvalid], yy[isvalid])

    def model_a_pixel(xy):
        x, y = int(xy[0]), int(xy[1])
        # nh3_vtau_singlemodel_deblended takes Hz as the spectral unit
        models = [
            nh3_deblended.nh3_vtau_singlemodel_deblended(xarr,
                                                         Tex=tex,
                                                         tau=tau * tau_wgt,
                                                         xoff_v=vel,
                                                         width=width)
            for vel, width, tex, tau in zip(para[::4, y, x], para[1::4, y, x],
                                            para[2::4, y, x], para[3::4, y, x])
        ]

        mcube._data[:, y, x] = np.nansum(np.array(models), axis=0)
        return ((x, y), mcube._data[:, y, x])

    if n_cpu is None:
        # default: all available CPUs minus one, per the docstring (the
        # original `else: n_cpu = 1` silently overrode the caller's value)
        n_cpu = cpu_count() - 1

    if n_cpu > 1:
        print("------------------ deblending cube -----------------")
        print("number of cpu used: {}".format(n_cpu))
        sequence = [(x, y) for x, y in valid_pixels]
        result = parallel_map(model_a_pixel, sequence, numcores=n_cpu)
        merged_result = [
            core_result for core_result in result if core_result is not None
        ]
        for mr in merged_result:
            ((x, y), model) = mr
            x = int(x)
            y = int(y)
            mcube._data[:, y, x] = model
    else:
        for xy in ProgressBar(list(valid_pixels)):
            model_a_pixel(xy)

    # convert back to km/s units before returning
    mcube = mcube.with_spectral_unit(u.km / u.s, velocity_convention='radio')
    gc.collect()
    print("--------------- deblending completed ---------------")

    return mcube
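A call sketch for deblend; the file names are placeholders, and para must stack vel, width, tex, tau per velocity slab along axis 0:

from astropy.io import fits
from spectral_cube import SpectralCube

para = fits.getdata('nh3_parameter_cube.fits')     # shape (4 * n_slab, ny, nx)
refcube = SpectralCube.read('nh3_11_cube.fits')
mcube = deblend(para, refcube, vmin=4.0, vmax=11.0, tau_wgt=0.1, n_cpu=4)
mcube.write('nh3_deblended.fits', overwrite=True)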
Example #9
def parallelFitOh(contSubCube=None,
                  pyCube=None,
                  vels=None,
                  profileMin=None,
                  profileMax=None,
                  validPixels=None,
                  nComps=None):
    """
      Use more than one core to fit spaxels with line profiles in parallel.

      Fit the individual spaxels while limiting the fitting to the
      velocity range of the line profile's location.

      Parameters
      ----------
      contSubCube: array_like
          Array containing the continuum subtracted spectra.
      pyCube: pyspeckit.Cube()
          Cube instance which contains the fitted line profile model.
      vels: array_like
          A list of the velocities along the spectral axis.
      profileMin/Max: float
          Velocities which bound the line profile range.
      validPixels: list of tuples
          Tuples are the (col,row) values of the spaxels which are to be fit.
      nComps: int
          Number of gaussian components to fit to the line profile.


      Returns
      -------
      pyCube updated with the new Gaussian parameters.
      """

    # shorthand for the limit arrays below (as in parallelFit); these were
    # otherwise undefined in this function
    T, F = True, False

    pCygList = [1342225147, 1342199415, 1342237604, 1342212531]

    # Get data shape
    nCols = contSubCube.shape[1]
    nRows = contSubCube.shape[2]

    # Indices which bound the line profile in velocity space.
    minProfIdx = (np.abs(vels - (profileMin - 75))).argmin()
    maxProfIdx = (np.abs(vels - (profileMax + 75))).argmin()

    def fit_a_pixel(iicolrow):
        ii, col, row = iicolrow

        # Make a spectral instance
        sp = pyspeckit.Spectrum(xarr=vels,
                                data=contSubCube[:, col, row],
                                unit='Jy',
                                xarrkwargs={'unit': 'km/s'})

        # Find the flux average inside of the line profile range.
        fluxAve = np.nanmean(contSubCube[minProfIdx:maxProfIdx, col, row])

        # obsId, gaussCenter, doubletSeparation, and contSubFluxes are not
        # defined in this excerpt; they are presumably module-level names
        # in the source script
        if obsId not in pCygList:
            # Absorption
            if fluxAve < 0.:
                peakAmp = np.nanmin(contSubFluxes[minProfIdx:maxProfIdx, col,
                                                  row])

                # parameter limits for this spaxel
                minMaxLimits = [(peakAmp * .9, peakAmp * .1),
                                (profileMin, profileMax), (50, 100)] * 4
                # Enforce the min/max parameter limits?
                minMaxLimited = [(T, T), (F, F), (T, T)] * 4

            # Emission
            else:
                peakAmp = np.nanmax(contSubFluxes[minProfIdx:maxProfIdx, col,
                                                  row])

                # parameter limits for this spaxel
                minMaxLimits = [(peakAmp * .1, peakAmp * .9),
                                (profileMin, profileMax), (50, 100)] * 4
                # Enforce the min/max parameter limits?
                minMaxLimited = [(T, T), (F, F), (T, T)] * 4

            guessesArray = [
                peakAmp * .5, gaussCenter + 50, 100, peakAmp * .5,
                gaussCenter - 50, 100, peakAmp * .5,
                gaussCenter + 50 + doubletSeparation, 100, peakAmp * .5,
                gaussCenter + 50 + doubletSeparation, 100
            ]

        # NOTE: the pCygList branch is not shown in this excerpt, so
        # guessesArray and the limit arrays may be undefined on that path.
        # Any parameters tied to each other?
        tied = [
            '', '', '', '', '', '', 'p[0]', 'p[1]+' + str(doubletSeparation),
            'p[2]', 'p[3]', 'p[4]+' + str(doubletSeparation), 'p[5]'
        ]

        try:
            ## Do the fit!
            sp.specfit(guesses=guessesArray,
                       tied=tied,
                       show_components=True,
                       annotate=False,
                       limits=minMaxLimits,
                       limited=minMaxLimited,
                       quiet=True)
        except Exception:
            # swallow fit failures; modelpars keeps whatever specfit left
            pass

        return ((col, row), sp.specfit.modelpars)

    ##################################
    ## Run parallel_map for specfit ##
    ##################################
    sequence = [(ii, col, row)
                for ii, (col, row) in tuple(enumerate(validPixels))]
    result = parallel_map(fit_a_pixel, sequence, numcores=4)

    for i in range(len(result)):
        col, row = result[i][0][0], result[i][0][1]
        pyCube.parcube[:, col, row] = result[i][1]

    return pyCube
Example #10
def parallelFit(contSubCube=None,
                pyCube=None,
                vels=None,
                profileMin=None,
                profileMax=None,
                validPixels=None,
                nComps=None):
    """
      Use more than one core to fit spaxels in parallel. These spaxels
      have a "high" chi^2 value. "high" is subjective here.

      Fit the individual spaxels while limiting the fitting to the
      velocity range of the line profile's location.

      Parameters
      ----------
      contSubCube: array_like
          Array containing the continuum subtracted spectra.
      pyCube: pyspeckit.Cube()
          Cube instance which contains the fitted line profile model.
      vels: array_like
          A list of the velocities along the spectral axis.
      profileMin/Max: float
          Velocities which bound the line profile range.
      validPixels: list of tuples
          Tuples are the (col,row) values of the spaxels which are to be fit.
      nComps: int
          Number of gaussian components to fit to the line profile.


      Returns
      -------
      pyCube updated with the new Gaussian parameters.
      """

    ## Get data shape
    nCols = contSubCube.shape[1]
    nRows = contSubCube.shape[2]

    ## Indices which bound the line profile in velocity space.
    minProfIdx = (np.abs(vels - (profileMin - 75))).argmin()
    maxProfIdx = (np.abs(vels - (profileMax + 75))).argmin()

    def fit_a_pixel(iicolrow):
        ii, col, row = iicolrow

        ## Make a spectral instance
        sp = pyspeckit.Spectrum(xarr=vels,
                                data=contSubCube[:, col, row],
                                unit='Jy',
                                xarrkwargs={'unit': 'km/s'})

        ## Find the peak amplitude inside the line profile range
        peakAmp = np.max(contSubCube[minProfIdx:maxProfIdx, col, row])

        ## Find the velocity of the line profile peak by computing
        ## the index of the flux closest to the peak amplitude value.
        centerIdx = (np.abs(contSubCube[:, col, row] - peakAmp)).argmin()
        gaussCenter = vels[centerIdx]

        ## Create the array for the initial guesses
        guessesArray = [peakAmp * .3, gaussCenter, 50] * nComps

        ## Create some parameter limits for this spaxel
        T, F = True, False
        minMaxLimits = [((peakAmp) * .2, peakAmp), (profileMin, profileMax),
                        (20, 200)] * nComps

        ## Enforce the min/max parameter limits?
        minMaxLimited = [(T, T), (T, T), (T, T)] * nComps

        ## Any parameters tied to each other?
        tied = ['', '', ''] * nComps

        try:
            ## Do the fit!
            sp.specfit(guesses=guessesArray,
                       tied=tied,
                       show_components=True,
                       annotate=False,
                       limits=minMaxLimits,
                       limited=minMaxLimited,
                       quiet=True)
        except Exception:
            # swallow fit failures; modelpars keeps whatever specfit left
            pass

        return ((col, row), sp.specfit.modelpars)

    ##################################
    ## Run parallel_map for specfit ##
    ##################################
    sequence = [(ii, col, row)
                for ii, (col, row) in tuple(enumerate(validPixels))]
    result = parallel_map(fit_a_pixel, sequence, numcores=4)

    for i in range(len(result)):
        col, row = result[i][0][0], result[i][0][1]
        pyCube.parcube[:, col, row] = result[i][1]

    return pyCube
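Every example above leans on pyspeckit's parallel_map(function, sequence, numcores=...) helper. A minimal stand-in with the same call shape, built on concurrent.futures; this is an approximation, not the pyspeckit implementation, whose per-item failure semantics differ (results can come back as None, which the merge loops above guard against):

from concurrent.futures import ProcessPoolExecutor

def parallel_map(function, sequence, numcores=None):
    # numcores=None lets the executor default to the machine's CPU count
    with ProcessPoolExecutor(max_workers=numcores) as pool:
        return list(pool.map(function, sequence))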