Example #1
 def GetPSF(self, vshint = None):
     import numpy as np
     from scipy import stats
     
     PSFMode = self.nb2.GetCurrentPage().PSFMode
     #get PSF from file
     if PSFMode == 'File':
         psf, vs = np.load(self.GetPSFFilename())
         psf = np.atleast_3d(psf)
         
         return (self.GetPSFFilename(), psf, vs)        
     elif (PSFMode == 'Laplace'):
         sc = float(self.tLaplaceFWHM.GetValue())/2.0
         X, Y = np.mgrid[-30.:31., -30.:31.]
         R = np.sqrt(X*X + Y*Y)
         
         if vshint is not None:
             vx = vshint*1e3
         else:
             vx = sc/2.
         
         vs = type('vs', (object,), dict(x=vx/1e3, y=vx/1e3))
         
         psf = np.atleast_3d(stats.cauchy.pdf(vx*R, scale=sc))
             
         return 'Generated Laplacian, FWHM=%f' % (2*sc), psf/psf.sum(), vs
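For reference, the Laplace branch above boils down to a few lines; here is a minimal standalone sketch (the 200 nm FWHM and the fallback pixel size are made-up values, and only numpy and scipy are assumed):

import numpy as np
from scipy import stats

fwhm = 200.0                      # hypothetical Lorentzian FWHM, in nm
sc = fwhm / 2.0                   # Cauchy scale = half width at half maximum
vx = sc / 2.0                     # fallback pixel size when no vshint is given
X, Y = np.mgrid[-30.:31., -30.:31.]
R = np.sqrt(X * X + Y * Y)        # radial distance from the PSF centre, in pixels
psf = np.atleast_3d(stats.cauchy.pdf(vx * R, scale=sc))
psf /= psf.sum()                  # normalize so the PSF sums to 1
print(psf.shape)                  # (61, 61, 1)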
Example #2
def hausdorffnorm(A, B):
    '''
    Finds the Hausdorff norm between two matrices A and B.
    INPUTS:
    A: numpy array
    B : numpy array
    OUTPUTS:
    Hausdorff norm between matrices A and B
    '''
    # ensure matrices are 3 dimensional, and shaped conformably
    if len(A.shape) == 1:
        A = np.atleast_2d(A)

    if len(B.shape) == 1:
        B = np.atleast_2d(B)

    A = np.atleast_3d(A)
    B = np.atleast_3d(B)

    x, y, z = B.shape
    A = np.reshape(A, (z, x, y))
    B = np.reshape(B, (z, x, y))

    # find hausdorff norm: starting from A to B
    z, x, y = B.shape
    temp1 = np.tile(np.reshape(B.T, (y, z, x)), (max(A.shape), 1))
    temp2 = np.tile(np.reshape(A.T, (y, x, z)), (1, max(B.shape)))
    D1 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    # starting from B to A
    temp1 = np.tile(np.reshape(A.T, (y, z, x)), (max(B.shape), 1))
    temp2 = np.tile(np.reshape(B.T, (y, x, z)), (1, max(A.shape)))
    D2 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    return np.max([D1, D2])
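A quick smoke test of hausdorffnorm, assuming equal-length 1-D inputs so the reshapes are conformable:

import numpy as np

A = np.array([0.0, 1.0, 2.0])
B = np.array([0.0, 1.0, 4.0])
print(hausdorffnorm(A, B))  # 2.0 for these inputs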
Example #3
def convertRotMatToRisoeU(rMats, U0, symTag='Oh'):
    """
    Takes GrainSpotter gff output

    U11 U12 U13 U21 U22 U23 U31 U32 U33

    and converts it into the LLNL/APS frame of reference

    Urows comes from grainspotter's gff output
    U0 comes from XRD.crystallography.latticeVectors.U0
    """
    R = hexrd.XRD.Rotations # formerly import
    
    numU = num.shape(num.atleast_3d(rMats))[0]
    
    Rsamp = num.dot( R.rotMatOfExpMap(piby2*Zl), R.rotMatOfExpMap(piby2*Yl) )
    qin  = R.quatOfRotMat(num.atleast_3d(rMats))
    print "quaternions in (LLNL convention):"
    print qin.T
    qout = num.dot( R.quatProductMatrix( R.quatOfRotMat(Rsamp.T), mult='left' ), \
                    num.dot( R.quatProductMatrix( R.quatOfRotMat(U0), mult='right'),  \
                             qin ).squeeze() ).squeeze()
    if qout.ndim == 1:
        qout = toFundamentalRegion(qout.reshape(4, 1), crysSym=symTag, sampSym=None)
    else:
        qout = toFundamentalRegion(qout, crysSym=symTag, sampSym=None)
    print "quaternions out (Risoe convention, symmetrically reduced)"
    print qout.T
    Uout = R.rotMatOfQuat(qout)
    return Uout
Example #4
def iso_integrate(z_w, q, z_iso):
    z_w = np.atleast_3d(z_w)
    q = np.atleast_3d(q)
    if isinstance(z_iso, np.ma.MaskedArray):
        z_iso = z_iso.filled(1e20)
    z_iso *= np.ones(q.shape[1:])
    return _iso.integrate(z_w, q, z_iso)
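Since nearly every snippet on this page leans on the same promotion rules, here is a quick reference for what np.atleast_3d does to each input rank:

import numpy as np

print(np.atleast_3d(1.0).shape)                  # scalar   -> (1, 1, 1)
print(np.atleast_3d(np.zeros(4)).shape)          # (4,)     -> (1, 4, 1)
print(np.atleast_3d(np.zeros((4, 3))).shape)     # (4, 3)   -> (4, 3, 1)
print(np.atleast_3d(np.zeros((4, 3, 2))).shape)  # 3-D input is returned unchanged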
Example #5
 def GetPSF(self, vshint = None):
     psfKey = (self.psfType, self.psfFilename, self.lorentzianFWHM, self.beadDiameter)
     
     if psfKey not in self._psfCache:
         if self.psfType == 'file':
             psf, vs = np.load(self.psfFilename)
             psf = np.atleast_3d(psf)
             
             self._psfCache[psfKey] = (psf, vs)        
         elif (self.psfType == 'Laplace'):
             from scipy import stats
             sc = self.lorentzianFWHM/2.0
             X, Y = np.mgrid[-30.:31., -30.:31.]
             R = np.sqrt(X*X + Y*Y)
             
             if vshint is not None:
                 vx = vshint[0]
             else:
                 vx = sc/2.
             
             vs = type('vs', (object,), dict(x=vx/1e3, y=vx/1e3))
             
             psf = np.atleast_3d(stats.cauchy.pdf(vx*R, scale=sc))
                 
             self._psfCache[psfKey] = (psf/psf.sum(), vs)
         elif (self.psfType == 'bead'):
             from PYME.Deconv import beadGen
             psf = beadGen.genBeadImage(self.beadDiameter/2, vshint)
             
             vs = type('vs', (object,), dict(x=vshint[0]/1e3, y=vshint[1]/1e3))
             
             self._psfCache[psfKey] = (psf/psf.sum(), vs)
             
             
     return self._psfCache[psfKey]
Example #6
    def depth_image(self):
        self._call_on_changed()

        gl = self.glb
        gl.Clear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        gl.PolygonMode(GL_FRONT_AND_BACK, GL_FILL)
        draw_noncolored_verts(gl, self.camera.v.r, self.f)
        result = np.asarray(deepcopy(gl.getDepth()), np.float64)

        if self.overdraw:
            gl.PolygonMode(GL_FRONT_AND_BACK, GL_LINE)
            draw_noncolored_verts(gl, self.camera.v.r, self.f)
            overdraw = np.asarray(deepcopy(gl.getDepth()), np.float64)
            gl.PolygonMode(GL_FRONT_AND_BACK, GL_FILL)
            boundarybool_image = self.boundarybool_image
            result = overdraw*boundarybool_image + result*(1-boundarybool_image)

        if hasattr(self, 'background_image'):
            if False: # has problems at boundaries, not sure why yet
                bg_px = self.visibility_image == 4294967295
                fg_px = 1 - bg_px
                result = bg_px * self.background_image + fg_px * result
            else:
                tmp = np.concatenate((np.atleast_3d(result), np.atleast_3d(self.background_image)), axis=2)
                result = np.min(tmp, axis=2)

        return result
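The background merge at the end is just a per-pixel minimum over a stacked third axis; a toy sketch with hypothetical 2x2 depth maps:

import numpy as np

result = np.array([[1.0, 5.0], [3.0, 2.0]])      # rendered depth
background = np.array([[4.0, 4.0], [4.0, 4.0]])  # background depth
stacked = np.concatenate((np.atleast_3d(result), np.atleast_3d(background)), axis=2)
print(np.min(stacked, axis=2))                   # nearer surface wins: [[1. 4.] [3. 2.]]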
Example #7
    def _viewStatesNGL(self, states, statetype, protein, ligand, mols, numsamples):
        if states is None:
            states = range(self.macronum)
        if isinstance(states, int):
            states = [states]
        if mols is None:
            mols = self.getStates(states, statetype, numsamples=min(numsamples, 15))
        colors = [0, 1, 3, 4, 5, 6, 7, 9]
        if protein is None and ligand is None:
            raise NameError('Please provide either the "protein" or "ligand" parameter for viewStates.')
        if protein:
            mol = Molecule()
        if ligand:
            mol = mols[0].copy()
            mol.remove(ligand, _logger=False)
            mol.coords = np.atleast_3d(mol.coords[:, :, 0])
            mol.reps.add(sel='protein', style='NewCartoon', color='Secondary Structure')
        for i, s in enumerate(states):
            if protein:
                mol.reps.add(sel='segid ST{}'.format(s), style='NewCartoon', color='Index')
            if ligand:
                mol.reps.add(sel='segid ST{}'.format(s), style='Licorice', color=colors[np.mod(i, len(colors))])
                mols[i].filter(ligand, _logger=False)

            mols[i].set('segid', 'ST{}'.format(s))
            tmpcoo = mols[i].coords
            for j in range(mols[i].numFrames):
                mols[i].coords = np.atleast_3d(tmpcoo[:, :, j])
                mol.append(mols[i])

        w = mol.view(viewer='ngl')
        self._nglButtons(w, statetype, states)
        return w
Example #8
def convertRotMatToFableU(rMats, U0=num.eye(3), symTag='Oh', display=False):
    """
    Takes GrainSpotter gff output

    U11 U12 U13 U21 U22 U23 U31 U32 U33

    and converts it into the hexrd/APS frame of reference

    Urows comes from grainspotter's gff output
    U0 comes from xrd.crystallography.latticeVectors.U0
    """
    numU = num.shape(num.atleast_3d(rMats))[0]

    qin  = quatOfRotMat(num.atleast_3d(rMats))
    qout = num.dot( quatProductMatrix( quatOfRotMat(fableSampCOB.T), mult='left' ), \
                    num.dot( quatProductMatrix( quatOfRotMat(U0), mult='right'),  \
                             qin ).squeeze() ).squeeze()
    if qout.ndim == 1:
        qout = toFundamentalRegion(qout.reshape(4, 1), crysSym=symTag, sampSym=None)
    else:
        qout = toFundamentalRegion(qout, crysSym=symTag, sampSym=None)
    if display:
        print "quaternions in (hexrd convention):"
        print qin.T
        print "quaternions out (Fable convention, symmetrically reduced)"
        print qout.T
        pass
    Uout = rotMatOfQuat(qout)
    return Uout
Example #9
    def _raw_predict(self, Xnew, full_cov=False, kern=None):
        """
        Make a prediction for the latent function values
        """

        if kern is None: kern = self.kern

        if not isinstance(Xnew, VariationalPosterior):
            Kx = kern.K(self.Z, Xnew)
            mu = np.dot(Kx.T, self.posterior.woodbury_vector)
            if full_cov:
                Kxx = kern.K(Xnew)
                if self.posterior.woodbury_inv.ndim == 2:
                    var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
                elif self.posterior.woodbury_inv.ndim == 3:
                    var = Kxx[:,:,None] - np.tensordot(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx).T, Kx, [1,0]).swapaxes(1,2)
            else:
                Kxx = kern.Kdiag(Xnew)
                var = (Kxx - np.sum(np.dot(np.atleast_3d(self.posterior.woodbury_inv).T, Kx) * Kx[None,:,:], 1)).T
        else:
            Kx = kern.psi1(self.Z, Xnew)
            mu = np.dot(Kx, self.posterior.woodbury_vector)
            if full_cov:
                raise NotImplementedError("TODO")
            else:
                Kxx = kern.psi0(self.Z, Xnew)
                psi2 = kern.psi2(self.Z, Xnew)
                var = Kxx - np.sum(np.sum(psi2 * Kmmi_LmiBLmi[None, :, :], 1), 1)
        return mu, var
Example #10
    def Project(self, projType):
        import numpy as np
        from PYME.DSView.image import ImageStack
        from PYME.DSView import ViewIm3D
        import os

        if projType == 'mean':        
            filt_ims = [np.atleast_3d(self.image.data[:,:,:,chanNum].mean(2)) for chanNum in range(self.image.data.shape[3])]
        elif projType == 'max':
            filt_ims = [np.atleast_3d(self.image.data[:,:,:,chanNum].max(2)) for chanNum in range(self.image.data.shape[3])]

        fns = os.path.split(self.image.filename)[1]        
        
        im = ImageStack(filt_ims, titleStub = '%s - %s' %(fns, projType))
        im.mdh.copyEntriesFrom(self.image.mdh)
        im.mdh['Parent'] = self.image.filename
        im.mdh['Processing.Projection'] = projType

        if self.dsviewer.mode == 'visGUI':
            mode = 'visGUI'
        else:
            mode = 'lite'

        dv = ViewIm3D(im, mode=mode, glCanvas=self.dsviewer.glCanvas)

        #set scaling to (0,1)
        for i in range(im.data.shape[3]):
            dv.do.Gains[i] = 1.0
Example #11
    def getXYZ(self):
        """ Get XYZ values in world coordinates for each pixel.

        Usage: XYZ = self.getXYZ()

        Input:
            -NONE-

        Output:
            XYZ - M-by-N-by-3 matrix of [X Y Z] world coordinates for each pixel
        """

        if self.XY is not None:
            return np.c_[np.atleast_3d(self.XY), np.atleast_3d(self)]
        else:
            x = np.arange(0, self.width)
            y = np.arange(0, self.height)
            xx, yy = np.meshgrid(x, y)

            XY = np.zeros((self.height, self.width, 2))

            # From depth map to Point Cloud --> use focal distance
            XY[:, :, 0] = (xx - self.K[0, 2]) / self.K[0, 0]
            XY[:, :, 1] = (yy - self.K[1, 2]) / self.K[1, 1]
            XY = XY * np.atleast_3d(self)
            self.XY = XY  # cache the result so the early return above can reuse it
            return np.c_[np.atleast_3d(self.XY), np.atleast_3d(self)]
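A standalone sketch of the same pinhole back-projection, assuming a plain 2-D depth array and a hypothetical 3x3 intrinsic matrix K:

import numpy as np

depth = np.ones((4, 6))                          # toy depth map
K = np.array([[500.0, 0.0, 3.0],
              [0.0, 500.0, 2.0],
              [0.0, 0.0, 1.0]])                  # hypothetical intrinsics
xx, yy = np.meshgrid(np.arange(6), np.arange(4))
XY = np.dstack([(xx - K[0, 2]) / K[0, 0], (yy - K[1, 2]) / K[1, 1]])
XYZ = np.dstack([XY * np.atleast_3d(depth), depth])
print(XYZ.shape)                                 # (4, 6, 3): [X Y Z] per pixel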
Example #12
 def append(self, *args):
     if len(args)<1:
         pass
     else:
         smp=self.mapped_parameters
         print(args)
         for arg in args:
             #object parameters
             mp=arg.mapped_parameters
             
             if mp.original_filename not in smp.original_files:
                 smp.original_files[mp.original_filename]=arg
                 # add the data to the aggregate array
                 if self.data is None:
                     self.data=np.atleast_3d(arg.data)
                 else:
                     self.data=np.append(self.data,np.atleast_3d(arg.data),axis=2)
                 print "File %s added to aggregate."%mp.original_filename
             else:
                 print "Data from file %s already in this aggregate. \n \
 Delete it first if you want to update it."%mp.original_filename
         # refresh the axes for the new sized data
         self.axes_manager=AxesManager(self._get_undefined_axes_list())
         smp.original_filename="Aggregate Image: %s" % list(smp.original_files.keys())
         self.summary()
Example #13
def getAvgAmplitudes(event_array, trace_array, time_range=None):
    """This routine takes an event_array (time x cells) and
    corresponding trace array and returns the average amplitudes of
    events in each cell.

    :param: event_array - 2 or 3d numpy event array (time x cells, or time x cells x trials)
    :param: time_range - optional list of 2 numbers limiting the time range to count events
    :returns: 2d masked numpy array of average event amplitudes, sized cells x largest number of events.
              Masked entries account for the variable number of events per cell.
    """
    event_array = np.atleast_3d(event_array)
    trace_array = np.atleast_3d(trace_array)

    max_num_events = getCounts(event_array).max()
    time, cells, trials = event_array.shape

    amps = np.zeros((cells, trials, int(max_num_events)))
    amps[:] = np.nan

    for cell in range(cells):
        for trial in range(trials):
            event_ids = np.unique(event_array[:,cell,trial])[1:]
            for i, event_id in enumerate(event_ids):
                amps[cell, trial, i] = trace_array[event_array == event_id].mean()
    amps = np.ma.array(amps, mask=np.isnan(amps))
    amps = np.squeeze(amps)

    return np.ma.masked_array(amps, np.isnan(amps))
Example #14
    def __init__(self, root, noise, option):
        self.root = root
        self.nFeatures = 4
        self.kernelSize = 3
        self.poolLength = 2
        self.nLambda = 112
        self.batchSize = 64
        self.nClasses = [50] * 12
        self.noise = noise
        self.option = option

        self.labels = ['T0', 'T1', 'T2', 'vmic', 'B0', 'B1', 'v0', 'v1', 'thB0', 'thB1', 'chiB0', 'chiB1']

        self.n_pars = len(self.labels)

# T0, T1, T2, vmic, B0, B1, v0, v1, thB0, thB1, chiB0, chiB1
        self.lower = np.asarray([-3000.0, -1500.0, -3000.0, 0.0, 0.0, 0.0, -7.0, -7.0, 0.0, 0.0, 0.0, 0.0], dtype='float32')
        self.upper = np.asarray([3000.0, 3000.0, 5000.0, 4.0, 3000.0, 3000.0, 7.0, 7.0, 180.0, 180.0, 180.0, 180.0], dtype='float32')
        
        self.dataFile = "../database/database_sir.h5"

        f = h5py.File(self.dataFile, 'r')
        pars = f.get("parameters")
        stokes = f.get("stokes")
        self.nModels, _ = pars.shape
                
        self.nTraining = int(self.nModels * 0.9)
        self.nValidation = int(self.nModels * 0.1)

# Standardize Stokes parameters
        std_values = np.std(np.abs(stokes[0:self.nTraining,:,:]),axis=0)
        stokes /= std_values[None,:,:]

# Save normalization values
        np.save('{0}_normalization.npy'.format(self.root), std_values)
        
        print("Training set: {0}".format(self.nTraining))

        print("Validation set: {0}".format(self.nValidation))

        self.inTrain = []
        for i in range(4):            
            self.inTrain.append(np.atleast_3d(stokes[0:self.nTraining,i,:]).astype('float32'))

        self.inTest = []
        for i in range(4):            
            self.inTest.append(np.atleast_3d(stokes[self.nTraining:,i,:]).astype('float32'))

        self.outTrain = []
        for i in range(self.n_pars):
            outTrain = np.floor((pars[0:self.nTraining, i] - self.lower[i]) / (self.upper[i] - self.lower[i]) * self.nClasses[i]).astype('int32')            
            self.outTrain.append(np_utils.to_categorical(outTrain, self.nClasses[i]))

        self.outTest = []
        for i in range(self.n_pars):
            outTest = np.floor((pars[self.nTraining:, i] - self.lower[i]) / (self.upper[i] - self.lower[i]) * self.nClasses[i]).astype('int32')
            self.outTest.append(np_utils.to_categorical(outTest, self.nClasses[i]))

        f.close()
Example #15
def regression_plot(Z,X,band_names=None,visible_only=True,figsize=(12,7)):
    """
    Produce a figure with a plot for each image band that displays the
    relationship between depth and radiance and gives a visual representation
    of the regression carried out in the `slopes` and `regressions` methods.

    Notes
    -----
    This method doesn't come directly from Lyzenga 1978 but the author of this
    code found it helpful.

    Parameters
    ----------
    Z : np.ma.MaskedArray
        Array of depth values repeated for each band so that Z.shape==X.shape.
        The mask needs to be the same too so that Z.mask==X.mask for all the
        bands.
    X : np.ma.MaskedArray
        The array of log transformed radiance values from equation B1 of
        Lyzenga 1978.

    Returns
    -------
    figure
        A matplotlib figure.
    """
    if band_names is None:
        band_names = ['Band'+str(i+1) for i in range(X.shape[-1])]
    nbands = X.shape[-1]
    if np.atleast_3d(Z).shape[-1] == 1:
        Z = np.repeat(np.atleast_3d(Z), nbands, 2)
    if visible_only:
        fig, axs = plt.subplots( 2, 3, figsize=figsize)
    else:
        fig, axs = plt.subplots( 2, 4, figsize=figsize )
    regs = regressions(Z,X)
    for i, ax in enumerate(axs.flatten()):
        if i > nbands-1:
            continue
        slp, incpt, rval = regs[:,i]
        # print X.shape, Z.shape
        x, y = equalize_array_masks(Z[...,i], X[...,i])
        if x.count() < 2:
            continue
        x, y = x.compressed(), y.compressed()
        # print "i = {}, x.shape = {}, y.shape = {}".format(i, x.shape, y.shape)
        ax.scatter( x, y, alpha=0.1, edgecolor='none', c='gold' )
        smth = lowess(y,x,frac=0.2)
        # ax.plot(smth.T[0],smth.T[1],c='black',alpha=0.5)
        ax.plot(smth.T[0],smth.T[1],c='black',alpha=0.5,linestyle='--')
        reglabel = "m=%.2f, r=%.2f" % (slp,rval)
        f = lambda x: incpt + slp * x
        ax.plot( x, f(x), c='brown', label=reglabel, alpha=1.0 )
        ax.set_title( band_names[i] )
        ax.set_xlabel( r'Depth (m)' )
        ax.set_ylabel( r'$X_i$' )
        ax.legend(fancybox=True, framealpha=0.5)
    plt.tight_layout()
    return fig
Example #16
 def load_texture(self, filename, gray=False, blur=False):
     print "Loading texture from " + filename
     self.pixels = np.atleast_3d(scipy.misc.imread(filename, flatten=gray))
     if blur:
         self.pixels = \
             np.atleast_3d(scipy.misc.imfilter(self.pixels.squeeze(), 
                                                'blur'))
     print "Done loading texture"
Example #17
def update(fig):
    """Fit new pointing model and update plots."""
    # Perform early redraw to improve interactivity of clicks (which typically change state of target dots)
    # Target state: 0 = flagged, 1 = unflagged, 2 = highlighted
    target_state = keep * ((target_index == fig.highlighted_target) + 1)
    # Specify colours of flagged, unflagged and highlighted dots, respectively, as RGBA tuples
    dot_colors = np.choose(target_state, np.atleast_3d(np.vstack([(1,1,1,1), (0,0,1,1), (1,0,0,1)]))).T
    for ax in fig.axes[:7]:
        ax.dots.set_facecolors(dot_colors)
    fig.canvas.draw()

    # Fit new pointing model and update results
    params, sigma_params = new_model.fit(az[keep], el[keep], measured_delta_az[keep], measured_delta_el[keep],
                                         std_delta_az[keep], std_delta_el[keep], enabled_params)
    new.update(new_model)

    # Update rest of figure
    fig.texts[3].set_text(r"$\chi^2$ = %.1f" % new.chi2)
    fig.texts[4].set_text("all sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(target_index == fig.highlighted_target)
    fig.texts[5].set_text("target sky rms = %.3f' (robust %.3f')" % (new.sky_rms, new.robust_sky_rms))
    new.metrics(keep)
    fig.texts[-1].set_text(unique_targets[fig.highlighted_target])
    # Update model parameter strings
    for p, param in enumerate(display_params):
        fig.texts[2*p + 6].set_text(param_to_str(new_model, param) if enabled_params[param] else '')
        # HACK to convert sigmas to arcminutes, but not for P9 and P12 (which are scale factors)
        # This functionality should really reside inside the PointingModel class
        std_param = rad2deg(sigma_params[param]) * 60. if param not in [8, 11] else sigma_params[param]
        std_param_str = ("%.2f'" % std_param) if param not in [8, 11] else ("%.0e" % std_param)
        fig.texts[2*p + 7].set_text(std_param_str if enabled_params[param] and opts.use_stats else '')
        # Turn parameter string bold if it changed significantly from old value
        if np.abs(params[param] - old_model.values()[param]) > 3.0 * sigma_params[param]:
            fig.texts[2*p + 6].set_weight('bold')
            fig.texts[2*p + 7].set_weight('bold')
        else:
            fig.texts[2*p + 6].set_weight('normal')
            fig.texts[2*p + 7].set_weight('normal')
    daz_az, del_az, daz_el, del_el, quiver, before, after = fig.axes[:7]
    # Update quiver plot
    quiver_scale = 0.1 * fig.quiver_scale_slider.val * np.pi / 6 / deg2rad(old.robust_sky_rms / 60.)
    quiver.quiv.set_segments(quiver_segments(new.residual_az, new.residual_el, quiver_scale))
    quiver.quiv.set_color(np.choose(keep, np.atleast_3d(np.vstack([(0.3,0.3,0.3,0.2), (0.3,0.3,0.3,1)]))).T)
    # Update residual plots
    daz_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_xel) * 60.])
    del_az.dots.set_offsets(np.c_[rad2deg(az), rad2deg(new.residual_el) * 60.])
    daz_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_xel) * 60.])
    del_el.dots.set_offsets(np.c_[rad2deg(el), rad2deg(new.residual_el) * 60.])
    after.dots.set_offsets(np.c_[np.arctan2(new.residual_el, new.residual_xel), new.abs_sky_error])
    resid_lim = 1.2 * max(new.abs_sky_error.max(), old.abs_sky_error.max())
    daz_az.set_ylim(-resid_lim, resid_lim)
    del_az.set_ylim(-resid_lim, resid_lim)
    daz_el.set_ylim(-resid_lim, resid_lim)
    del_el.set_ylim(-resid_lim, resid_lim)
    before.set_ylim(0, resid_lim)
    after.set_ylim(0, resid_lim)
    # Redraw the figure
    fig.canvas.draw()
Example #18
    def __init__(self, root, noise, option):
        self.root = root
        self.nFeatures = 100
        self.kernelSize = 3
        self.poolLength = 2
        self.nLambda = 50
        self.batchSize = 256
        self.nClasses = [50, 50, 50, 50, 10, 20, 20, 20, 20]
        self.noise = noise
        self.option = option

                                # BField, theta, chi, vmac, damping, B0, B1, doppler, kl
        self.lower = np.asarray([0.0,      0.0,   0.0, -7.0, 0.0,  0.15, 0.15, 0.20,  1.0], dtype='float32')
        self.upper = np.asarray([3000.0, 180.0, 180.0,  7.0, 0.5,   1.2,  1.2, 0.80,  5.0], dtype='float32')
        
        self.dataFile = "/net/duna/scratch1/aasensio/deepLearning/milne/database/database_6301_hinode_1component.h5"

        f = h5py.File(self.dataFile, 'r')
        pars = f.get("parameters")
        stokes = f.get("stokes")
        self.nModels, _ = pars.shape

        std_values = np.std(np.abs(stokes),axis=0)
        stokes /= std_values[None,:,:]

        self.sigma_noise = 1e-3 / np.mean(std_values, axis=0)

# Save normalization values        
        np.save('{0}_normalization.npy'.format(self.root), std_values)
                
        self.nTraining = int(self.nModels * 0.9)
        self.nValidation = int(self.nModels * 0.1)
        
        print("Training set: {0}".format(self.nTraining))

        print("Validation set: {0}".format(self.nValidation))

        self.inTrain = []
        for i in range(4):            
            self.inTrain.append(np.atleast_3d(stokes[0:self.nTraining,:,i]).astype('float32'))

        self.inTest = []
        for i in range(4):            
            self.inTest.append(np.atleast_3d(stokes[self.nTraining:,:,i]).astype('float32'))

        self.outTrain = []
        for i in range(9):
            outTrain = np.floor((pars[0:self.nTraining, i] - self.lower[i]) / (self.upper[i] - self.lower[i]) * self.nClasses[i]).astype('int32')            
            self.outTrain.append(np_utils.to_categorical(outTrain, self.nClasses[i]))

        self.outTest = []
        for i in range(9):
            outTest = np.floor((pars[self.nTraining:, i] - self.lower[i]) / (self.upper[i] - self.lower[i]) * self.nClasses[i]).astype('int32')
            self.outTest.append(np_utils.to_categorical(outTest, self.nClasses[i]))

        f.close()
Example #19
def merge3D(A, B, position):
    A = np.atleast_3d(A)
    B = np.atleast_3d(B)

    mat_temp = np.nan * np.ones([max(A.shape[0] + position[0], B.shape[0]), max(position[1] + A.shape[1], B.shape[1]),
                                 max(position[2] + A.shape[2], B.shape[2])])
    mat_temp[0:B.shape[0], 0:B.shape[1], 0:B.shape[2]] = B
    mat_temp[position[0]:position[0] + A.shape[0],
             position[1]:position[1] + A.shape[1],
             position[2]:position[2] + A.shape[2]] = A

    return mat_temp
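A usage sketch: place a 2x2 block A into a 3x3 canvas B at offset (1, 1, 0); cells covered by neither input would stay NaN:

import numpy as np

A = np.ones((2, 2))
B = np.zeros((3, 3))
print(merge3D(A, B, (1, 1, 0))[:, :, 0])
# [[0. 0. 0.]
#  [0. 1. 1.]
#  [0. 1. 1.]]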
Example #20
 def get_transparent_item_heights_and_mask(self, low_limit, high_limit):
     low_limit_3d = numpy.atleast_3d(low_limit)
     high_limit_3d = numpy.atleast_3d(high_limit)
     max_height = self.blocks.shape[2]
     shape = self.blocks.shape
     trimmed_shape = (shape[0], shape[1], shape[2]-1)
     cell_depth = numpy.indices(trimmed_shape)[2]
     cell_is_selected = numpy.logical_and(cell_depth>=low_limit_3d, cell_depth<high_limit_3d)
     selectable_substance = numpy.logical_and(tileid_is_transparent[self.blocks[:,:,:-1]], self.blocks[:,:,:-1] != 0)
     potential_blocks = numpy.logical_and(selectable_substance, cell_is_selected)
     floor_heights = (max_height-2)-numpy.argmax(potential_blocks[:,:,::-1], axis=2)
     mask = get_cells_using_heightmap(potential_blocks, floor_heights)
     return numpy.clip(floor_heights, low_limit, high_limit), mask
Example #21
 def covariance(self):
     """
     Posterior covariance
     $$
     K_{xx} - K_{xx}W_{xx}^{-1}K_{xx}
     W_{xx} := \texttt{Woodbury inv}
     $$
     """
     if self._covariance is None:
         #LiK, _ = dtrtrs(self.woodbury_chol, self._K, lower=1)
         self._covariance = (np.atleast_3d(self._K) - np.tensordot(np.dot(np.atleast_3d(self.woodbury_inv).T, self._K), self._K, [1,0]).T).squeeze()
         #self._covariance = self._K - self._K.dot(self.woodbury_inv).dot(self._K)
     return self._covariance
Example #22
def _join_metrics(A, B):
    """Join two image metric dictionaries."""
    for key in list(B.keys()):
        if key in A:
            A[key][0] = np.concatenate((A[key][0], B[key][0]))

            A[key][1] = np.concatenate(
                (np.atleast_3d(A[key][1]), np.atleast_3d(B[key][1])), axis=2
            )

        else:
            A[key] = B[key]

    return A
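A usage sketch with two hypothetical metric dictionaries, where each value is a [1-D scores, 2-D map] list; maps are stacked along a new third axis:

import numpy as np

A = {'mse': [np.array([0.1]), np.zeros((2, 2))]}
B = {'mse': [np.array([0.2]), np.ones((2, 2))],
     'ssim': [np.array([0.9]), np.ones((2, 2))]}
joined = _join_metrics(A, B)
print(joined['mse'][0])        # [0.1 0.2]
print(joined['mse'][1].shape)  # (2, 2, 2)
print('ssim' in joined)        # True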
Example #23
def invalid_fill(data, invalid):
    """
    Fill invalid data with values from nearest valid data. This function calls
    `invalid_fill_single` on each image band separately to avoid filling with
    values from another band.
    """
    data = np.atleast_3d(data)
    invalid = np.atleast_3d(invalid)
    nbands = data.shape[-1]
    outarr = data.copy()
    for b in range(nbands):
        outarr[...,b] = invalid_fill_single(outarr[...,b], invalid[...,b])
    outarr = np.ma.masked_where(data.mask, outarr)
    return outarr
Example #24
def hist_cost_2(BH1,BH2):

	nsamp1,nbins=BH1.shape
	nsamp2,nbins=BH2.shape

	eps  = 2.2204e-16
	BH1n = BH1 / (np.sum(BH1,axis=1,keepdims=True)+eps)
	BH2n = BH2 / (np.sum(BH2,axis=1,keepdims=True)+eps)

	tmp1 = np.tile(np.transpose(np.atleast_3d(BH1n),[0,2,1]),(1,nsamp2,1))
	tmp2 = np.tile(np.transpose(np.atleast_3d(BH2n.T),[2,1,0]),(nsamp1,1,1))
	HC = 0.5*np.sum((tmp1-tmp2)**2/(tmp1+tmp2+eps),axis=2)

	return HC
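A usage sketch computing the pairwise chi-squared histogram cost between two small sets of histograms (rows are samples, columns are bins):

import numpy as np

BH1 = np.array([[2.0, 1.0, 1.0], [0.0, 3.0, 1.0]])  # 2 samples x 3 bins
BH2 = np.array([[1.0, 1.0, 2.0]])                    # 1 sample x 3 bins
print(hist_cost_2(BH1, BH2).shape)                   # (2, 1): one cost per sample pair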
Example #25
def mnd(x, mu, sigma):
    """Multivariate normal density, evaluated for a stack of vectors."""
    sigma = np.asarray(sigma)
    k = np.shape(sigma)[0]

    # promote x and mu to (n, k, 1) stacks of column vectors
    x = np.atleast_3d(x)
    mu = np.atleast_3d(mu)
    x_minus_mu = (x - mu).squeeze()

    # quadratic form (x - mu)^T sigma^-1 (x - mu), one value per sample
    left = np.dot(x_minus_mu, np.linalg.inv(sigma))
    exponent = -0.5 * np.sum(left * x_minus_mu, axis=-1)

    # normalization of the multivariate normal: sqrt((2*pi)^k * det(sigma))
    return np.exp(exponent) / np.sqrt((2 * np.pi)**k * np.linalg.det(sigma))
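A quick numerical check against scipy's reference implementation (assuming scipy is available); both lines should print the same densities:

import numpy as np
from scipy.stats import multivariate_normal

x = np.array([[0.5, -0.2], [1.0, 0.3]])
mu = np.zeros(2)
sigma = np.array([[1.0, 0.2], [0.2, 2.0]])
print(mnd(x, mu, sigma))
print(multivariate_normal(mean=mu, cov=sigma).pdf(x))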
Example #26
def stabilization(data, m_hat, sigma, N, mask=None, clip_eta=True, return_eta=False, n_cores=None, mp_method=None):

    data = np.asarray(data)
    m_hat = np.asarray(m_hat)
    sigma = np.atleast_3d(sigma)
    N = np.atleast_3d(N)

    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        mask = np.asarray(mask, dtype=bool)

    if N.ndim < data.ndim:
        N = np.broadcast_to(N[..., None], data.shape)

    if sigma.ndim == (data.ndim - 1):
        sigma = np.broadcast_to(sigma[..., None], data.shape)

    # Check all dims are ok
    if (data.shape != sigma.shape):
        raise ValueError('data shape {} is not compatible with sigma shape {}'.format(data.shape, sigma.shape))

    if (data.shape[:-1] != mask.shape):
        raise ValueError('data shape {} is not compatible with mask shape {}'.format(data.shape, mask.shape))

    if (data.shape != m_hat.shape):
        raise ValueError('data shape {} is not compatible with m_hat shape {}'.format(data.shape, m_hat.shape))

    arglist = ((data[..., idx, :],
                m_hat[..., idx, :],
                mask[..., idx],
                sigma[..., idx, :],
                N[..., idx, :],
                clip_eta)
               for idx in range(data.shape[-2]))

    parallel_stabilization = multiprocesser(multiprocess_stabilization, n_cores=n_cores, mp_method=mp_method)
    output = parallel_stabilization(arglist)

    data_stabilized = np.zeros_like(data, dtype=np.float32)
    eta = np.zeros_like(data, dtype=np.float32)

    for idx, content in enumerate(output):
        data_stabilized[..., idx, :] = content[0]
        eta[..., idx, :] = content[1]

    if return_eta:
        return data_stabilized, eta
    return data_stabilized
Example #27
def dImage_wrt_2dVerts(observed, visible, visibility, barycentric, image_width, image_height, num_verts, f):
    """Construct a sparse jacobian that relates 2D projected vertex positions
    (in the columns) to pixel values (in the rows). This can be done
    in two steps."""

    n_channels = np.atleast_3d(observed).shape[2]
    shape = visibility.shape

    # Step 1: get the structure ready, ie the IS and the JS
    IS = np.tile(col(visible), (1, 2*f.shape[1])).ravel()
    JS = col(f[visibility.ravel()[visible]].ravel())
    JS = np.hstack((JS*2, JS*2+1)).ravel()

    pxs = np.asarray(visible % shape[1], np.int32)
    pys = np.asarray(np.floor(np.floor(visible) / shape[1]), np.int32)

    if n_channels > 1:
        IS = np.concatenate([IS*n_channels+i for i in range(n_channels)])
        JS = np.concatenate([JS for i in range(n_channels)])

    # Step 2: get the data ready, ie the actual values of the derivatives
    ksize=1
    sobel_normalizer = cv2.Sobel(np.asarray(np.tile(row(np.arange(10)), (10, 1)), np.float64), cv2.CV_64F, dx=1, dy=0, ksize=ksize)[5,5]
    xdiff = -cv2.Sobel(observed, cv2.CV_64F, dx=1, dy=0, ksize=ksize) / sobel_normalizer
    ydiff = -cv2.Sobel(observed, cv2.CV_64F, dx=0, dy=1, ksize=ksize) / sobel_normalizer

    xdiff = np.atleast_3d(xdiff)
    ydiff = np.atleast_3d(ydiff)

    datas = []

    # The data is weighted according to barycentric coordinates
    bc0 = col(barycentric[pys, pxs, 0])
    bc1 = col(barycentric[pys, pxs, 1])
    bc2 = col(barycentric[pys, pxs, 2])
    for k in range(n_channels):
        dxs = xdiff[pys, pxs, k]
        dys = ydiff[pys, pxs, k]
        if f.shape[1] == 3:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1,col(dxs)*bc2,col(dys)*bc2)).ravel())
        else:
            datas.append(np.hstack((col(dxs)*bc0,col(dys)*bc0,col(dxs)*bc1,col(dys)*bc1)).ravel())

    data = np.concatenate(datas)

    ij = np.vstack((IS.ravel(), JS.ravel()))
    result = sp.csc_matrix((data, ij), shape=(image_width*image_height*n_channels, num_verts*2))

    return result
Example #28
 def get_floor_heights_and_mask(self, low_limit, high_limit, include_transparent=True):
     low_limit_3d = numpy.atleast_3d(low_limit)
     high_limit_3d = numpy.atleast_3d(high_limit)
     max_height = self.blocks.shape[2]
     shape = self.blocks.shape
     trimmed_shape = (shape[0], shape[1], shape[2]-1)
     cell_depth = numpy.indices(trimmed_shape)[2]
     cell_is_selected = numpy.logical_and(cell_depth>=low_limit_3d, cell_depth<high_limit_3d)
     selectable_substance = self.blocks[:,:,:-1] if include_transparent else numpy.logical_not(tileid_is_transparent[self.blocks[:,:,:-1]])
     potential_floors = numpy.logical_and(selectable_substance, cell_is_selected)
     potential_footspace = self.blocks[:,:,1:] if include_transparent else numpy.logical_not(tileid_is_transparent[self.blocks[:,:,1:]])
     good_floors = numpy.logical_and(potential_floors!=0, potential_footspace==0)
     floor_heights = (max_height-2)-numpy.argmax(good_floors[:,:,::-1], axis=2)
     mask = get_cells_using_heightmap(good_floors, floor_heights)
     return numpy.clip(floor_heights, low_limit, high_limit), mask
Example #29
 def filter(self, image):
     if self.processFramesIndividually:
         filt_ims = []
         for chanNum in range(image.data.shape[3]):
             filt_ims.append(np.concatenate([np.atleast_3d(self.applyFilter(image.data[:,:,i,chanNum].squeeze().astype('f'), chanNum, i, image)) for i in range(image.data.shape[2])], 2))
     else:
         filt_ims = [np.atleast_3d(self.applyFilter(image.data[:,:,:,chanNum].squeeze().astype('f'), chanNum, 0, image)) for chanNum in range(image.data.shape[3])]
         
     im = ImageStack(filt_ims, titleStub = self.outputName)
     im.mdh.copyEntriesFrom(image.mdh)
     im.mdh['Parent'] = image.filename
     
     self.completeMetadata(im)
     
     return im
Example #30
    def __init__(self, root, noise, option):
        self.root = root
        self.nFeatures = 50
        self.kernelSize = 5
        self.poolLength = 2
        self.nLambda = 64
        self.batchSize = 256
        self.nClasses = [50, 50, 50, 50, 10, 20, 20, 20, 20]
        self.noise = noise
        self.option = option

# BField, theta, chi, vmac, damping, B0, B1, doppler, kl
        self.lower = np.asarray([0.0,      0.0,   0.0, -7.0, 0.0,  0.15, 0.15, 0.20,  1.0], dtype='float32')
        self.upper = np.asarray([3000.0, 180.0, 180.0,  7.0, 0.5,   1.2,  1.2, 0.80,  5.0], dtype='float32')
        
        self.dataFile = "/net/viga/scratch1/deepLearning/DNMilne/database/database_BigBear.h5"

        f = h5py.File(self.dataFile, 'r')
        pars = f.get("parameters")
        stokes = f.get("stokes")
        self.nModels, _ = pars.shape
                
        self.nTraining = int(self.nModels * 0.9)
        self.nValidation = int(self.nModels * 0.1)
        
        print("Training set: {0}".format(self.nTraining))

        print("Validation set: {0}".format(self.nValidation))

        self.inTrain = []
        for i in range(4):            
            self.inTrain.append(np.atleast_3d(stokes[0:self.nTraining,:,i]).astype('float32'))

        self.inTest = []
        for i in range(4):            
            self.inTest.append(np.atleast_3d(stokes[self.nTraining:,:,i]).astype('float32'))

        self.outTrain = []
        for i in range(9):
            outTrain = np.floor((pars[0:self.nTraining, i] - self.lower[i]) / (self.upper[i] - self.lower[i]) * self.nClasses[i]).astype('int32')            
            self.outTrain.append(np_utils.to_categorical(outTrain, self.nClasses[i]))

        self.outTest = []
        for i in range(9):
            outTest = np.floor((pars[self.nTraining:, i] - self.lower[i]) / (self.upper[i] - self.lower[i]) * self.nClasses[i]).astype('int32')
            self.outTest.append(np_utils.to_categorical(outTest, self.nClasses[i]))

        f.close()
Example #31
def fitmodels_direct(catd,
                     mmix,
                     mask,
                     t2s,
                     t2s_full,
                     tes,
                     combmode,
                     ref_img,
                     reindex=False,
                     mmixN=None,
                     full_sel=True,
                     label=None,
                     out_dir='.',
                     verbose=False):
    """
    Fit TE-dependence and -independence models to components.

    Parameters
    ----------
    catd : (S x E x T) array_like
        Input data, where `S` is samples, `E` is echos, and `T` is time
    mmix : (T x C) array_like
        Mixing matrix for converting input data to component space, where `C`
        is components and `T` is the same as in `catd`
    mask : (S [x E]) array_like
        Boolean mask array
    t2s : (S [x T]) array_like
        Limited T2* map or timeseries.
    t2s_full : (S [x T]) array_like
        Full T2* map or timeseries. For voxels with good signal in only one
        echo, which are zeros in the limited T2* map, this map uses the T2*
        estimate using the first two echoes.
    tes : list
        List of echo times associated with `catd`, in milliseconds
    combmode : {'t2s', 'ste'} str
        How optimal combination of echos should be made, where 't2s' indicates
        using the method of Posse 1999 and 'ste' indicates using the method of
        Poser 2006
    ref_img : str or img_like
        Reference image to dictate how outputs are saved to disk
    reindex : bool, optional
        Default: False
    mmixN : array_like, optional
        Default: None
    full_sel : bool, optional
        Whether to perform selection of components based on Rho/Kappa scores.
        Default: True

    Returns
    -------
    seldict : dict
    comptab : (N x 5) :obj:`pandas.DataFrame`
        Array with columns denoting (1) index of component, (2) Kappa score of
        component, (3) Rho score of component, (4) variance explained by
        component, and (5) normalized variance explained by component
    betas : :obj:`numpy.ndarray`
    mmix_new : :obj:`numpy.ndarray`
    """
    if not (catd.shape[0] == t2s.shape[0] == t2s_full.shape[0] ==
            mask.shape[0]):
        raise ValueError('First dimensions (number of samples) of catd ({0}), '
                         't2s ({1}), and mask ({2}) do not '
                         'match'.format(catd.shape[0], t2s.shape[0],
                                        mask.shape[0]))
    elif catd.shape[1] != len(tes):
        raise ValueError('Second dimension of catd ({0}) does not match '
                         'number of echoes provided (tes; '
                         '{1})'.format(catd.shape[1], len(tes)))
    elif catd.shape[2] != mmix.shape[0]:
        raise ValueError('Third dimension (number of volumes) of catd ({0}) '
                         'does not match first dimension of '
                         'mmix ({1})'.format(catd.shape[2], mmix.shape[0]))
    elif t2s.shape != t2s_full.shape:
        raise ValueError('Shape of t2s array {0} does not match shape of '
                         't2s_full array {1}'.format(t2s.shape,
                                                     t2s_full.shape))
    elif t2s.ndim == 2:
        if catd.shape[2] != t2s.shape[1]:
            raise ValueError('Third dimension (number of volumes) of catd '
                             '({0}) does not match second dimension of '
                             't2s ({1})'.format(catd.shape[2], t2s.shape[1]))

    mask = t2s != 0  # Override mask because problems

    # compute optimal combination of raw data
    tsoc = combine.make_optcom(catd,
                               tes,
                               mask,
                               t2s=t2s_full,
                               combmode=combmode,
                               verbose=False).astype(float)[mask]

    # demean optimal combination
    tsoc_dm = tsoc - tsoc.mean(axis=-1, keepdims=True)

    # compute un-normalized weight dataset (features)
    if mmixN is None:
        mmixN = mmix
    WTS = computefeats2(utils.unmask(tsoc, mask), mmixN, mask, normalize=False)

    # compute PSC dataset - shouldn't have to refit data
    tsoc_B = get_coeffs(tsoc_dm, mmix, mask=None)
    tsoc_Babs = np.abs(tsoc_B)
    PSC = tsoc_B / tsoc.mean(axis=-1, keepdims=True) * 100

    # compute skews to determine signs based on unnormalized weights,
    # correct mmix & WTS signs based on spatial distribution tails
    signs = stats.skew(WTS, axis=0)
    signs /= np.abs(signs)
    mmix = mmix.copy()
    mmix *= signs
    WTS *= signs
    PSC *= signs
    totvar = (tsoc_B**2).sum()
    totvar_norm = (WTS**2).sum()

    # compute Betas and means over TEs for TE-dependence analysis
    betas = get_coeffs(catd, mmix,
                       np.repeat(mask[:, np.newaxis], len(tes), axis=1))
    n_samp, n_echos, n_components = betas.shape
    n_voxels = mask.sum()
    n_data_voxels = (t2s != 0).sum()
    mu = catd.mean(axis=-1, dtype=float)
    tes = np.reshape(tes, (n_echos, 1))
    fmin, _, _ = utils.getfbounds(n_echos)

    # mask arrays
    mumask = mu[t2s != 0]
    t2smask = t2s[t2s != 0]
    betamask = betas[t2s != 0]

    # set up Xmats
    X1 = mumask.T  # Model 1
    X2 = np.tile(tes, (1, n_data_voxels)) * mumask.T / t2smask.T  # Model 2

    # tables for component selection
    kappas = np.zeros([n_components])
    rhos = np.zeros([n_components])
    varex = np.zeros([n_components])
    varex_norm = np.zeros([n_components])
    Z_maps = np.zeros([n_voxels, n_components])
    F_R2_maps = np.zeros([n_data_voxels, n_components])
    F_S0_maps = np.zeros([n_data_voxels, n_components])
    Z_clmaps = np.zeros([n_voxels, n_components])
    F_R2_clmaps = np.zeros([n_data_voxels, n_components])
    F_S0_clmaps = np.zeros([n_data_voxels, n_components])
    Br_R2_clmaps = np.zeros([n_voxels, n_components])
    Br_S0_clmaps = np.zeros([n_voxels, n_components])
    pred_R2_maps = np.zeros([n_data_voxels, n_echos, n_components])
    pred_S0_maps = np.zeros([n_data_voxels, n_echos, n_components])

    LGR.info('Fitting TE- and S0-dependent models to components')
    for i_comp in range(n_components):
        # size of B is (n_echoes, n_samples)
        B = np.atleast_3d(betamask)[:, :, i_comp].T
        alpha = (np.abs(B)**2).sum(axis=0)
        varex[i_comp] = (tsoc_B[:, i_comp]**2).sum() / totvar * 100.
        varex_norm[i_comp] = (utils.unmask(WTS, mask)[t2s != 0][:, i_comp]**2).sum() /\
            totvar_norm * 100.

        # S0 Model
        # (S,) model coefficient map
        coeffs_S0 = (B * X1).sum(axis=0) / (X1**2).sum(axis=0)
        pred_S0 = X1 * np.tile(coeffs_S0, (n_echos, 1))
        pred_S0_maps[:, :, i_comp] = pred_S0.T
        SSE_S0 = (B - pred_S0)**2
        SSE_S0 = SSE_S0.sum(axis=0)  # (S,) prediction error map
        F_S0 = (alpha - SSE_S0) * (n_echos - 1) / (SSE_S0)
        F_S0_maps[:, i_comp] = F_S0

        # R2 Model
        coeffs_R2 = (B * X2).sum(axis=0) / (X2**2).sum(axis=0)
        pred_R2 = X2 * np.tile(coeffs_R2, (n_echos, 1))
        pred_R2_maps[:, :, i_comp] = pred_R2.T
        SSE_R2 = (B - pred_R2)**2
        SSE_R2 = SSE_R2.sum(axis=0)
        F_R2 = (alpha - SSE_R2) * (n_echos - 1) / (SSE_R2)
        F_R2_maps[:, i_comp] = F_R2

        # compute weights as Z-values
        wtsZ = (WTS[:, i_comp] - WTS[:, i_comp].mean()) / WTS[:, i_comp].std()
        wtsZ[np.abs(wtsZ) > Z_MAX] = (
            Z_MAX * (np.abs(wtsZ) / wtsZ))[np.abs(wtsZ) > Z_MAX]
        Z_maps[:, i_comp] = wtsZ

        # compute Kappa and Rho
        F_S0[F_S0 > F_MAX] = F_MAX
        F_R2[F_R2 > F_MAX] = F_MAX
        norm_weights = np.abs(
            np.squeeze(utils.unmask(wtsZ, mask)[t2s != 0]**2.))
        kappas[i_comp] = np.average(F_R2, weights=norm_weights)
        rhos[i_comp] = np.average(F_S0, weights=norm_weights)

    # tabulate component values
    comptab = np.vstack([kappas, rhos, varex, varex_norm]).T
    if reindex:
        # re-index all components in Kappa order
        sort_idx = comptab[:, 0].argsort()[::-1]
        comptab = comptab[sort_idx, :]
        mmix_new = mmix[:, sort_idx]
        betas = betas[..., sort_idx]
        pred_R2_maps = pred_R2_maps[:, :, sort_idx]
        pred_S0_maps = pred_S0_maps[:, :, sort_idx]
        F_S0_maps = F_S0_maps[:, sort_idx]
        F_R2_maps = F_R2_maps[:, sort_idx]
        Z_maps = Z_maps[:, sort_idx]
        WTS = WTS[:, sort_idx]
        PSC = PSC[:, sort_idx]
        tsoc_B = tsoc_B[:, sort_idx]
        tsoc_Babs = tsoc_Babs[:, sort_idx]
    else:
        mmix_new = mmix

    if verbose:
        # Echo-specific weight maps for each of the ICA components.
        io.filewrite(betas, op.join(out_dir, label + 'betas_catd.nii'),
                     ref_img)
        # Echo-specific maps of predicted values for R2 and S0 models for each
        # component.
        io.filewrite(utils.unmask(pred_R2_maps, mask),
                     op.join(out_dir, label + 'R2_pred.nii'), ref_img)
        io.filewrite(utils.unmask(pred_S0_maps, mask),
                     op.join(out_dir, label + 'S0_pred.nii'), ref_img)
        # Weight maps used to average metrics across voxels
        io.filewrite(utils.unmask(Z_maps**2., mask),
                     op.join(out_dir, label + 'metric_weights.nii'), ref_img)

    comptab = pd.DataFrame(comptab,
                           columns=[
                               'kappa', 'rho', 'variance explained',
                               'normalized variance explained'
                           ])
    comptab.index.name = 'component'

    # full selection including clustering criteria
    seldict = None
    if full_sel:
        LGR.info('Performing spatial clustering of components')
        csize = np.max([int(n_voxels * 0.0005) + 5, 20])
        LGR.debug('Using minimum cluster size: {}'.format(csize))
        for i_comp in range(n_components):
            # save out files
            out = np.zeros((n_samp, 4))
            out[:, 0] = np.squeeze(utils.unmask(PSC[:, i_comp], mask))
            out[:, 1] = np.squeeze(utils.unmask(F_R2_maps[:, i_comp],
                                                t2s != 0))
            out[:, 2] = np.squeeze(utils.unmask(F_S0_maps[:, i_comp],
                                                t2s != 0))
            out[:, 3] = np.squeeze(utils.unmask(Z_maps[:, i_comp], mask))

            ccimg = io.new_nii_like(ref_img, out)

            # Do simple clustering on F
            sel = spatclust(ccimg,
                            min_cluster_size=csize,
                            threshold=fmin,
                            index=[1, 2],
                            mask=(t2s != 0))
            F_R2_clmaps[:, i_comp] = sel[:, 0]
            F_S0_clmaps[:, i_comp] = sel[:, 1]
            countsigFR2 = F_R2_clmaps[:, i_comp].sum()
            countsigFS0 = F_S0_clmaps[:, i_comp].sum()

            # Do simple clustering on Z at p<0.05
            sel = spatclust(ccimg,
                            min_cluster_size=csize,
                            threshold=1.95,
                            index=3,
                            mask=mask)
            Z_clmaps[:, i_comp] = sel

            # Do simple clustering on ranked signal-change map
            spclust_input = utils.unmask(stats.rankdata(tsoc_Babs[:, i_comp]),
                                         mask)
            spclust_input = io.new_nii_like(ref_img, spclust_input)
            Br_R2_clmaps[:, i_comp] = spatclust(
                spclust_input,
                min_cluster_size=csize,
                threshold=max(tsoc_Babs.shape) - countsigFR2,
                mask=mask)
            Br_S0_clmaps[:, i_comp] = spatclust(
                spclust_input,
                min_cluster_size=csize,
                threshold=max(tsoc_Babs.shape) - countsigFS0,
                mask=mask)

        seldict = {}
        selvars = [
            'WTS', 'tsoc_B', 'PSC', 'Z_maps', 'F_R2_maps', 'F_S0_maps',
            'Z_clmaps', 'F_R2_clmaps', 'F_S0_clmaps', 'Br_R2_clmaps',
            'Br_S0_clmaps'
        ]
        for vv in selvars:
            seldict[vv] = eval(vv)

    return seldict, comptab, betas, mmix_new
Example #32
    def test_apogee_cnn(self):
        """
        Test ApogeeCNN models
        - training, testing, evaluation
        - basic astroNN model method
        """
        print("======ApogeeCNN======")

        # Data preparation
        random_xdata = np.random.normal(0, 1, (200, 1024))
        random_ydata = np.random.normal(0, 1, (200, 2))

        # setup model instance
        neuralnet = ApogeeCNN()
        print(neuralnet)
        # assert no model before training
        self.assertEqual(neuralnet.has_model, False)
        neuralnet.max_epochs = 3  # for quick result
        neuralnet.callbacks = ErrorOnNaN()  # Raise error and fail the test if NaN
        neuralnet.train(random_xdata, random_ydata)  # training
        neuralnet.train_on_batch(random_xdata,
                                 random_ydata)  # single batch fine-tuning test
        # self.assertEqual(neuralnet.uses_learning_phase, True)  # Assert ApogeeCNN uses learning phase (bc of Dropout)

        # test basic astroNN model method
        neuralnet.get_weights()
        neuralnet.summary()
        output_shape = neuralnet.output_shape
        input_shape = neuralnet.input_shape
        neuralnet.get_config()
        neuralnet.save_weights(
            'save_weights_test.h5')  # save astroNN weight only
        neuralnet.plot_dense_stats()
        neuralnet.plot_model()

        prediction = neuralnet.test(random_xdata)
        jacobian = neuralnet.jacobian(random_xdata[:2])
        hessian = neuralnet.hessian_diag(random_xdata[:2])
        hessian_full_approx = neuralnet.hessian(random_xdata[:2],
                                                method='approx')
        hessian_full_exact = neuralnet.hessian(random_xdata[:2],
                                               method='exact')

        #  make sure raised if data dimension not as expected
        self.assertRaises(ValueError, neuralnet.jacobian,
                          np.atleast_3d(random_xdata[:3]))
        # make sure evaluate run in testing phase instead of learning phase
        # ie no Dropout which makes model deterministic
        self.assertEqual(
            np.all(
                neuralnet.evaluate(random_xdata, random_ydata) ==
                neuralnet.evaluate(random_xdata, random_ydata)), True)

        # assert shape correct as expected
        np.testing.assert_array_equal(prediction.shape, random_ydata.shape)
        np.testing.assert_array_equal(jacobian.shape, [
            random_xdata[:2].shape[0], random_ydata.shape[1],
            random_xdata.shape[1]
        ])
        np.testing.assert_array_equal(hessian.shape, [
            random_xdata[:2].shape[0], random_ydata.shape[1],
            random_xdata.shape[1]
        ])
        # hessian approx and exact result should have the same shape
        np.testing.assert_array_equal(hessian_full_approx.shape,
                                      hessian_full_exact.shape)

        # save weight and model again
        neuralnet.save(name='apogee_cnn')
        neuralnet.save_weights('save_weights_test.h5')

        # load the model again
        neuralnet_loaded = load_folder("apogee_cnn")
        neuralnet_loaded.plot_dense_stats()
        # assert has model without training because this is a trained model
        self.assertEqual(neuralnet_loaded.has_model, True)
        # fine tune test
        prediction_loaded = neuralnet_loaded.test(random_xdata)

        # ApogeeCNN is deterministic check again
        np.testing.assert_array_equal(prediction, prediction_loaded)

        # Fine tuning test
        neuralnet_loaded.max_epochs = 5
        neuralnet_loaded.callbacks = ErrorOnNaN()
        neuralnet_loaded.train(random_xdata, random_ydata)
        prediction_loaded = neuralnet_loaded.test(random_xdata)

        # prediction should not be equal after fine-tuning
        self.assertRaises(AssertionError, np.testing.assert_array_equal,
                          prediction, prediction_loaded)