Example #1
    def save(self, node, key, val, type='data'):
        '''
        node    The PyTables node (group) under which to store
        key     The name of the group or array to create
        val     The value to store (used when type='data')
        type    One of 'group', 'id' or 'data'
        '''

        # If a groupname is supplied, open/create it and enter that group
        if type == 'group' and pl.is_string_like(key):
            try:
                node = self.h5f.getNode(node, key)
            except:
                node = self.h5f.createGroup(node, key, filters=self.FILTERS)

        # If an id is supplied, make a group for it and enter that group
        elif type == 'id' and pl.is_numlike(key):
            try:
                node = self.h5f.getNode(node, "i%d" % key)
            except:
                node = self.h5f.createGroup(node,
                                            "i%d" % key,
                                            filters=self.FILTERS)

        # When data is supplied, add it (and overwrite any data that was there before)
        elif type == 'data':
            if key == None:
                key = 'None'
            elif not pl.is_string_like(key):
                error(self, 'This should not happen...')

            try:
                self.h5f.removeNode(node, key)
            except:
                pass

            if issubclass(val.__class__, np.ndarray):

                # Empty tuples not supported by pytables. Fix by reshaping to (1,)
                if val.shape == ():
                    val = val.reshape((1, ))

                atom = tb.Atom.from_dtype(val.dtype)
                new_node = self.h5f.createCArray(node,
                                                 key,
                                                 atom,
                                                 val.shape,
                                                 filters=self.FILTERS)
                new_node[:] = val[:]

            else:
                self.h5f.createArray(node, key, val)

        else:
            error(self, 'Hmm? Either \'type\' or \'key\' is of shitty format.')
            node = self.h5f.root

        return node
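
A minimal standalone sketch of what the 'group' and 'data' branches above do, written against the modern snake_case PyTables API (the example uses the pre-3.0 camelCase names such as createGroup/createCArray). The file name and filter settings here are illustrative, not taken from the original class.

import numpy as np
import tables as tb

FILTERS = tb.Filters(complevel=5, complib='zlib')   # assumed compression settings
h5f = tb.open_file('demo_store.h5', mode='w', filters=FILTERS)

# type='group': open the group if it exists, otherwise create it
try:
    node = h5f.get_node(h5f.root, 'results')
except tb.NoSuchNodeError:
    node = h5f.create_group(h5f.root, 'results', filters=FILTERS)

# type='data': remove any existing array of that name, then store a compressed copy
val = np.arange(10.0)
try:
    h5f.remove_node(node, 'x')
except tb.NoSuchNodeError:
    pass
atom = tb.Atom.from_dtype(val.dtype)
carr = h5f.create_carray(node, 'x', atom, val.shape, filters=FILTERS)
carr[:] = val[:]
h5f.close()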
Example #2
   def save(self, node, key, val, type='data'):
      '''
      node    The PyTables node (group) under which to store
      key     The name of the group or array to create
      val     The value to store (used when type='data')
      type    One of 'group', 'id' or 'data'
      '''
               
      # If a groupname is supplied, open/create it and enter that group
      if type=='group' and pl.is_string_like(key):
         try:
            node = self.h5f.getNode(node, key)
         except:
            node = self.h5f.createGroup(node, key, filters=self.FILTERS)
      
      # If an id is supplied, make a group for it and enter that group
      elif type=='id' and pl.is_numlike(key):
         try:
            node = self.h5f.getNode(node, "i%d"%key)
         except:
            node = self.h5f.createGroup(node, "i%d"%key, filters=self.FILTERS)
      
      # When data is supplied, add it (and overwrite any data that was there before)
      elif type=='data':
         if key == None:
            key = 'None'
         elif not pl.is_string_like(key):
            error(self, 'This should not happen...')
                        
         try:
            self.h5f.removeNode(node, key)
         except:
            pass
         
         
         if issubclass(val.__class__, np.ndarray):
            
            # Empty tuples not supported by pytables. Fix by reshaping to (1,)
            if val.shape == ():
               val = val.reshape((1,))
                      
            atom = tb.Atom.from_dtype(val.dtype)
            new_node = self.h5f.createCArray(node, key, atom, val.shape, filters=self.FILTERS)
            new_node[:] = val[:]
            
         else:
            self.h5f.createArray(node, key, val)

         
      else:
         error(self, 'Hmm? Either \'type\' or \'key\' is of shitty format.')
         node = self.h5f.root
 
      
         
      return node
Example #3
def read_text_pyfusion(files, target='^Shot .*', ph_dtype=None, plot=pl.isinteractive(), ms=100, hold=0, debug=0, quiet=1,  exception = Exception):
    """ Accepts a file or a list of files, returns a list of structured arrays
    See merge ds_list to merge and convert types (float -> pyfusion.prec_med
    """
    st = seconds(); last_update=seconds()
    file_list = files
    if len(np.shape(files)) == 0: file_list = [file_list]
    f='f8'
    if ph_dtype == None: ph_dtype = [('p12',f),('p23',f),('p34',f),('p45',f),('p56',f)]
    #ph_dtype = [('p12',f)]
    ds_list =[]
    comment_list =[]
    count = 0
    for (i,filename) in enumerate(file_list):
        if seconds() - last_update > 30:
            last_update = seconds()
            print('reading {n}/{t}: {f}'
                  .format(f=filename, n=i, t=len(file_list)))
        try:
            if pl.is_string_like(target): 
                skip = 1+find_data(filename, target,debug=debug)
            else: 
                skip = target
            if quiet == 0:
                print('{t:.1f} sec, loading data from line {s} of {f}'
                      .format(t = seconds()-st, s=skip, f=filename))
            #  this little bit to determine layout of data
            # very inefficient to read twice, but in a hurry!
            txt = np.loadtxt(fname=filename, skiprows=skip-1, dtype=str, 
                             delimiter='FOOBARWOOBAR')
            header_toks = txt[0].split()
            # is the first character of the 2nd last a digit?
            if header_toks[-2][0] in '0123456789': 
                if pyfusion.VERBOSE > 0: 
                    print('found new header including number of phases')
                n_phases = int(header_toks[-2])
                ph_dtype = [('p{n}{np1}'.format(n=n,np1=n+1), f) for n in range(n_phases)]
                
            if 'frlow' in header_toks:  # add the two extra fields
                fs_dtype= [ ('shot','i8'), ('t_mid','f8'), 
                            ('_binary_svs','i8'), 
                            ('freq','f8'), ('amp', 'f8'), ('a12','f8'),
                            ('p', 'f8'), ('H','f8'), 
                            ('frlow','f8'), ('frhigh', 'f8'),('phases',ph_dtype)]
            else:
                fs_dtype= [ ('shot','i8'), ('t_mid','f8'), 
                            ('_binary_svs','i8'), 
                            ('freq','f8'), ('amp', 'f8'), ('a12','f8'),
                            ('p', 'f8'), ('H','f8'), ('phases',ph_dtype)]

            ds_list.append(
                np.loadtxt(fname=filename, skiprows = skip, 
                           dtype= fs_dtype)
            )
            count += 1
            comment_list.append(filename)
        except ValueError as info:
            print('Conversion error while reading {f} with loadtxt - {info}'.format(f=filename, info=info))
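
The interesting step above is how the phases sub-dtype is rebuilt from the header: if the second-to-last header token starts with a digit, it is taken as the number of phase columns. A tiny self-contained sketch of that logic, with a made-up header line:

f = 'f8'
header_toks = "Shot t_mid _binary_svs freq amp a12 p H 5 phases".split()
# is the first character of the 2nd-last token a digit?
if header_toks[-2][0] in '0123456789':
    n_phases = int(header_toks[-2])
    ph_dtype = [('p{n}{np1}'.format(n=n, np1=n + 1), f) for n in range(n_phases)]
    print(ph_dtype)  # [('p01', 'f8'), ('p12', 'f8'), ('p23', 'f8'), ('p34', 'f8'), ('p45', 'f8')]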
Example #4
    def save_nodes(self,filename,path='./results/'):
        """Saves the contents of a Nodes instance to a npz file."""
        
        attribute = dir(self[0])
        save_str = []
        #Determine which attributes to be saved
        for attribute in dir(self[0]):
            if attribute[0]=='_':
                continue
            elif is_numlike(getattr(self[0],attribute)) or is_string_like(getattr(self[0],attribute)):
                save_str.append(attribute + '=' + 'np.array([self[i].'+attribute+' for i in np.arange(len(self))])')
        #Write save file
        eval('np.savez(path+filename,'+','.join(save_str)+')')

        print('Saved nodes to file: ', path+filename)
        sys.stdout.flush()
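
The eval/np.savez construction above can be written without eval by collecting the arrays in a dict and unpacking it as keyword arguments. A sketch with a made-up Node class standing in for the Nodes container, which is not shown in this excerpt:

import numpy as np
from numbers import Number

class Node:
    def __init__(self, x, label):
        self.x = x
        self.label = label

nodes = [Node(1.0, 'a'), Node(2.0, 'b')]
arrays = {}
for attr in ('x', 'label'):
    vals = [getattr(n, attr) for n in nodes]
    # keep only numeric or string-valued attributes, as save_nodes does
    if all(isinstance(v, (Number, str)) for v in vals):
        arrays[attr] = np.array(vals)
np.savez('nodes.npz', **arrays)
print('Saved nodes to file: nodes.npz')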
Example #5
def scatter_classic(x, y, s=None, c='b'):
    """
    SCATTER_CLASSIC(x, y, s=None, c='b')

    Make a scatter plot of x versus y.  s is a size (in data coords) and
    can be either a scalar or an array of the same length as x or y.  c is
    a color and can be a single color format string or a length(x) array
    of intensities which will be mapped by the colormap jet.

    If size is None a default size will be used

    Copied from older version of matplotlib -- removed in version 0.9.1
    for whatever reason.
    """
    self = gca()
    if not self._hold: self.cla()
    if is_string_like(c):
        c = [c] * len(x)
    elif not iterable(c):
        c = [c] * len(x)
    else:
        norm = normalize()
        norm(c)
        c = cm.jet(c)

    if s is None:
        s = [abs(0.015 * (amax(y) - amin(y)))] * len(x)
    elif not iterable(s):
        s = [s] * len(x)

    if len(c) != len(x):
        raise ValueError('c and x are not equal lengths')
    if len(s) != len(x):
        raise ValueError('s and x are not equal lengths')

    patches = []
    for thisX, thisY, thisS, thisC in zip(x, y, s, c):
        circ = Circle(
            (thisX, thisY),
            radius=thisS,
        )
        circ.set_facecolor(thisC)
        self.add_patch(circ)
        patches.append(circ)
    self.autoscale_view()
    return patches
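
On current matplotlib the same effect (marker radii given in data coordinates rather than in points squared) can be obtained directly with Circle patches, which is exactly what scatter_classic builds internally. A self-contained sketch with made-up data:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle

x, y = np.random.rand(2, 30)
ax = plt.gca()
for xi, yi in zip(x, y):
    circ = Circle((xi, yi), radius=0.02)   # radius in data units
    circ.set_facecolor('b')
    ax.add_patch(circ)
ax.autoscale_view()
plt.show()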
Example #6
    def save_nodes(self, filename, path="./results/"):
        """Saves the contents of a Nodes instance to a npz file."""

        attribute = dir(self[0])
        save_str = []
        # Determine which attributes to be saved
        for attribute in dir(self[0]):
            if attribute[0] == "_":
                continue
            elif is_numlike(getattr(self[0], attribute)) or is_string_like(getattr(self[0], attribute)):
                save_str.append(attribute + "=" + "array([self[i]." + attribute + " for i in arange(len(self))])")

        # Write save file
        eval("np.savez(path+filename," + ",".join(save_str) + ")")

        print "Saved nodes to file: ", path + filename
        sys.stdout.flush()
Example #7
def scatter_classic(x, y, s=None, c='b'):
    """
    SCATTER_CLASSIC(x, y, s=None, c='b')

    Make a scatter plot of x versus y.  s is a size (in data coords) and
    can be either a scalar or an array of the same length as x or y.  c is
    a color and can be a single color format string or a length(x) array
    of intensities which will be mapped by the colormap jet.

    If size is None a default size will be used

    Copied from older version of matplotlib -- removed in version 0.9.1
    for whatever reason.
    """
    self = gca()
    if not self._hold: self.cla()
    if is_string_like(c):
        c = [c]*len(x)
    elif not iterable(c):
        c = [c]*len(x)
    else:
        norm = normalize()
        norm(c)
        c = cm.jet(c)

    if s is None:
        s = [abs(0.015*(amax(y)-amin(y)))]*len(x)
    elif not iterable(s):
        s = [s]*len(x)

    if len(c)!=len(x):
        raise ValueError('c and x are not equal lengths')
    if len(s)!=len(x):
        raise ValueError('s and x are not equal lengths')

    patches = []
    for thisX, thisY, thisS, thisC in zip(x,y,s,c):
        circ = Circle( (thisX, thisY),
                       radius=thisS,
                       )
        circ.set_facecolor(thisC)
        self.add_patch(circ)
        patches.append(circ)
    self.autoscale_view()
    return patches
Example #8
def plot_complex_image(image, plot_title='', textsize=18, scale=False):

    limit = 6 * median(abs(image))

    clf()
    if is_string_like(plot_title):
        figtext(0.5,
                0.95,
                plot_title,
                size=1.5 * textsize,
                horizontalalignment='center')
        pass
    subplot(221)
    title("Real")
    if scale:
        imshow(image.real,
               interpolation='nearest',
               vmin=image.imag.min(),
               vmax=image.imag.max())
    else:
        imshow(image.real, interpolation='nearest')
        pass
    colorbar()

    subplot(222)
    title("Imag")
    imshow(image.imag, interpolation='nearest')
    colorbar()

    subplot(223)
    title("Abs")
    if scale:
        imshow(abs(image), interpolation='nearest', vmax=image.imag.max())
    else:
        imshow(abs(image), interpolation='nearest')
        pass
    colorbar()

    subplot(224)
    title("Phase")
    imshow(angle(image), interpolation='nearest')
    colorbar()
    pass
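
A self-contained version of the same 2x2 layout (Real / Imag / Abs / Phase) written against matplotlib.pyplot explicitly, with a made-up complex test image; the snippet above relies on pylab-style bare names (clf, figtext, imshow, median, is_string_like, ...):

import numpy as np
import matplotlib.pyplot as plt

yy, xx = np.mgrid[-1:1:64j, -1:1:64j]
image = np.exp(-(xx**2 + yy**2)) * np.exp(1j * 4 * np.pi * xx)
panels = [('Real', image.real), ('Imag', image.imag),
          ('Abs', np.abs(image)), ('Phase', np.angle(image))]
for i, (name, data) in enumerate(panels, start=1):
    plt.subplot(2, 2, i)
    plt.title(name)
    plt.imshow(data, interpolation='nearest')
    plt.colorbar()
plt.show()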
_var_defaults="""

shot_range=[27233]
plot=True
exception=Exception
diag_name='HMP01'
dev_name="LHD"
"""

exec(_var_defaults)

from pyfusion.utils import process_cmd_line_args
exec(process_cmd_line_args())

if pl.is_string_like(shot_range):
    print('loading from file %s ' %(shot_range))
    shot_range = np.loadtxt(shot_range)

device = pyfusion.getDevice(dev_name)

for shot in shot_range:
    try:
        data = device.acq.getdata(shot, diag_name)
        #data = pyfusion.load_channel(shot,chan_name)
        sig=data.signal
        sigac=sig-np.average(sig*np.blackman(len(sig)))
        fs = abs(np.fft.fft(sigac*np.blackman(len(sig))))
        if plot:
            pl.semilogy(fs[0:2000],hold=0)
            pl.title('shot %d, %s' % (shot, diag_name))
Example #10
_var_defaults = """

shot_range=[27233]
plot=True
exception=Exception
diag_name='HMP01'
dev_name="LHD"
"""

exec(_var_defaults)

from pyfusion.utils import process_cmd_line_args
exec(process_cmd_line_args())

if pl.is_string_like(shot_range):
    print('loading from file %s ' % (shot_range))
    shot_range = np.loadtxt(shot_range)

device = pyfusion.getDevice(dev_name)

for shot in shot_range:
    try:
        data = device.acq.getdata(shot, diag_name)
        #data = pyfusion.load_channel(shot,chan_name)
        sig = data.signal
        sigac = sig - np.average(sig * np.blackman(len(sig)))
        fs = abs(np.fft.fft(sigac * np.blackman(len(sig))))
        if plot:
            pl.semilogy(fs[0:2000], hold=0)
            pl.title('shot %d, %s' % (shot, diag_name))
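
The spectral step of this script on a synthetic signal instead of a pyfusion shot: subtract a windowed mean estimate, apply a Blackman window, and plot the FFT magnitude on a log scale. The hold= keyword used above has been removed from recent matplotlib, so it is omitted here.

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 1, 4096)
sig = np.sin(2 * np.pi * 300 * t) + 0.1 * np.random.randn(len(t))
win = np.blackman(len(sig))
sigac = sig - np.average(sig * win)
fs = abs(np.fft.fft(sigac * win))
plt.semilogy(fs[0:2000])
plt.title('synthetic signal')
plt.show()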
Example #11
    def extract(self, dictionary = False, varnames=None, inds = None, limit=None,strict=0, debug=0):
        """ extract the listed variables into the dictionary (local by default)
        selecting those at indices <inds> (all by default).
        Variable names must be strings, given either as an array or separated by commas.
        
        if the dictionary is False, return them in a tuple instead 
        Note: returning a list requires you to make the order consistent

        if varnames is None - extract all.

        e.g. if da is a dictionary of arrays
        da = DA('mydata.npz')
        da.extract('shot,beta')
        plot(shot,beta)

        (shot,beta,n_e) = da.extract(['shot','beta','n_e'], \
                                      inds=np.where(da['beta']>3)[0])
        # makes a tuple of 3 arrays of data for high beta.  
        Note   syntax of where()! It is evaluated in your variable space.
               to extract one var, need trailing "," (tuple notation) e.g.
                    (allbeta,) = D54.extract('beta',locals())
               which can be abbreviated to
                    allbeta, = D54.extract('beta',locals())
        
        """
        start_mem = report_mem(msg='extract')
        if debug == 0: debug = self.debug
        if varnames == None: varnames = self.da.keys()  # all variables

        if pl.is_string_like(varnames):
            varlist = varnames.split(',')
        else: varlist = varnames
        val_tuple = ()

        if inds == None:
            inds = np.arange(self.len)
        if (len(np.shape(inds))==2): 
            inds = inds[0]   # trick to catch when you forget [0] on where

        if limit != None and len(inds)> abs(limit):
            if limit<0: 
                print('repeatably' ),
                np.random.seed(0)  # if positive, should be random
                                   # negative will repeat the sequence
            else: print('randomly'),
                
            print('decimating from sample of {n} and'.format(n=len(inds))),
            ir = np.where(np.random.random(len(inds))
                          < float(abs(limit))/len(inds))[0]
            inds = inds[ir]

        if len(inds)<500: print('*** {n} is a very small number to extract????'
                                .format(n=len(inds)))

        if self.verbose>0:
            print('extracting a sample of {n} '.format(n=len(inds)))

        for k in varlist:
            if k in self.keys:
                debug_(debug,key='extract')
                if hasattr(self.da[k],'keys'):
                    allvals = self.da[k]
                else:
                    allvals = np.array(self.da[k])

                if len(np.shape(allvals)) == 0:
                    sel_vals = allvals
                else: 
                    sel_vals = allvals[inds]
                if dictionary == False: 
                    val_tuple += (sel_vals,)
                else:
                    dictionary.update({k: sel_vals})
            else: print('variable {k} not found in {ks}'.
                        format(k=k, ks = np.sort(self.da.keys())))
        report_mem(start_mem)
        if dictionary == False: 
            return(val_tuple)
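
The core of extract(), stripped of the DA bookkeeping: choose a set of indices and return the selected slices of the requested keys as a tuple. A sketch on a plain dictionary of arrays, with made-up data:

import numpy as np

da = {'shot': np.arange(5), 'beta': np.array([1.0, 2.5, 3.2, 0.7, 4.1])}
inds = np.where(da['beta'] > 3)[0]
shot, beta = tuple(np.array(da[k])[inds] for k in 'shot,beta'.split(','))
print(shot, beta)   # [2 4] [3.2 4.1]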
Example #12
def sp(ds, x=None, y=None, sz=None, col=None, decimate=0, ind = None, nomode=None,
       size_scale=None, dot_size=30, hold=0, seed=None, colorbar=None, legend=True, marker='o'):
    """ Scatter plot front end, size_scale 
    x, y, sz, col can be keys or variables (of matching size to ds)
    decimate = 0.1 selects 10% of the input, -0.1 uses a fixed key. 
    """
    def size_val(marker_size):
        if size_scale<0: 
            return(-size_scale*np.exp(np.sqrt(marker_size/(dot_size/20))))
        else:
            return(size_scale*(np.sqrt(marker_size/dot_size)))
 
    if type(ds) == type({}): keys = np.sort(ds.keys())
    elif type(ds) == np.ndarray: keys = np.sort(ds.dtype.names)
    else: raise ValueError(
        'First argument must be a dictionary of arrays or an '
            'array read from loadtxt')

    if x == None: x = keys[0]        
    if y == None: y = keys[1]        
    if col == None: col = keys[2]        


    # deal with the indices first, so we can consider indexing x,y earlier
    if ind == None: 
        if pl.is_string_like(x):
            lenx = len(ds[x])
        else: 
            lenx = len(x)
        ind = np.arange(lenx)

    if seed != None: np.random.seed(seed)
    if decimate != 0: 
        if decimate<0: np.random.seed(0)  # fixed seed for decimate<0
        ind = ind[(np.where(np.random.rand(len(ind))<abs(decimate)))[0]]
    else:  # decimate if very long array and decimate == 0
        if (len(ind) > 2e4):   # 2e5 for scatter, 1e5 for plot
            print('Decimating automatically as data length too long [{0}]'
                  .format(len(ind)))
            ind = ind[np.where(np.random.rand(len(ind))<(2e4/len(ind)))[0]]
            
    if pl.is_string_like(x):
        x_string = x
        x = ds[x]
    else:
        x_string = ''

    if pl.is_string_like(y):
        y_string = y
        y = ds[y]
    else:
        y_string = ''

    size_string = '<size>'
    color_string = '<color>'

    if pl.is_string_like(col): 
        if np.any(np.array(keys)== col):
            color_string = col
            col=ds[col][ind]
        else: col = col  # colour is hardwired    
    else:
        if col == None: col='b'
        else:
            col = np.array(col)[ind]
        color_string = ''

    if nomode == None:
        if hasattr(col, 'dtype'):
            col_dtype = col.dtype
            minint = np.iinfo(col_dtype).min
            nomode = minint
        else:
            if len(col) != 0:
                nomode = np.iinfo(col[0]).min
            else:
                nomode = np.iinfo(col).min

    w_not_nomode = np.where(nomode != col)[0]
            # shrink ind further to avoid displaying unidentified modes
    ind = ind[w_not_nomode]
    col = col[w_not_nomode]

    if sz == None: sz=20 * np.ones(len(x))
    if pl.is_string_like(sz): 
        size_string = sz # size scale is the value giving a dot size of dot_size
        sz=ds[sz]
    else: # must be a number or an array
        sz = np.array(sz)

    if size_scale==None: size_scale = max(sz[ind])

    if size_scale<0:  # negative is a log scale
        sz=dot_size/20*(np.log(sz[ind]/-size_scale))**2
    else: 
        sz=dot_size*(sz[ind]/size_scale)  # squaring may make sense, but too big

        
    if max(sz)>1000: 
        if pl.isinteractive():
            inp=input('huge circles, radius~ {szm:.3g}, Y/y to continue'
                          .format(szm = max(sz.astype(float)))) # bug! shouldn't need asfloat
            if inp.upper() != 'Y': raise ValueError
        else:
             warn('reducing symbol size')
             sz=200/max(sz) * sz
    debug_(debug,3)

    if hold==0: pl.clf()    
    coll = pl.scatter(x[ind],y[ind],sz,col, hold=hold,marker=marker,label='')
#    pl.legend(coll   # can't select an element out of a CircleCollection
    sizes = coll.get_sizes()
    max_size=max(sizes)
    big=matplotlib.collections.CircleCollection([max_size])
    med=matplotlib.collections.CircleCollection([max_size/10])
    sml=matplotlib.collections.CircleCollection([max_size/100])
    if legend == True:
        pl.legend([big,med,sml],
                  [("%s=%.3g" % (size_string,size_val(max_size))),
                   ("%.3g" % (size_val(max_size/10))),
                   ("%.3g" % (size_val(max_size/100)))])

    pl.xlabel(x_string)
    pl.ylabel(y_string)
    pl.title('size=%s, colour=%s' % (size_string, color_string))
    if colorbar == None and len(col) > 1:
        colorbar = True
    if colorbar: pl.colorbar()
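
The size legend at the end uses proxy artists: CircleCollections of decreasing area stand in for the scatter markers. A minimal sketch of that idea on its own, with made-up data and labels:

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.collections

x, y = np.random.rand(2, 40)
sz = 300 * np.random.rand(40)
coll = plt.scatter(x, y, sz)
max_size = max(coll.get_sizes())
big = matplotlib.collections.CircleCollection([max_size])
med = matplotlib.collections.CircleCollection([max_size / 10])
sml = matplotlib.collections.CircleCollection([max_size / 100])
plt.legend([big, med, sml],
           ['%.3g' % max_size, '%.3g' % (max_size / 10), '%.3g' % (max_size / 100)])
plt.show()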
Example #13
def sp(ds,
       x=None,
       y=None,
       sz=None,
       col=None,
       decimate=0,
       ind=None,
       nomode=None,
       size_scale=None,
       dot_size=30,
       hold=0,
       seed=None,
       colorbar=None,
       legend=True,
       marker='o'):
    """ Scatter plot front end, size_scale 
    x, y, sz, col can be keys or variables (of matching size to ds)
    decimate = 0.1 selects 10% of the input, -0.1 uses a fixed key. 
    """
    def size_val(marker_size):
        if size_scale < 0:
            return (-size_scale * np.exp(np.sqrt(marker_size /
                                                 (dot_size / 20))))
        else:
            return (size_scale * (np.sqrt(marker_size / dot_size)))

    if type(ds) == type({}): keys = np.sort(ds.keys())
    elif type(ds) == np.ndarray: keys = np.sort(ds.dtype.names)
    else:
        raise ValueError('First argument must be a dictionary of arrays or an '
                         'array read from loadtxt')

    if x == None: x = keys[0]
    if y == None: y = keys[1]
    if col == None: col = keys[2]

    # deal with the indices first, so we can consider indexing x,y earlier
    if ind == None:
        if pl.is_string_like(x):
            lenx = len(ds[x])
        else:
            lenx = len(x)
        ind = np.arange(lenx)

    if seed != None: np.random.seed(seed)
    if decimate != 0:
        if decimate < 0: np.random.seed(0)  # fixed seed for decimate<0
        ind = ind[(np.where(np.random.rand(len(ind)) < abs(decimate)))[0]]
    else:  # decimate if very long array and decimate == 0
        if (len(ind) > 2e4):  # 2e5 for scatter, 1e5 for plot
            print('Decimating automatically as data length too long [{0}]'.
                  format(len(ind)))
            ind = ind[np.where(np.random.rand(len(ind)) < (2e4 / len(ind)))[0]]

    if pl.is_string_like(x):
        x_string = x
        x = ds[x]
    else:
        x_string = ''

    if pl.is_string_like(y):
        y_string = y
        y = ds[y]
    else:
        y_string = ''

    size_string = '<size>'
    color_string = '<color>'

    if pl.is_string_like(col):
        if np.any(np.array(keys) == col):
            color_string = col
            col = ds[col][ind]
        else:
            col = col  # colour is hardwired
    else:
        if col == None: col = 'b'
        else:
            col = np.array(col)[ind]
        color_string = ''

    if nomode == None:
        if hasattr(col, 'dtype'):
            col_dtype = col.dtype
            minint = np.iinfo(col_dtype).min
            nomode = minint
        else:
            if len(col) != 0:
                nomode = np.iinfo(col[0]).min
            else:
                nomode = np.iinfo(col).min

    w_not_nomode = np.where(nomode != col)[0]
    # shrink ind further to avoid displaying unidentified modes
    ind = ind[w_not_nomode]
    col = col[w_not_nomode]

    if sz == None: sz = 20 * np.ones(len(x))
    if pl.is_string_like(sz):
        size_string = sz  # size scale is the value giving a dot size of dot_size
        sz = ds[sz]
    else:  # must be a number or an array
        sz = np.array(sz)

    if size_scale == None: size_scale = max(sz[ind])

    if size_scale < 0:  # negative is a log scale
        sz = dot_size / 20 * (np.log(sz[ind] / -size_scale))**2
    else:
        sz = dot_size * (sz[ind] / size_scale)  # squaring may make sense, but too big

    if max(sz) > 1000:
        if pl.isinteractive():
            inp = input(
                'huge circles, radius~ {szm:.3g}, Y/y to continue'.format(
                    szm=max(sz.astype(float))))  # bug! shouldn't need asfloat
            if inp.upper() != 'Y': raise ValueError
        else:
            warn('reducing symbol size')
            sz = 200 / max(sz) * sz
    debug_(debug, 3)

    if hold == 0: pl.clf()
    coll = pl.scatter(x[ind],
                      y[ind],
                      sz,
                      col,
                      hold=hold,
                      marker=marker,
                      label='')
    #    pl.legend(coll   # can't select an element out of a CircleCollection
    sizes = coll.get_sizes()
    max_size = max(sizes)
    big = matplotlib.collections.CircleCollection([max_size])
    med = matplotlib.collections.CircleCollection([max_size / 10])
    sml = matplotlib.collections.CircleCollection([max_size / 100])
    if legend == True:
        pl.legend([big, med, sml],
                  [("%s=%.3g" % (size_string, size_val(max_size))),
                   ("%.3g" % (size_val(max_size / 10))),
                   ("%.3g" % (size_val(max_size / 100)))])

    pl.xlabel(x_string)
    pl.ylabel(y_string)
    pl.title('size=%s, colour=%s' % (size_string, color_string))
    if colorbar == None and len(col) > 1:
        colorbar = True
    if colorbar: pl.colorbar()
Example #14
    def __save__(self, root_node, self_key, conf, new_conf):
        '''
        root_node   The database node under which to store
        self_key    The variable in the calling class that points to self
        conf        The existing configuration, if any
        new_conf    The configuration dictionary being built'''
        import mynumpy as np
        from gfuncs import is_builtin_type, is_np_type
        # Process the optional arguments
        #      opts = {'is_subclass':False}
        #      opts = processArgs(args, opts)

        db = Configurable.__db__

        # Start by adding the key for this class, if supplied.
        if self_key != None and pl.is_string_like(self_key):
            new_conf[self_key] = {}
            new_conf = new_conf[self_key]
            new_conf['class_type'] = self.__class__
            class_node = db.save(root_node,
                                 self.__class__.__name__,
                                 None,
                                 type='group')

            if conf != None:
                conf = conf[self_key]

        # Then add the configuration number
        # Check if the data already exists (in which case a new slot is not needed)

        # If this is the first configuration, we'll __save__ the data at the first slot

        # If one or more configurations exist, and the 'is_new' flag is given, we
        # select a slot with equal contents, if such a slot exists. If not, a new
        # slot is created and the data is saved there.

        # To ensure that no data is stored twice, this search must be performed
        # each time.

        was_equal, id = self.__find_new_slot__(root_node, conf)
        new_conf['id'] = id
        new_conf['class_attr'] = {}
        new_conf = new_conf['class_attr']
        id_node = db.save(class_node, id, None, type='id')

        # Iterate over all class instance variables
        for key, val in self.__dict__.items():
            vc = val.__class__

            #         print key

            #if key=='Xd':
            #   print 'hello'

            # Skip empty types
            if val == None:
                pass

            # If the variable is another configurable class, call its __save__()
            if issubclass(val.__class__, Configurable):
                if vc == np.Ndarray:
                    db.save(id_node, key, val, type='data')
                    if key not in new_conf:
                        new_conf[key] = val.__class__
                else:
                    val.__save__(root_node, key, conf, new_conf)

            # Save normal datatypes to the database
            if is_builtin_type(vc) or is_np_type(vc):

                if key == None:
                    key = 'None'

                db.save(id_node, key, val, type='data')
                if key not in new_conf:
                    new_conf[key] = val.__class__

            # NOTE: Be careful with this one. Probably best to avoid these types
            elif type(val) == tuple or type(val) == list:

                if type(val) == tuple:
                    tw = TupleWrapper()
                else:
                    tw = ListWrapper()

                tw.len = val.__len__()

                for i in range(0, tw.len):
                    if val[i] == None:
                        val[i] = 'None'
                    setattr(tw, 'i%d' % i, val[i])

                tw.__save__(root_node, key, conf, new_conf)

            elif type(val) == dict:

                dw = DictWrapper()

                for subkey in val:
                    if subkey == None:
                        setattr(dw, 'None', val[subkey])
                    else:
                        setattr(dw, subkey, val[subkey])

                dw.__save__(root_node, key, conf, new_conf)
Example #15
    def extract(self,
                dictionary=False,
                varnames=None,
                inds=None,
                limit=None,
                strict=0,
                debug=0):
        """ extract the listed variables into the dictionary (local by default)
        selecting those at indices <inds> (all by default).
        Variable names must be strings, given either as an array or separated by commas.
        
        if the dictionary is False, return them in a tuple instead 
        Note: returning a list requires you to make the order consistent

        if varnames is None - extract all.

        e.g. if da is a dictionary of arrays
        da = DA('mydata.npz')
        da.extract('shot,beta')
        plot(shot,beta)

        (shot,beta,n_e) = da.extract(['shot','beta','n_e'], \
                                      inds=np.where(da['beta']>3)[0])
        # makes a tuple of 3 arrays of data for high beta.  
        Note   syntax of where()! It is evaluated in your variable space.
               to extract one var, need trailing "," (tuple notation) e.g.
                    (allbeta,) = D54.extract('beta',locals())
               which can be abbreviated to
                    allbeta, = D54.extract('beta',locals())
        
        """
        start_mem = report_mem(msg='extract')
        if debug == 0: debug = self.debug
        if varnames == None: varnames = self.da.keys()  # all variables

        if pl.is_string_like(varnames):
            varlist = varnames.split(',')
        else:
            varlist = varnames
        val_tuple = ()

        if inds == None:
            inds = np.arange(self.len)
        if (len(np.shape(inds)) == 2):
            inds = inds[0]  # trick to catch when you forget [0] on where

        if limit != None and len(inds) > abs(limit):
            if limit < 0:
                print('repeatably'),
                np.random.seed(0)  # if positive, should be random
                # negative will repeat the sequence
            else:
                print('randomly'),

            print('decimating from sample of {n} and'.format(n=len(inds))),
            ir = np.where(
                np.random.random(len(inds)) < float(abs(limit)) / len(inds))[0]
            inds = inds[ir]

        if len(inds) < 500:
            print('*** {n} is a very small number to extract????'.format(
                n=len(inds)))

        if self.verbose > 0:
            print('extracting a sample of {n} '.format(n=len(inds)))

        for k in varlist:
            if k in self.keys:
                debug_(debug, key='extract')
                if hasattr(self.da[k], 'keys'):
                    allvals = self.da[k]
                else:
                    allvals = np.array(self.da[k])

                if len(np.shape(allvals)) == 0:
                    sel_vals = allvals
                else:
                    sel_vals = allvals[inds]
                if dictionary == False:
                    val_tuple += (sel_vals, )
                else:
                    dictionary.update({k: sel_vals})
            else:
                print('variable {k} not found in {ks}'.format(
                    k=k, ks=np.sort(self.da.keys())))
        report_mem(start_mem)
        if dictionary == False:
            return (val_tuple)
Example #16
   def __save__(self, root_node, self_key, conf, new_conf):
      '''
      root_node   The database node under which to store
      self_key    The variable in the calling class that points to self
      conf        The existing configuration, if any
      new_conf    The configuration dictionary being built'''
      import mynumpy as np
      from gfuncs import is_builtin_type, is_np_type
      # Process the optional arguments
#      opts = {'is_subclass':False}
#      opts = processArgs(args, opts)
      
      db = Configurable.__db__
      
      # Start by adding the key for this class, if supplied.
      if self_key != None and pl.is_string_like(self_key):
         new_conf[self_key] = {}
         new_conf = new_conf[self_key]
         new_conf['class_type'] = self.__class__
         class_node = db.save(root_node, self.__class__.__name__, None, type='group')
         
         if conf != None:
            conf = conf[self_key]
              
      # Then add the configuration number
      # Check if the data already exists (in which case a new slot is not needed)
      
      # If this is the first configuration, we'll __save__ the data at the first slot
     
      
      # If one or more configurations exist, and the 'is_new' flag is given, we
      # select a slot with equal contents, if such a slot exists. If not, a new
      # slot is created and the data is saved there.
      
      # To ensure that no data is stored twice, this search must be performed
      # each time.

      was_equal, id = self.__find_new_slot__(root_node, conf)
      new_conf['id'] = id
      new_conf['class_attr'] = {}
      new_conf = new_conf['class_attr']
      id_node = db.save(class_node, id, None, type='id')
      
     
      # Iterate over all class instance variables
      for key,val in self.__dict__.items():
         vc = val.__class__
         
#         print key
         
         #if key=='Xd':
         #   print 'hello'
        
         # Skip empty types
         if val == None:
            pass

         # If the variable is another configurable class, call its __save__()
         if issubclass(val.__class__, Configurable):
            if vc == np.Ndarray:
               db.save(id_node, key, val, type='data')
               if key not in new_conf:
                  new_conf[key] = val.__class__
            else:
               val.__save__(root_node, key, conf, new_conf)
         
         # Save normal datatypes to the database
         if is_builtin_type(vc) or is_np_type(vc):
            
            if key == None:
               key = 'None'
            
            db.save(id_node, key, val, type='data')
            if key not in new_conf:
               new_conf[key] = val.__class__
            
         # NOTE: Be careful with this one. Probably best to avoid these types
         elif type(val) == tuple or type(val) == list:
            
            if type(val) == tuple:
               tw = TupleWrapper()
            else:
               tw = ListWrapper()
               
            tw.len = val.__len__()
            
            for i in range(0,tw.len):
               if val[i] == None:
                  val[i] = 'None'
               setattr(tw, 'i%d'%i, val[i])
            
            tw.__save__(root_node, key, conf, new_conf)
            
         elif type(val) == dict:
            
            dw = DictWrapper()
            
            for subkey in val:
               if subkey == None:
                  setattr(dw, 'None', val[subkey])
               else:
                  setattr(dw, subkey, val[subkey])
            
            dw.__save__(root_node, key, conf, new_conf)
Example #17
def read_text_pyfusion(files,
                       target='^Shot .*',
                       ph_dtype=None,
                       plot=pl.isinteractive(),
                       ms=100,
                       hold=0,
                       debug=0,
                       quiet=1,
                       exception=Exception):
    """ Accepts a file or a list of files, returns a list of structured arrays
    See merge ds_list to merge and convert types (float -> pyfusion.prec_med
    """
    st = seconds()
    last_update = seconds()
    file_list = files
    if len(np.shape(files)) == 0: file_list = [file_list]
    f = 'f8'
    if ph_dtype == None:
        ph_dtype = [('p12', f), ('p23', f), ('p34', f), ('p45', f), ('p56', f)]
    #ph_dtype = [('p12',f)]
    ds_list = []
    comment_list = []
    count = 0
    for (i, filename) in enumerate(file_list):
        if seconds() - last_update > 30:
            last_update = seconds()
            print('reading {n}/{t}: {f}'.format(f=filename,
                                                n=i,
                                                t=len(file_list)))
        try:
            if pl.is_string_like(target):
                skip = 1 + find_data(filename, target, debug=debug)
            else:
                skip = target
            if quiet == 0:
                print('{t:.1f} sec, loading data from line {s} of {f}'.format(
                    t=seconds() - st, s=skip, f=filename))
            #  this little bit to determine layout of data
            # very inefficient to read twice, but in a hurry!
            txt = np.loadtxt(fname=filename,
                             skiprows=skip - 1,
                             dtype=str,
                             delimiter='FOOBARWOOBAR')
            header_toks = txt[0].split()
            # is the first character of the 2nd last a digit?
            if header_toks[-2][0] in '0123456789':
                if pyfusion.VERBOSE > 0:
                    print('found new header including number of phases')
                n_phases = int(header_toks[-2])
                ph_dtype = [('p{n}{np1}'.format(n=n, np1=n + 1), f)
                            for n in range(n_phases)]

            if 'frlow' in header_toks:  # add the two extra fields
                fs_dtype = [('shot', 'i8'), ('t_mid', 'f8'),
                            ('_binary_svs', 'i8'), ('freq', 'f8'),
                            ('amp', 'f8'), ('a12', 'f8'), ('p', 'f8'),
                            ('H', 'f8'), ('frlow', 'f8'), ('frhigh', 'f8'),
                            ('phases', ph_dtype)]
            else:
                fs_dtype = [('shot', 'i8'), ('t_mid', 'f8'),
                            ('_binary_svs', 'i8'), ('freq', 'f8'),
                            ('amp', 'f8'), ('a12', 'f8'), ('p', 'f8'),
                            ('H', 'f8'), ('phases', ph_dtype)]

            ds_list.append(
                np.loadtxt(fname=filename, skiprows=skip, dtype=fs_dtype))
            count += 1
            comment_list.append(filename)
        except ValueError as info:
            print('Conversion error while reading {f} with loadtxt - {info}'.
                  format(f=filename, info=info))
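
All of these examples lean on matplotlib's old cbook helpers is_string_like and is_numlike (reached here through pylab as pl). Those helpers have been removed from current matplotlib releases, so running the snippets on a recent stack needs small stand-ins, for example the simplified ones below (the originals were a little more permissive, e.g. is_string_like accepted anything that could be concatenated with a string):

import numpy as np

def is_string_like(obj):
    return isinstance(obj, str)

def is_numlike(obj):
    return isinstance(obj, (int, float, complex, np.number)) and not isinstance(obj, bool)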