Example #1
def get_hj_modules(force_recompile=False):
    """ find the path to the exes, and optionally (if force_recompile=True) or automatically recompile
    """
    # append python version number - but hide '.' 
    hj_module  = 'gethjdata'+sys.version[0:3].replace('.','_')
    # Note: 3.4 may need f2py3.4 - 3.5 f2py3 gives PyCObject_Type error
    f2py = 'f2py3' if sys.version_info[0] >= 3 else 'f2py'
    oldwd = os.getcwd()

    short_host_name = os.uname()[1]
    cdir = os.path.dirname(os.path.abspath(__file__))
    #    cd = 'cd {cdir}; '.format(cdir=cdir)
    exe_path = os.path.join(cdir,short_host_name)
    if not(os.path.isdir(exe_path)):
        print('creating {d}'.format(d=exe_path))
        os.mkdir(exe_path)

    try:
        if pyfusion.DBG() > 0: print('try import')
        import_module(hj_module,dict1=locals())
    except Exception as reason:
        print("Can't import {m} as get_hjdata at first attempt:  reason - {r}, {args}"
              .format(r=reason, args=reason.args, m=hj_module))
        force_recompile = True
    # Should use subprocess instead of command, and get more feedback
    if force_recompile:
        os.chdir(exe_path) # move to the exe dir (although module stays one up ../
        import subprocess
        print('Compiling Heliotron J data acquisition library, please wait...')
        ## Note: g77 will do, (remove --fcompiler-g95)  but can't use TRIM function etc 
        cmds = ['gcc -c -fPIC ../libfdata.c ../intel.c',
                # f2py won't accept ../ or full path in the -m param! need to cd
                'cd ..; {f} --fcompiler=gfortran -c -m {m} {xp}.o -lm  hj_get_data.f'
                .format(m=hj_module, f=f2py,xp=os.path.join(exe_path,'*')),
                'f77 -Lcdata ../save_h_j_data.f intel.o libfdata.o -o {exe}'
                .format(exe=os.path.join(exe_path,'save_h_j_data')), # 2015
            ]
        print(cmds)
        for cmd in cmds:
            sub_pipe = subprocess.Popen(cmd,  shell=True, stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
            (resp,err) = sub_pipe.communicate()
            if resp != b'': print(resp[-10:])
            if (err != b'') or (sub_pipe.returncode != 0):
                print(resp, err, '.')
                print(err.split(b'\n')[-2])
                print('compile subproc returned',sub_pipe.returncode)
                if sub_pipe.returncode != 0:
                    print('*****ERROR****',200*'*','\n')
        try:
            print('try after compiling...')
            import_module(hj_module,dict1=locals())
        except Exception as reason:
            print("Can't import {m} as get_hjdata at second attempt {r}, {args}"
                  .format(r=reason, args=reason.args, m=hj_module))
            raise ImportError("Can't import Heliotron J data acquisition library, staying in exe dir")
        
    os.chdir(oldwd)
    return(hj_module, exe_path)
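
A minimal sketch (not part of pyfusion) of the "should use subprocess ... and get more feedback" note above: run each compile command with subprocess.run, show the captured output, and stop on the first failure. The helper name and the cwd handling are assumptions for illustration only; requires Python 3.7+ for capture_output.

import subprocess

def run_compile_cmds(cmds, cwd=None):
    """ Run shell commands in sequence, printing output and raising on failure. """
    for cmd in cmds:
        result = subprocess.run(cmd, shell=True, cwd=cwd,
                                capture_output=True, text=True)
        if result.stdout: print(result.stdout[-200:])
        if result.returncode != 0:
            print(result.stderr)
            raise RuntimeError('compile step failed: {c}'.format(c=cmd))

# e.g. run_compile_cmds(['gcc -c -fPIC ../libfdata.c ../intel.c'], cwd=exe_path)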
Example #2
def normalise(input_data, method=None, separate=False):
    """ method=None -> default, method=0 -> DON'T normalise
    """
    from numpy import mean, sqrt, max, abs, var, atleast_2d
    from pyfusion.data.base import DataSet
    # this allows method='0'(or 0) to prevent normalisation for cleaner code
    # elsewhere
    if pyfusion.DBG() > 3: print('separate = %d' % (separate))
    if (method == 0) or (method == '0'):
        return (input_data)

    if (method is None) or (method.lower() == "none"): method = 'rms'
    if isinstance(input_data, DataSet):
        output_dataset = DataSet(input_data.label + "_normalise")
        for d in input_data:
            output_dataset.add(normalise(d, method=method, separate=separate))
        return output_dataset
    if method.lower() in ['rms', 'r']:
        if input_data.signal.ndim == 1:
            norm_value = sqrt(mean(input_data.signal**2))
        else:
            rms_vals = sqrt(mean(input_data.signal**2, axis=1))
            if separate == False:
                rms_vals = max(rms_vals)
            norm_value = atleast_2d(rms_vals).T
    elif method.lower() in ['peak', 'p']:
        if input_data.signal.ndim == 1:
            norm_value = abs(input_data.signal).max(axis=0)
        else:
            max_vals = abs(input_data.signal).max(axis=1)
            if separate == False:
                max_vals = max(max_vals)
            norm_value = atleast_2d(max_vals).T
    elif method.lower() in ['var', 'variance', 'v']:
        # this is strange because it over-compensates - if we use sqrt(var()) it would be the same as RMS
        if input_data.signal.ndim == 1:
            norm_value = var(input_data.signal)
        else:
            var_vals = var(input_data.signal, axis=1)
            if separate == False:
                var_vals = max(var_vals)
            norm_value = atleast_2d(var_vals).T
    input_data.signal = input_data.signal / norm_value
    #print('norm_value = %s' % norm_value)
    norm_hist = ','.join(
        ["{0:.2g}".format(float(v)) for v in norm_value.flatten()])
    input_data.history += "\n:: norm_value =[{0}]".format(norm_hist)
    input_data.history += ", method={0}, separate={1}".format(method, separate)
    input_data.scales = norm_value
    input_data.norm_method = method + ':' + ['all', 'sep'][separate]

    debug_(pyfusion.DEBUG,
           level=2,
           key='normalise',
           msg='about to return from normalise')
    return input_data
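
A standalone numpy sketch of the 'rms' branch above, showing the effect of separate=True (per-channel scaling) versus separate=False (one common scale). The array and variable names are illustrative only, not the pyfusion API.

import numpy as np

sig = np.vstack([0.1 * np.sin(np.linspace(0, 20, 1000)),
                 2.0 * np.sin(np.linspace(0, 20, 1000))])   # 2 channels

rms_vals = np.sqrt(np.mean(sig ** 2, axis=1))     # one RMS per channel
sep = sig / np.atleast_2d(rms_vals).T             # separate=True: each channel -> RMS 1
combined = sig / np.max(rms_vals)                 # separate=False: common scale, ratios kept

print(np.sqrt(np.mean(sep ** 2, axis=1)))         # ~[1, 1]
print(np.sqrt(np.mean(combined ** 2, axis=1)))    # ~[0.05, 1]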
Example #3
def svd(input_data):
    from .timeseries import SVDData
    svddata = SVDData(input_data.timebase, input_data.channels,
                      linalg.svd(input_data.signal, 0))
    svddata.history = input_data.history
    svddata.scales = input_data.scales  # need to pass it on to caller
    svddata.norm_method = input_data.norm_method  # this too
    if pyfusion.DBG() > 4: print("input_data.scales", input_data.scales)
    debug_(pyfusion.DEBUG, level=2, key='svd', msg='about to return from svd')
    return svddata
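
For reference, a sketch of the underlying numpy call that SVDData wraps: the SVD of an (n_channels, n_samples) signal array. The shapes are illustrative; the second positional 0 in linalg.svd(input_data.signal, 0) is full_matrices=False.

import numpy as np

signal = np.random.randn(6, 512)                       # 6 channels, 512 samples
U, s, Vh = np.linalg.svd(signal, full_matrices=False)  # same as np.linalg.svd(signal, 0)
print(U.shape, s.shape, Vh.shape)                      # (6, 6) (6,) (6, 512)
# s: singular values; U columns: spatial patterns; Vh rows: temporal patterns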
Example #4
def get_delay(shot):
    if shot >= 85000:
        delay = 0.0
        print('get_basic_diagnostics - should fix with fetch')

    elif shot >= 46455:
        delay = 0.2
    elif shot >= 46357:
        delay = 0.4
    elif shot >= 38067:
        delay = 0.1
    elif shot >= 36185:
        delay = 0.3
    elif shot >= 36142:
        delay = 0.1
    elif shot >= 31169:
        delay = 0.3
    else:
        delay = 0.1
    if pyfusion.DBG() > 0: print('delay', delay)
    return (delay)
Example #5
    def __init__(self,
                 filename=None,
                 fileformat=None,
                 shot=None,
                 verbose=0,
                 plot=False,
                 debug=0,
                 hold=True):
        """ read in data, optionally plot (True gets default channel, N gets ch=N
        if filename contains a {, assume it is a format
        let get_basic_params deal with .bz2 files etc for now.
        """
        self.debug = debug
        if filename is None or "{" in filename:
            self.shot = int(shot)
            if filename is not None and "{" in filename:
                self.filename = filename.format(self.shot)
            else:
                self.filename = fileformat.format(self.shot)
            # prepend the diag name ahead
            folder = self.filename.split('@')[0]
            self.filename = os.path.join(folder, self.filename)
        else:
            self.filename = filename

        path_to_igetfile = os.getenv('IGETFILE')
        if path_to_igetfile is not None:  # returns None starting 2.6
            self.filename = call_igetfile(path_to_igetfile, self.filename)

        (self.data, self.vardict) = read_igetfile(filename=self.filename,
                                                  verbose=0,
                                                  plot=plot,
                                                  hold=hold,
                                                  debug=debug)
        self.valnames = self.vardict['ValName']  # shortcut
        # get rid of file if we got it from the server
        if path_to_igetfile is not None and pyfusion.DBG() < 3:
            os.remove(self.filename)

        if plot != 0:
            if type(plot) == type(True): self.plot()
            else: self.plot(ch=plot)
Example #6
 def get_kh(self):
     # TODO: shouldn't need to worry about fetch mode here...
     imain2_path = r'\h1data::top.operations.magnetsupply.lcu.setup_main.i2'
     isec2_path = r'\h1data::top.operations.magnetsupply.lcu.setup_sec.i2'
     if self.fetch_mode == 'thin client':
         try:
             imain2 = self.acq.connection.get(imain2_path)
             isec2 = self.acq.connection.get(isec2_path)
             return float(isec2/imain2)
         except:
             return None
     elif self.fetch_mode == 'http':
         print("http fetch of k_h disabled until supported by H1DS")
         return -1.0
         """
         imain2_path_comp = get_tree_path(imain2_path)
         isec2_path_comp = get_tree_path(isec2_path)
         
         imain2_url = self.acq.server + '/'.join([imain2_path_comp['tree'],
                                                  str(self.shot),
                                                  imain2_path_comp['tagname'],
                                                  imain2_path_comp['nodepath']])
         isec2_url = self.acq.server + '/'.join([isec2_path_comp['tree'],
                                                 str(self.shot),
                                                 isec2_path_comp['tagname'],
                                                 isec2_path_comp['nodepath']])
         imain2 = mdsweb.data_from_url(imain2_url)
         isec2 = mdsweb.data_from_url(isec2_url)
         return float(isec2/imain2)
         """
         
     else:
         try:
             imain2 = self.tree.getNode(imain2_path)
             isec2 = self.tree.getNode(isec2_path)
             return float(isec2/imain2)
         except:
             if pyfusion.DBG() > 0: 
                 traceback.print_exc()
             return None
Example #7
def get_basic_diagnostics(diags=None,
                          shot=54196,
                          times=None,
                          delay=None,
                          exception=False,
                          debug=0):
    """ return a list of np.arrays of normally numeric values for the 
    times given, for the given shot.
    Will access server if env('IGETFILE') points to an exe, else accesses cache
    """

    global lhd_summary
    # if no exception given and we are not debugging
    # note - exception=None is a valid entry, meaning tolerate no exceptions
    # so the "default" we use is False
    if exception == False and debug == 0: exception = Exception

    if diags is None: diags = "<n_e19>,b_0,i_p,w_p,dw_pdt,dw_pdt2".split(',')
    if len(np.shape(diags)) == 0: diags = [diags]
    if delay is None: delay = get_delay(shot)

    if times is None:
        times = np.linspace(0, 4, 4000)

    times = np.array(times)
    vals = {}
    # create an extra time array to allow a cross check
    vals.update({'check_tm': times})
    vals.update({'check_shot': np.zeros(len(times), dtype=int) + shot})
    for diag in diags:
        if diag not in file_info:
            warn('diagnostic {0} not found in shot {1}'.format(diag, shot),
                 stacklevel=2)
            vals.update({diag: np.nan + times})
        else:
            info = file_info[diag]
            varname = info['name']
            subfolder = info['format'].split('@')[0]
            filepath = os.path.sep.join(
                [localigetfilepath, subfolder, info['format']])
            if ':' in varname: (oper, varname) = varname.split(':')
            else: oper = None

            if info['format'].find('.csv') > 0:
                try:
                    test = lhd_summary.keys()
                except:
                    csvfilename = acq_LHD + '/' + info['format']
                    if pyfusion.DBG() > 1:
                        print('looking for lhd summary in ' + csvfilename)
                    if not os.path.exists(csvfilename):
                        csvfilename += ".bz2"
                    print('reloading {0}'.format(csvfilename))
                    lhd_summary = read_csv_data(csvfilename, header=3)
                    # should make this more formal - last shots
                    # from an 'extra' file, and finally, from shot info
                if shot > 117000:  # fudge to get latest data
                    lhd_summary = np.load(acq_LHD +
                                          '/LHD_summary.npz')['LHD'].tolist()
                    print('loading newer shots from a separate file - fix-me')
                    #  val = lhd_summary[varname][shot-70000]    # not needed
                #  else:
                val = lhd_summary[varname][shot]
                valarr = np.double(val) + (times * 0)
            else:
                debug_(max(pyfusion.DBG(), debug), level=4, key='find_data')
                try:

                    dg = igetfile(filepath, shot=shot, debug=debug - 1)
                except IOError:
                    try:
                        dg = igetfile(filepath + '.bz2',
                                      shot=shot,
                                      debug=debug - 1)
                    except IOError:
                        try:
                            dg = igetfile(filepath + '.gz',
                                          shot=shot,
                                          debug=debug - 1)
                        except exception as details:
                            if debug > 0:
                                print('diag at {fp} not found'.format(
                                    fp=filepath))
                            print(details, details.args)
                            dg = None
                            #break  # give up and try next diagnostic
                if dg is None:  # messy - break doesn't do what I want?
                    valarr = None
                else:
                    nd = dg.vardict['DimNo']
                    if nd != 1:
                        raise ValueError(
                            'Expecting a 1 D array in {0}, got {1}!'.format(
                                dg.filename, nd))

                    # pre re. w = np.where(np.array(dg.vardict['ValName'])==varname)[0]
                    matches = [
                        re.match(varname, nam) != None
                        for nam in dg.vardict['ValName']
                    ]
                    w = np.where(np.array(matches) != False)[0]
                    # get the column(s) of the array corresponding to the name
                    if (oper in 'sum,average,rms,std,max,min'.split(',')):
                        if oper == 'sum': op = np.sum
                        elif oper == 'average': op = np.average
                        elif oper == 'min': op = np.min
                        elif oper == 'max': op = np.max
                        elif oper == 'std': op = np.std
                        else:
                            raise ValueError(
                                'operator {o} in {n} not known to get_basic_diagnostics'
                                .format(o=oper, n=info['name']))
                        valarr = op(dg.data[:, nd + w], 1)
                    else:
                        if len(w) != 1:
                            raise LookupError(
                                'Need just one instance of variable {0} in {1}'
                                .format(varname, dg.filename))
                        if len(np.shape(dg.data)) != 2:
                            raise LookupError(
                                'insufficient data for {0} in {1}'.format(
                                    varname, dg.filename))

                        valarr = dg.data[:, nd + w[0]]

                    tim = dg.data[:, 0] - delay

                    if oper == 'ddt':  # derivative operator
                        valarr = np.diff(valarr) / (np.average(np.diff(tim)))
                        tim = (tim[0:-1] + tim[1:]) / 2.0

                    if oper == 'ddt2':  # abs(ddw)*derivative operator
                        dw = np.diff(valarr) / (np.average(np.diff(tim)))
                        ddw = np.diff(dw) / (np.average(np.diff(tim)))
                        tim = tim[2:]
                        valarr = 4e-6 * dw[1:] * np.abs(ddw)

                    if (len(tim) < 10) or (np.std(tim) < 0.1):
                        raise ValueError('Insufficient points or degenerate '
                                         'timebase data in {0}, {1}'.format(
                                             varname, dg.filename))

                    valarr = (stineman_interp(times, tim, valarr))
                    w = np.where(times > max(tim))
                    valarr[w] = np.nan

            if valarr is not None: vals.update({diag: valarr})
    debug_(max(pyfusion.DBG(), debug), level=5, key='interp')
    return (vals)
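
A standalone sketch of the final resampling step above: interpolate the raw (tim, valarr) trace onto the requested times, then blank out points beyond the available data. np.interp is used here only as a stand-in for stineman_interp, and the arrays are made up for illustration.

import numpy as np

tim = np.linspace(0.0, 2.0, 200)        # raw diagnostic timebase (after delay)
valarr = np.sin(2 * np.pi * tim)        # raw diagnostic values
times = np.linspace(0.0, 4.0, 400)      # requested sample times

resampled = np.interp(times, tim, valarr)
resampled[times > tim.max()] = np.nan   # no extrapolation past the end of the data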
Example #8
def filter_fourier_bandpass(input_data,
                            passband,
                            stopband,
                            taper=None,
                            debug=None):
    """ 
    Note: it is MUCH more efficient (2.2x faster) to use real FFTs (implemented April)
    Use a Fourier space taper/tophat or pseudo gaussian filter to perform 
    narrowband filtering (much narrower than butterworth).  
    Problem is that bursts may generate ringing. 
    This should be better with taper=2, but it is not clear
      debug = None (follow pyfusion debug), 2 plot responses
    
    See the __main__ code below for nice test facilities
    twid is the width of the transition from stop to pass (not impl.?)
    >>> tb = timebase(np.linspace(0,20,512))
    >>> w = 2*np.pi* 1  # 1 Hertz
    >>> dat = dummysig(tb,np.sin(w*tb)*(tb<np.max(tb)/3))
    >>> fop = filter_fourier_bandpass(dat,[0.9,1.1],[0.8,1.2],debug=1).signal[0]

    Testing can be done on the dummy data set generated after running 
    filters.py
    e.g. (with pyfusion.DEBUG=2)
    make_mask(512, [0.8,.93], [0.9,.98],dat,2)
    # medium sharp shoulder
    fopmed = filter_fourier_bandpass(dat,[9.5,10.5],[9,11],debug=1).signal[0]
    # very sharp shoulders
    fopsharp = filter_fourier_bandpass(dat,[9.1,10.9],[9,11],debug=1)
    """
    if debug is None: debug = pyfusion.DBG()
    # normalising makes it easier to think about - also for But'w'h
    if (passband[0] < stopband[0]) or (passband[1] > stopband[1]):
        raise ValueError('passband {pb} outside stopband {sb}'.format(
            pb=passband, sb=stopband))
    norm_passband = input_data.timebase.normalise_freq(np.array(passband))
    norm_stopband = input_data.timebase.normalise_freq(np.array(stopband))
    NS = len(input_data.timebase)
    NA = next_nice_number(NS)
    input_data.history += str(" {fftt} : nice number: {NA} cf {NS}\n".format(
        fftt=pyfusion.fft_type, NA=NA, NS=NS))
    # take a little more to speed up FFT

    mask = make_mask(NA, norm_passband, norm_stopband, input_data, taper)
    output_data = deepcopy(input_data)  # was output_data = input_data

    if (pyfusion.fft_type == 'fftw3'):
        # should migrate elsewhere, but the import is only 6us
        # the setup time seems about 150-250us even if size is in wisdom
        # for 384, numpy is 20us, fftw3 is 4us, so fftw3 slower for less than
        # 10 channels (unless we cache the plan)
        #time# st=seconds()
        import pyfftw
        #time# im=seconds()
        tdtype = np.float32
        fdtype = np.complex64
        # this could be useful to cache.
        simd_align = pyfftw.simd_alignment  # 16 at the moment.
        tdom = pyfftw.n_byte_align(np.zeros(NA, dtype=tdtype), simd_align)
        FT = pyfftw.n_byte_align_empty(NA // 2 + 1, simd_align, fdtype)
        ids = [[id(tdom), id(FT)]]  # check to see if it moves out of alignment
        #time# alloc_t = seconds()
        fwd = pyfftw.FFTW(tdom,
                          FT,
                          direction='FFTW_FORWARD',
                          **pyfusion.fftw3_args)
        rev = pyfftw.FFTW(FT,
                          tdom,
                          direction='FFTW_BACKWARD',
                          **pyfusion.fftw3_args)
        #time# pl=seconds()
        #time# print("import {im:.2g}, alloc {al:.2g}, planboth {pl:.2g}"
        #time#      .format(im=im-st, al=alloc_t-im, pl=pl-alloc_t))
    else:
        tdtype = np.float32
        tdom = np.zeros(NA, dtype=tdtype)

        # example of tuning
        #pyfusion.fftw3_args= {'planning_timelimit': 50.0, 'threads':1, 'flags':['FFTW_MEASURE']}

    singl = not isinstance(output_data.channels, (list, tuple, np.ndarray))
    if singl:
        ## this is a fudge - need to set the value part to a list.
        output_data.signal = [
            output_data.signal
        ]  # not right - need to use same fudge as acq/bas
        # output_data.signal.n_channels = fudgey_n_channels  #bdb fudge for single channel diag - doesn't work, because we fudged output_data.signal to be a list (or because n_channels is a func)
    for i, s in enumerate(output_data.signal):
        #if len(output_data.signal) == 1: print('bug for a single signal')

        #time run -i  pyfusion/examples/plot_svd.py "dev_name='LHD'" start_time=.497 "normalise='r'" shot_number=90091 numpts=512 diag_name=MP2010HMPno612 "filter=dict(centre=8e3,bw=5e3,taper=2)" plot_mag=1 plot_phase=1 separate=1 closed=0 time_range=[0.0000,4.]
        # 4.5 cf 15.8diag_name=MP2010HMPno612, time_range=[0.0000,2.80000]
        # 0, 4.194304 2**21 samples, 21.8 cf 6.8 1thr
        # (0,2)secs 90091 =2000000 samples 17 np, 5.73 2thread, nosimd, 6.1 1thread (mem bw?) 3.2 sec no filt
        # note - the above are on an intermittently loaded E4300 2 processor, below on 4 core 5/760
        # 0, 4.194304 2**21 samples, 10.9 cf 3.16 thr2 3.47 1thr and 2.0 secs no filter
        # for 17 fft/ifft takes about 1.16 sec 2 threads - should be (27.5ms+28.6)*17 = 952ms (14.2 2thr) OK
        # duplicate the fft execute lines  4.3(3.47)  2thr 3.7(3.16) extra 810ms (expect 14.2ms * 2 * 17) =482
        # the difference between 2 and 1thr should be 14*2*17 ms 500ms.
        # originally - 90ms/channel extra in reverse transform - maybe the 50 sec limit stopped optimization
        # next _nice: 5.74 for 10 sec lenny
        #  E4300: 1thr  9.3 (39np) for 10 sec 90091;    5.5 for 4 sec (19.6 np)
        if pyfusion.fft_type == 'fftw3':  # fftw3 nosim, no thread 2.8s cf 10s
            #time# sst = seconds()
            tdom[0:NS] = s  # indexed to make sure tdom is in the right part of memory
            if NS != NA: tdom[NS:] = 0.
            fwd.execute()
            FT[:] = FT * mask[0:NA // 2 + 1]  # 12ms
            rev.execute()
            output_data.signal[i] = tdom[0:NS] / NA  # doco says NA
            ids.append([id(tdom), id(FT)])
            #time# print("{dt:.1f}us".format(dt=(seconds()-sst)/1e-6)),
        else:  # default to numpy
            tdom[0:NS] = s
            FT = np.fft.fft(tdom)
            IFT = np.fft.ifft(mask * FT)
            if np.max(np.abs(IFT.imag)) > 1e-6 * np.max(np.abs(IFT.real)):
                pyfusion.logger.warning("inverse fft imag part > 1e-6")

            output_data.signal[i] = IFT.real[0:NS]

    if debug > 2: print('ids of fftw3 input and output: {t}'.format(t=ids))
    if debug > 1:
        fig = plt.figure()
        #fplot = host_subplot(111)
        fplot = fig.add_subplot(111)
        tplot = fplot.twinx()
        tplot.plot(input_data.signal[0], 'c', label='input')
        # for a while I needed a factor of 3 here too for fftw - why ????
        #plt.plot(output_data.signal[0]/(3*NA),'m',label='output/{N}'.format(N=3*NA))
        tplot.plot(output_data.signal[0], 'm', label='output')
        tplot.set_ylim(-2.4, 1.1)
        fplot.plot(mask, 'r.-', label='mask')
        fplot.plot(np.abs(FT) / len(mask), label='FT')
        #fplot.set_ylim(-.2,3.8)
        #fplot.set_yscale('log', subsy=[2,5])
        #fplot.set_ylim(1e-7,1e5)
        fplot.set_yscale('symlog', linthreshy=1e-6)
        fplot.set_ylim(0, 1e8)
        fig.suptitle('Passband {pbl}...{pbh}'.format(pbl=passband[0],
                                                     pbh=passband[1]))
        fplot.legend(loc=4)  # bottom right
        tplot.legend(loc=0)
        plt.show()
    debug_(debug, 3, key='filter_fourier')
    if np.max(mask) == 0: raise ValueError('Filter blocks all signals')
    from pyfusion.data.timeseries import Signal
    if singl:
        output_data.signal = Signal(output_data.signal[0])
        # output_data.signal.n_channels = fudgey_n_channels  would work if signal was propoer not just an array of data
    return output_data
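
A minimal standalone sketch of the numpy branch above: band-pass a real signal by masking its spectrum, here using the real FFT and a hard-edged top-hat mask. The sample rate, frequencies and mask are illustrative; the code above builds a tapered mask with make_mask() instead.

import numpy as np

fs = 1000.0                                   # sample rate in Hz (assumed)
t = np.arange(2048) / fs
sig = np.sin(2 * np.pi * 50 * t) + np.sin(2 * np.pi * 200 * t)

freqs = np.fft.rfftfreq(len(sig), d=1 / fs)
mask = (freqs > 150) & (freqs < 250)          # keep the 200 Hz line, drop 50 Hz
filtered = np.fft.irfft(mask * np.fft.rfft(sig), n=len(sig))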
Example #9
def make_mask(NA, norm_passband, norm_stopband, input_data, taper):
    """  works well now, except that the stopband is adjusted to be
    symmetric about the passband (take the average of the differences
    The problem with crashes (zero mask) was solved by shifting the 
    mask before and after integrating, also a test for aliasing (on the
    mask before integration).
    """
    mask = np.zeros(NA)
    # define the 4 key points
    #         /npblow-------------npbhi\
    # ___nsbl/                          \nsbhi____
    n_sb_low = int(norm_stopband[0] * NA / 2)
    n_pb_low = int(norm_passband[0] * NA / 2)
    n_pb_hi = int(norm_passband[1] * NA / 2)
    n_sb_hi = int(norm_stopband[1] * NA / 2)

    dt = float(np.average(np.diff(input_data.timebase)))
    if n_sb_hi >= len(mask):
        raise ValueError('Filter frequency too high for data - units '
                         'problem? - sample spacing is {dt:.2g}'.format(dt=dt))

    # twid is the transition width, and should default so that the sloped part is the same width as the flat?
    # !!! twid is not an input - !!!! doesn't do that yet.
    # make the transition width an even number, and the larger of the two
    # need to pull this code out and be sure it works.
    twid = 2 * (1 + max(n_pb_low - n_sb_low, n_sb_hi - n_pb_hi) // 2)
    if (twid > (n_pb_low - n_sb_low) * 3) or (twid > (n_sb_hi - n_pb_hi) * 3):
        print(
            '*********** Warning - unbalanced cutoff rate between the high and low ends:'
            ' the cutoff rates will be equalised, widening one transition and narrowing the other;'
            ' the difference between stop and pass bands should be similar at both ends.'
        )
    if (twid < 4):  # or (n_sb_low < 0):  #< not requ since fixed
        if taper == 2:
            raise ValueError(
                'taper 2 requires a bigger margin between stop and pass')
        elif taper is None:
            warn('defaulting taper to 1 as band edges are sharp: twid={twid}'.
                 format(twid=twid))
            taper = 1
    else:
        if taper is None:
            taper = 2

    if taper == 1:
        #          _____
        #         /     \
        #        /       \
        # ______/         \___
        # want 0 at sb low and sb high, 1 at pb low and pb high
        # present code does not quite do this.
        # try to prevent zero width or very narrow (DC only) filters.
        if n_sb_low < 0: n_sb_low = 0
        if n_pb_low < 0: n_pb_low = 0
        if n_pb_hi < 1: n_pb_hi = 1
        if n_sb_hi <= n_pb_hi: n_sb_hi = n_pb_hi + 1
        for n in range(n_sb_low, n_pb_low + 1):
            if n_sb_low == n_pb_low:  # allow for pass=stop on low side
                mask[n] = 1.
            else:
                mask[n] = float(n - n_sb_low) / (n_pb_low - n_sb_low)  # trapezoid
        for n in range(n_pb_hi, n_sb_hi + 1):
            mask[n] = float(n_sb_hi - n) / (n_sb_hi - n_pb_hi)  # trapezoid
        for n in range(n_pb_low, n_pb_hi + 1):
            mask[n] = 1
    elif taper == 2:
        # Note - must symmetrise (so that cumsum works)
        #          _
        #         / \
        #        |   |
        #  ______/   \___
        # want 0 at sb low and sb high, 1 at pb low and pb high
        # this means that the peak of the mask before integration is halfway between sb_low and pb_low
        # and pb_low - sb_low is an even number
        # present code does not quite do this.

        n_sb_low = n_pb_low - twid  # sacrifice the stop band, not the pass
        n_sb_hi = n_pb_hi + twid

        low_mid = n_pb_low - twid // 2
        high_mid = n_pb_hi + twid // 2
        for n in range(n_sb_low, low_mid):
            mask[n] = float(n - n_sb_low) / (low_mid - 1 - n_sb_low)  # trapezoid
            mask[2 * low_mid - n - 1] = mask[n]  # down ramp - repeat max
        #wid_up = n_sb_hi - n_pb_hi
        for n in range(n_pb_hi, high_mid):  # negative tri
            mask[n] = float(n_pb_hi - n) / (high_mid - n_pb_hi - 1)  # trapezoid
            mask[2 * high_mid - n - 1] = mask[n]
        before_integration = mask
        # after running filters.py, this should be OK
        # make_mask(512, [0.8,.93], [0.9,.98],dat,2)
        # but changing 0.98 to 0.99 will give aliasing error.
        if np.max(np.abs(mask[NA // 2 - 4:NA // 2 + 4])) > 0:
            raise ValueError('mask aliasing error')
        # note: ifftshift is only different for an odd data length
        # the fftshifts were necessary to avoid weirdness if the
        # stopband went below zero freq.
        mask = np.fft.ifftshift(np.cumsum(np.fft.fftshift(mask)))  # integrate
        if pyfusion.DBG() > 1:
            nonr = 0.5 / dt
            fig = plt.figure()
            ax1 = fig.add_subplot(111)
            ax1.plot(np.arange(len(mask)) / dt / float(NA), mask, '.-')
            ax1.plot(np.arange(len(mask)) / dt / float(NA), before_integration)
            ax1.set_xlabel(
                'real freq. units, (norm on top scale), npoints={NA}, norm/real = {nonr}'
                .format(NA=NA, nonr=nonr))
            ax2 = ax1.twiny()
            # this is my hack - it should be OK, but may be a little out
            ax2.set_xlim(np.array(ax1.get_xlim()) / nonr)
            fig.suptitle(
                'mask before normalisation - twid={twid}'.format(twid=twid))
            plt.show(0)

        if np.max(mask) == 0:
            raise ValueError(
                'zero mask, '
                'norm_passband = {pb}, norm_stopband={sb}, taper {t}'.format(
                    pb=norm_passband, sb=norm_stopband, t=taper))
        mask = mask / np.max(mask)
    # reflection only required for complex data
    # this even and odd is not totally thought through...but it seems OK
    if np.mod(NA, 2) == 0: mask[:NA // 2:-1] = mask[1:(NA // 2)]  # even
    else: mask[:1 + NA // 2:-1] = mask[1:(NA // 2)]  # odd
    return (mask)
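
A standalone sketch of the taper=2 trick used above: place a positive triangle across the low transition band and a matching negative triangle across the high one, integrate with cumsum, then normalise. The result is a band-pass mask with smooth shoulders. The bin indices are illustrative, and the fftshift/ifftshift wrap-around handling is omitted.

import numpy as np

NA = 512
n_sb_low, n_pb_low, n_pb_hi, n_sb_hi = 40, 60, 100, 120   # bin indices (assumed)

d = np.zeros(NA)
up = np.arange(n_pb_low - n_sb_low)
d[n_sb_low:n_pb_low] = np.minimum(up, up[::-1])           # +triangle over the rising edge
dn = np.arange(n_sb_hi - n_pb_hi)
d[n_pb_hi:n_sb_hi] = -np.minimum(dn, dn[::-1])            # -triangle over the falling edge

mask = np.cumsum(d)
mask = mask / mask.max()    # ~0 in the stopbands, 1 across the passband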
Example #10
File: fetch.py  Project: bdb112/pyfusion
def regenerate_dim(x):
    """ assume x in ns since epoch from the current time
        This code assumes the first stamp is correct """
    msg = None  # msg allows us to see which shot/diag was at fault
    diffs = np.diff(x)
    # bincount needs a positive input and needs an array with N elts where N is the largest number input
    small = (diffs > 0) & (diffs < 1000000)
    sorted_diffs = np.sort(diffs[np.where(small)[0]])
    bincounts = np.bincount(sorted_diffs)
    bigcounts, bigvals = myhist(diffs[np.where(~small)[0]])
    dt_freq_pairs = [[argc, bincounts[argc]]
                     for argc in np.argsort(bincounts)[::-1][0:5]]
    if dt_freq_pairs[0][1] > len(x) * 0.95:
        actual_dtns = dt_freq_pairs[0][0]  # should reconcile with the dtns calculated below (udiffs)
        if diffs[0] != actual_dtns:
            print('***Warning - first element or second timestamp is likely to be corrupted - dtns = ',
                  diffs[0])
    if pyfusion.VERBOSE > 0:
        print('[[diff, count],....]')
        print('small:', dt_freq_pairs)  # e.g. ('small:', [[2000, 511815], [198000, 3], [770000, 2]...
        print('big or negative:',
              [[bigvals[argc], bigcounts[argc]]
               for argc in np.argsort(bigcounts)[::-1][0:10]])
        print('bincounts', bincounts)

    debug_(pyfusion.DEBUG,
           3,
           key="repair",
           msg="repair of W7-X scrambled Langmuir probe timebase")
    udiffs, counts = np.unique(diffs, return_counts=1)
    if len(counts) == 1:
        msg = 'no repair required'
        print(msg)
        return (x, msg)
    dtns_old = 1 + np.argmax(bincounts[1:])  # skip the first position - it is 0
    dtns = udiffs[np.argmax(counts)]
    histo = plt.hist if pyfusion.DBG() > 1 else np.histogram
    # look over all the timestamps, not the differences, so we see where the gaps are
    cnts, vals = histo(x, bins=200)[0:2]
    # ignore the two end bins - hopefully there will be very few there
    # find bins which are noticeably empty  - very rough
    wmin = np.where(cnts[1:-1] < np.max(cnts[1:-1]) / 10)[0]
    if len(wmin) > 0:
        print('**********\n*********** Gap in data may be > {p:.2f}%'.format(
            p=100 * len(wmin) / float(len(cnts))))
    x01111 = np.ones(len(x))  # x01111 will be all 1s except for the first elt.
    # we apparently used to generate a trace starting at 0 and then correct it in another routine - now correct it here.
    x01111[0] = 0

    # print(x[0:4]/1e9)
    errcnt = np.sum(bigcounts) + np.sum(np.sort(counts)[::-1][1:])
    if errcnt > 0 or (pyfusion.VERBOSE > 0):
        msg = str(
            '** repaired length of {l:,}, dtns={dtns:,}, {e} erroneous utcs (if first sample is correct)'
            .format(l=len(x01111), dtns=dtns, e=errcnt))

    fixedx = np.cumsum(x01111) * dtns + x[0]
    # the following comment is most likely no longer applicable:   (marked ##)
    # The sq wave look is probably an inadequate precision bug in save_compressed
    ##This misses the case when the time values are locked onto discrete values ('sq wave look')
    ## unless the tolerance is made > 0.1 secs (e.g. 1.9 works for:
    ## run  pyfusion/examples/plot_signals diag_name='W7X_L57_LP01_I' shot_number=[20160303,13] decimate=10-1 sharey=0
    # 2020: This was meant to take care of cases where the lower time bits get
    # stuck, making the time look like a staircase, for example
    # [20160310,11] diag_name=W7X_L53_LP08_I has a step length of 83ms (from old npz files)
    # This code is retained, but now the time offset tolerance should be much smaller - e.g. 1ms
    # plt.plot(x[0:100000],',')
    for dtt in [1e-5, 1e-4, 1e-3, .01, .1]:
        wbad = np.where(abs(x - fixedx) > dtt * 1e9)[0]
        if len(wbad) < len(fixedx) / 10:
            break
    else:
        print(
            'Unable to repair most of the timebase by time offsets up to {dtt} sec'
            .format(dtt=dtt))

    debug_(pyfusion.DEBUG,
           1,
           key="repair",
           msg="repair of W7-X scrambled Langmuir timebase")
    fixedx[wbad] = np.nan
    meanoff = np.nanmean(x - fixedx)
    print(
        'For the repaired {pc:.2f}%, the mean apparent timebase offset is {err:.6f} and mean spread is ~{spr:.2g} sec'
        .format(err=meanoff / 1e9,
                spr=np.nanmean(np.abs(x - fixedx - meanoff)) / 1e9,
                pc=100 * (1 - len(wbad) / float(len(x)))))
    # suppress plot if VERBOSE<0 even if big errors
    if pyfusion.VERBOSE > 0 or ((pyfusion.VERBOSE > -1) and
                                (np.abs(meanoff) / 1e9 > 1e-6)):
        plt.figure()
        wnotnan = np.where(~np.isnan(x))[0]  # avoid lots of runtime errors
        offp = plt.plot((x - fixedx)[wnotnan] / 1e9)
        ax = plt.gca()
        ax.set_yscale('symlog', linthreshy=1e-3)
        ax.set_title(
            'timing offsets corrected by regenerate_dim - VERBOSE=-1 to suppress'
        )
        ax.set_ylabel('seconds')
        plt.show()
    if np.all(np.isnan(fixedx)):
        raise ValueError('fetch: all nans ')
    return (fixedx, msg)
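
A standalone sketch of the repair idea above: take the most common timestamp increment as the true sample spacing, rebuild the timebase from the first (assumed correct) stamp, and flag samples that sit far from the rebuilt one. The data and the 1 ms tolerance are made up for illustration.

import numpy as np

x = np.arange(0, 2000000, 2000, dtype=np.int64)      # ideal timebase: 2000 ns steps
x[100:110] += 7000000                                # simulate a run of corrupted stamps

diffs = np.diff(x)
udiffs, counts = np.unique(diffs, return_counts=True)
dtns = udiffs[np.argmax(counts)]                     # modal spacing, in ns

fixedx = x[0] + dtns * np.arange(len(x), dtype=np.int64)
wbad = np.where(np.abs(x - fixedx) > 1e-3 * 1e9)[0]  # more than 1 ms off the rebuilt base
print(dtns, len(wbad))                               # 2000, 10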
Example #11
def make_title(formatstr,
               input_data,
               channum=None,
               at_dict={},
               min_length=3,
               raw_names=False):
    """ Return a string describing the shot number, channel name etc using
    a formatstr which refers to items in a dictionary (at_dict), assembled in
    this routine, based on input_data and an optional dictionary which
    contains anything not otherwise available in input_data

    """
    ##    at_dict.update({'shot': input_data.meta['shot']})
    exception = () if pyfusion.DBG() > 3 else Exception
    try:
        at_dict.update(input_data.meta)  # this gets all of it!

        if channum is None:
            name = ''
        else:
            if isinstance(input_data.channels, list):
                chan = input_data.channels[channum]
            else:
                chan = input_data.channels
            if raw_names:
                name = chan.name
            else:
                name = chan.config_name

        try:  #  remove leading W7X_  LHD_ etc from channel labels.
            if 'device' in list(input_data.params.values())[0]['params']:
                device = list(input_data.params.values())[0]['params']['device']
                name = name.replace(device + '_', '')
        except:
            pass
        at_dict.update({'units': chan.units})
        at_dict.update({'name': name})
        # replace internal strings of non-numbers with a single .  a14_input03 -> 14.03
        short_name = ''
        last_was_number = False
        discarded = ''
        debug_(pyfusion.DEBUG, 4, key='make_title')
        for c in name:  # start from the first char
            if c >= '0' and c <= '9':
                short_name += c
                last_was_number = True
            else:
                if last_was_number: short_name += '.'
                else: discarded += c
                last_was_number = False

        if len(short_name) <= min_length:
            # if it fits, have the lot
            if len(name) < 8:
                short_name = name
                # else allow 4 more chars - makes about 6-8 chars
            else:
                short_name = discarded[-4:] + short_name

        at_dict.update({'short_name': short_name})
        return (formatstr.format(**at_dict))
    except exception as ex:
        warn('in make_title for format="%s", at_dict=%s' %
             (formatstr, at_dict),
             exception=ex)
        return ('')
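
A worked example of the channel-name shortening rule described in the comment above ("a14_input03 -> 14.03"): digits are kept, each run of non-digits that follows a digit becomes a single '.', and other non-digits are collected in 'discarded'. This is a standalone re-implementation for illustration only.

def shorten(name):
    short, discarded, last_was_number = '', '', False
    for c in name:
        if c.isdigit():
            short += c
            last_was_number = True
        else:
            if last_was_number: short += '.'
            else: discarded += c
            last_was_number = False
    return short, discarded

print(shorten('a14_input03'))   # ('14.03', 'ainput')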
Example #12
    def plot(self,
             hold=0,
             ch=-1,
             tstart=None,
             tend=None,
             tstep=None,
             scl=None,
             navg=1,
             debug=1,
             axes=None,
             *args,
             **kwargs):
        """ ch=-1 -> Plot all the data items - can take a long time.
        """
        import pylab as pl
        name = self.vardict['Name']
        dim = self.vardict['DimSize']
        nv = self.vardict['ValNo']
        nd = self.vardict['DimNo']
        if ch is None:
            if name == 'lhd_mse1':
                ch = 19
            elif name.lower() == 'thomson':
                ch = 3
            else:
                ch = 1

        if axes is not None: ax = axes
        else: ax = pl.gca()

        if not hold: ax.cla()  # the 'hold' kwarg was removed from matplotlib; clear the axes instead

        linest = ['-', ':', '--', '-.']
        if pyfusion.DBG() > 0: print('shape is {s}'.format(s=shape(dim)))
        if len(shape(dim)) == 0:
            for (p, name) in enumerate(self.vardict['ValName']):
                col, lin = divmod(p, len(linest))
                if ch == -1 or ch == (p + 1):
                    ax.plot(self.data[:, 0],
                            self.data[:, p + 1],
                            label="{0}:{1}".format(name, self.shot),
                            linestyle=linest[lin],
                            color=colorset[col % len(colorset)],
                            *args,
                            **kwargs)

        else:  # 2 dim plots
            # reshape into radial profiles
            data3D = self.data.reshape(dim[0], dim[1], nd + nv)
            if tstart is None: tstart = 0
            if tend is None: tend = dim[0] // 2
            if tstep is None: tstep = max([tend // 50, 1])  # reduce to 50 steps
            # check maximum to determine scaling
            if scl is None:
                scl = max(abs((data3D[:, :, ch + nd - 1]) / 50))  # was /10
            if debug > 0:
                print("tstart=%.4g, tstep=%.4g, tend=%.4g, scl=%.4g))" %
                      (tstart, tstep, tend, scl))
            # if more than 10 profiles, plot all in grey, then
            if (((tend - tstart) / tstep) > 10) and tstep > 0:
                for t in range(tstart, tend, tstep):
                    xval = average(data3D[t:t + navg, :, nd - 1], 0)
                    ax.plot(xval,
                            scl * t +
                            average(data3D[t:t + navg, :, ch + nd - 1], 0),
                            color=0.8 -
                            (0.4 / 5) * mod(t - tstart, 5) * array([1, 1, 1]))

            if navg > 1: avg = "avg%d" % (navg)
            else: avg = ''
            # plot every 5th one in colour

            for t in range(tstart, tend, tstep * 5):
                lab = avg + "%s:%.4g %s" % (self.vardict['ValName'][
                    ch - 1], data3D[t, 0, 0], self.vardict['DimUnit'][0])
                ax.plot(xval,
                        scl * t +
                        average(data3D[t:t + navg, :, ch + nd - 1], 0),
                        label=lab,
                        *args,
                        **kwargs)

        square_inches = (array(ax.get_figure().get_size_inches()).prod() /
                         array(ax.get_geometry()[0:2]).prod())
        if square_inches < 15: pl.rcParams['legend.fontsize'] = 'small'
        if square_inches > 5: ax.legend()
        pl.title("%d:%s" % (self.vardict['ShotNo'], self.vardict['Name']))
Example #13
def get_basic_diagnostics(diags=None,
                          shot=54196,
                          times=None,
                          delay=None,
                          exception=False,
                          debug=0):
    """ return a list of np.arrays of normally numeric values for the 
    times given, for the given shot.
    Will access server if env('IGETFILE') points to an exe, else accesses cache
    """

    global HJ_summary
    # if no exception given and we are not debugging
    # note - exception=None is a valid entry, meaning tolerate no exceptions
    # so the "default" we use is False
    if exception == False and debug == 0: exception = Exception

    if diags is None: diags = "<n_e19>,b_0,i_p,w_p,dw_pdt,dw_pdt2".split(',')
    if len(np.shape(diags)) == 0: diags = [diags]
    # LHD only    if delay is None: delay = get_delay(shot)

    if times is None:
        times = np.linspace(0, 4, 4000)

    times = np.array(times)
    vals = {}
    # create an extra time array to allow a cross check
    vals.update({'check_tm': times})
    vals.update({'check_shot': np.zeros(len(times), dtype=int) + shot})
    debug_(pyfusion.DEBUG, 2, key='get_basic')
    for diag in diags:
        if not (diag in file_info):
            warn('diagnostic {0} not found in shot {1}'.format(diag, shot),
                 stacklevel=2)
            vals.update({diag: np.nan + times})
            debug_(pyfusion.DEBUG, 2, key='get_basic')
        else:
            info = file_info[diag]
            varname = info['name']
            infofmt = info['format']
            subfolder = infofmt.split('@')[0]
            filepath = os.path.sep.join(
                [localigetfilepath, subfolder, infofmt])
            if ':' in varname: (oper, varname) = varname.split(':')
            else: oper = None

            if '(' in varname:
                try:
                    left, right = varname.split('(')
                    varname, rest = right.split(')')
                except:
                    raise ValueError(
                        'in expression {v} - parens?'.format(v=varname))
            if infofmt.find('.npz') > 0:
                try:
                    test = HJ_summary.keys()
                except:
                    csvfilename = acq_HJ + '/' + infofmt
                    if pyfusion.DBG() > 1:
                        print('looking for HeliotronJ summary in ' +
                              csvfilename)
                    print('reloading {0}'.format(csvfilename))
                    HJ_summary = np.load(csvfilename)

                val = HJ_summary[varname][shot]
                valarr = np.double(val) + (times * 0)
            elif 'get_static_params' in infofmt:
                pdicts = eval(infofmt.format(shot=shot))
                if len(pdicts) == 0:
                    print('empty dictionary returned')

                val = pdicts[varname]
                valarr = np.double(val) + (times * 0)
            else:  # read signal from data system
                debug_(max(pyfusion.DEBUG, debug), level=4, key='find_data')
                try:
                    #get HJparams
                    channel = info['name']
                    outdata = np.zeros(1024 * 2 * 256 + 1)
                    channel_length = (len(outdata) - 1) // 2
                    # outdfile only needed for opt=1 (get data via temp file)
                    # with tempfile.NamedTemporaryFile(prefix="pyfusion_") as outdfile:
                    ierror, getrets = gethjdata.gethjdata(shot,
                                                          channel_length,
                                                          info['name'],
                                                          verbose=VERBOSE,
                                                          opt=1,
                                                          ierror=2,
                                                          outdata=outdata,
                                                          outname='')

                    if ierror != 0:
                        raise LookupError('data not found for {s}:{c}'.format(
                            s=shot, c=channel))
                    ch = Channel(info['name'], Coords('dummy', (0, 0, 0)))
                    # timebase in secs (was ms in raw data)
                    dg = TimeseriesData(timebase=Timebase(1e-3 *
                                                          getrets[1::2]),
                                        signal=Signal(getrets[2::2]),
                                        channels=ch)
                except exception as reason:
                    if debug > 0:
                        print('exception running gethjdata {r} {a}'
                              .format(r=reason, a=reason.args))
                    dg = None
                    #break  # give up and try next diagnostic
                if dg is None:  # messy - break doesn't do what I want?
                    valarr = None
                else:
                    nd = 1  # initially only deal with single channels (HJ)
                    # get the column(s) of the array corresponding to the name
                    w = [0]
                    if (oper in 'sum,average,rms,std,max,min'.split(',')):
                        if oper == 'sum': op = np.sum
                        elif oper == 'average': op = np.average
                        elif oper == 'min': op = np.min
                        elif oper == 'max': op = np.max
                        elif oper == 'std': op = np.std
                        else:
                            raise ValueError(
                                'operator {o} in {n} not known to get_basic_diagnostics'
                                .format(o=oper, n=info['name']))
                        # valarr = op(dg.data[:,nd+w],1)
                        valarr = op(dg.data[:, nd + w], 1)
                    else:
                        if len(w) != 1:
                            raise LookupError(
                                'Need just one instance of variable {0} in {1}'
                                .format(varname, dg.filename))
                        dg.data = dg.signal  # fudge compatibility
                        if len(np.shape(dg.data)) != 1:  # 2 for igetfile
                            raise LookupError(
                                'insufficient data for {0} in {1}'.format(
                                    varname, dg.filename))

                        #valarr = dg.data[:,nd+w[0]]

                    #tim =  dg.data[:,0] - delay
                    valarr = dg.signal
                    tim = dg.timebase

                    # fudge until we can get the number of points
                    valarr = valarr[:np.argmax(tim)]
                    tim = tim[:np.argmax(tim)]

                    if oper == 'ddt':  # derivative operator
                        valarr = np.diff(valarr) / (np.average(np.diff(tim)))
                        tim = (tim[0:-1] + tim[1:]) / 2.0

                    if oper == 'ddt2':  # abs(ddw)*derivative operator
                        dw = np.diff(valarr) / (np.average(np.diff(tim)))
                        ddw = np.diff(dw) / (np.average(np.diff(tim)))
                        tim = tim[2:]
                        valarr = 4e-6 * dw[1:] * np.abs(ddw)

                    if (len(tim) < 10) or (np.std(tim) < 0.1):
                        raise ValueError('Insufficient points or degenerate '
                                         'timebase data in {0}, {1}'.format(
                                             varname, dg.filename))

                    valarr = (stineman_interp(times, tim, valarr))
                    w = np.where(times > max(tim))
                    valarr[w] = np.nan

            if valarr is not None: vals.update({diag: valarr})
    debug_(max(pyfusion.DEBUG, debug), level=5, key='interp')
    return (vals)
Example #14
File: plots.py  Project: bdb112/pyfusion
def plot_spectrogram(input_data,
                     windowfn=None,
                     units='kHz',
                     channel_number=0,
                     filename=None,
                     coloraxis='now is clim!',
                     clim=None,
                     xlim=None,
                     ylim=None,
                     noverlap=0,
                     NFFT=None,
                     suptitle='shot {shot}',
                     title=None,
                     sharey=True,
                     sharex=True,
                     n_columns=None,
                     raw_names=False,
                     hspace=None,
                     labelfmt="{short_name} {units}",
                     filldown=False,
                     hold=None,
                     **kwargs):
    """    Plot a spectrogram 
      NFFT  (defaults to the Plots/NFFT config entry, else 2048)
      noverlap  (integer - number of samples overlapped; float - 1.0 -> half NFFT)
      windowfn  (defaults to pl.window_hanning)
      coloraxis - gets from pyfusion.config.get('Plots')

    Accepts multi or single channel data (I think?)
    Title will be auto generated: if supplied, include '+' to include the auto-generated part
    To suppress, use title=' ' (one space)
    clim:  [None] auto clim, 'show' - label each graph with clims
    Returns ax_list which can be used for clims or other manipulations
    """
    import pylab as pl

    # can't recurse as this is a signal   input_data[chan.name].plot_specgram()

    if hold is not None and hold == 0:
        pl.figure()
    if windowfn is None: windowfn = pl.window_hanning

    # look in the config file section Plots for NFFT = 1234
    # Dave - how about a method to allow this in one line
    # e.g. pyfusion.config.numgetdef('Plots','NFFT', 2048)
    # usage:
    # if (NFFT is None): NFFT = pyfusion.config.numgetdef('Plots','NFFT', 2048)
    #
    # also nice to have pyfusion.config.re-read()
    if NFFT is None:
        try:
            NFFT = (int(pyfusion.config.get('Plots', 'NFFT')))
        except:
            NFFT = 2048

    print(NFFT)
    if units.lower() == 'khz': ffact = 1000.
    else: ffact = 1.
    xextent = (min(input_data.timebase), max(input_data.timebase))

    n_pics = input_data.signal.n_channels()  # doesn't work with fftd data
    if n_columns is None:
        n_columns = int(0.8 + np.sqrt(n_pics))
    n_rows = int(round(0.49 + (n_columns / float(n_pics))))
    while n_rows * n_columns < n_pics:
        n_rows += 1

    if (n_rows > 3) and (hspace is None):
        hspace = 0.001  # should be 0, but some plots omitted if
        #exactly zero - fixed in matplotlib 1
    if pyfusion.VERBOSE > 3: print(str(n_rows) + ' ' + str(n_columns))

    fontkwargs = {'fontsize': 'small'}
    # True is the only sensible indicator I can think of that we want intelligent defaults
    displace = ''  # doesn't make sense for spectra, as they are usually squarish

    axcount = -1  # so the first will be 0
    ax_list = []  #  We don't use subplots(), because we want control of sharey
    for row in range(n_rows):
        for col in range(n_columns):
            axcount += 1
            # natural sequence for subplot is to fill across l-r, then top-down
            subplot_num = row * n_columns + col

            # we often want to fill downwards for simple arrays - especially if comparing with a 3x16 array
            if filldown: chan_num = col * n_rows + row
            else: chan_num = row * n_columns + col

            #print(chan_num, subplot_num, col, row)
            if chan_num >= input_data.signal.n_channels(): break
            if pyfusion.VERBOSE > 3: print(subplot_num + 1, chan_num)

            if axcount == 0:
                # note - sharex=None is required so that overlays can be done
                if n_rows * n_columns == 1:
                    axlead = pl.gca()  # this allows plotting on existing axis for a single plot
                else:
                    axlead = pl.subplot(n_rows,
                                        n_columns,
                                        subplot_num + 1,
                                        sharex=None)
                axn = axlead
                axlead_x = axlead if sharex else None
            else:
                if axcount >= sharey:
                    axn = pl.subplot(n_rows,
                                     n_columns,
                                     subplot_num + 1,
                                     sharex=axlead_x,
                                     sharey=axlead)
                else:  # another noshare y, but sharex
                    axn = pl.subplot(n_rows,
                                     n_columns,
                                     subplot_num + 1,
                                     sharex=axlead_x)
                    axlead = axn

            noverlap = noverlap if isinstance(noverlap, int) else int(
                round(NFFT / (1 + 1. / (1e-6 + noverlap))))

            (specarr, freqs, t, im) = \
                axn.specgram(input_data.signal.get_channel(chan_num),
                             NFFT=NFFT, noverlap=noverlap,
                             Fs=input_data.timebase.sample_freq/ffact,
                             window=windowfn, xextent=xextent, **kwargs)
            ax_list.append(axn)
            # Used to be (incorrectly) coloraxis
            if pyfusion.VERBOSE > 2:
                print(
                    'data/plot_spectrogram: noverlap={no}, {nt} time segs, {nf} freqs'
                    .format(no=noverlap, nt=len(t), nf=len(freqs)))
            if xlim is not None:
                axn.set_xlim(xlim)
            if ylim is not None:
                axn.set_ylim(ylim)
            if clim is not None and clim != 'show': im.set_clim(clim)
            else:
                try:
                    pl.clim(eval(pyfusion.config.get('Plots', 'coloraxis')))
                except:
                    pass

            if labelfmt is not None:
                if len(make_title(labelfmt, input_data, 0,
                                  raw_names=raw_names)) > 11:
                    mylabel = pl.xlabel
                else:
                    mylabel = pl.ylabel

            lab = make_title(labelfmt + displace, input_data, chan_num)
            mylabel(lab, **fontkwargs)

            # look in the config file section Plots for a string like
            # FT_Axis = [0,0.08,0,500e3]   don't quote
            exceptions_to_hide = Exception if pyfusion.DBG() < 3 else ()
            try:
                #pl.axis(eval(pyfusion.config.get('Plots','FT_Axis')))
                # this is clumsier now we need to consider freq units.
                axt = eval(pyfusion.config.get('Plots', 'FT_Axis'))
                set_axis_if_OK(pl.gca(), axt[0:2], np.array(axt[2:]) / ffact)
            except exceptions_to_hide:
                pass
            # but override X if we have zoomed in bdb
            if 'reduce_time' in input_data.history:
                pl.xlim(np.min(input_data.timebase), np.max(input_data.timebase))

            try:
                tit = str("{s}, {c}".format(
                    s=input_data.meta['shot'],
                    c=input_data.channels[chan_num].config_name))
            except:
                tit = str("{s}, {c}".format(s=input_data.meta['shot'],
                                            c=input_data.channels.name))
            if title is None or title == '':  # get the default title
                pass  # tit is the default
            else:
                tit = title.replace('+', tit)
            # No titles when there are more than 3 rows - maybe better to have no
            #   titles at all and use the legend (without frame) to show the channel
            #   number instead of the ytitle.
            if n_rows <= 3:  # keep consistent with the n_rows > 3 case above
                pl.title(tit)
            if clim == 'show':
                axn.legend([], [],
                           frameon=0,
                           title=str(
                               np.round(axn.get_images()[0].get_clim(), 1)))

    # ======== end of plot loop
    if suptitle is not None:
        try:
            suptitlestr = (suptitle.format(**input_data.meta))
        except:
            suptitlestr = ''
            debug_(pyfusion.DEBUG,
                   1,
                   key='plot_signal_suptitle',
                   msg=' input metadata [{m}] does not have a '
                   'key for suptitle [{s}]'.format(m=input_data.meta,
                                                   s=suptitle))

        if suptitlestr != '': pl.suptitle(suptitlestr)
    if hspace is not None:  # adjust vertical spacing between plots
        pl.gcf().subplotpars.hspace = hspace
        pl.gcf().subplotpars.bottom = hspace + 0.08  # was 0.04
        extratop = 0.01
        if suptitle: extratop += 0.04  # only if there really is a suptitle
        pl.gcf().subplots_adjust(
            top=1 - (hspace + extratop))  # allow a little room for title

    if filename is not None:
        pl.savefig(filename)
    else:
        pl.show(block=0)

    return (ax_list)
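The noverlap handling near the top of the plot loop above is terse: an integer is used directly as a sample count, while a fractional value is converted relative to NFFT. Below is a minimal standalone sketch of that conversion; the name resolve_noverlap and the default NFFT value are illustrative, not part of pyfusion.

def resolve_noverlap(noverlap, NFFT=2048):
    """ return noverlap as a sample count, mirroring the conversion in the plot loop """
    if isinstance(noverlap, int):
        return noverlap  # already a sample count
    # fractional overlap -> samples; the 1e-6 guards against a zero fraction
    return int(round(NFFT / (1 + 1. / (1e-6 + noverlap))))

print(resolve_noverlap(512))   # -> 512
print(resolve_noverlap(0.5))   # -> 683 for NFFT=2048 (same formula as in the loop)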
Example #15
def get_flat_top(shot=54196,
                 times=None,
                 smooth_dt=None,
                 maxddw=None,
                 hold=0,
                 debug=0):
    """ debug=1 gives a plot
    """
    if times is None: times = np.linspace(0.02, 8, 8020)
    from pyfusion.data.signal_processing import smooth

    bp = get_basic_diagnostics(shot=shot,
                               diags=['w_p', 'dw_pdt', 'b_0'],
                               times=times)
    # assume sign is OK - at the moment, the code to fix sign is in merge
    # but it is inactive.  Probably should be in get_basic_diag..
    # so far, it seems that w_p and i_p are corrected - not sure
    # about other flux loops.
    w_p = bp['w_p']
    dw = bp['dw_pdt']
    w = np.where(w_p < 1e6)[0]  # excludes nans, since comparisons with nan are False
    cent = np.sum(w_p[w] * times[w]) / np.sum(w_p[w])
    icent = np.where(times > cent)[0][0]
    print("centroid = {0:.1f}".format(cent))
    if maxddw is None: maxddw = 100
    if smooth_dt is None: smooth_dt = 0.1  # smooth over 0.1 sec
    dt = (times[1] - times[0])
    ns = int(smooth_dt / dt)
    smootharr = [ns, ns, ns]
    offs = len(smootharr) * ns  # correction for the smoothing offset
    dwsm = smooth(dw, n_smooth=smootharr)  # smooth dwdt
    ddw = np.diff(dwsm) / dt  #second deriv
    # work away from the centroid until 2nd deriv exceeds maxddw
    # assume 100kJ /sec is ramp, and a change of this over a second

    wb = int(0.5 * offs) + np.nanargmax(dwsm)
    we = int(0.1 * offs) + np.nanargmin(dwsm)  #
    wpmax = np.nanmax(w_p)
    # used to be maxddw - too restrictive now try dwsm
    # note: integer division (//) so the slice indices stay ints under python 3
    wgtrev = np.where(
        np.abs(dwsm[icent - offs // 2::-1]) > maxddw * wpmax / 100)[0]
    wgtfor = np.where(
        np.abs(dwsm[icent - offs // 2:]) > maxddw * wpmax / 100)[0]
    if (len(wgtrev) < 10) or (len(wgtfor) < 10):
        print('*** flat_top not found on shot {s}'.format(s=shot))
        return (0, 0, (0, 0, 0, 0, 0))

    wbf = icent - wgtrev[0]
    wef = icent + wgtfor[0]
    if debug > 0:
        pl.plot(w_p, label='w_p', hold=hold)
        pl.plot(ddw, label='ddw')
        pl.plot(dwsm, linewidth=3, label='sm(dw)')
        pl.plot(dw / 10, label='dw_pdt/10')
        pl.scatter([wb, wbf, icent, wef, we], [0, 200, 300, 250, 275])
        pl.plot([wb, we], [0, 0], label='b--e')
        pl.plot([wbf, wef],
                np.ones(2) * maxddw * wpmax / 100,
                'o-',
                linewidth=2,
                label='bf-ef')
        pl.ylim(np.array([-1.1, 1.1]) * max(abs(dwsm)))
        pl.title(shot)
        pl.legend()
    debug_(max(pyfusion.DBG(), debug), 2, key='flat_top')
    #return(times[wb], times[we],(wb,we,wbf,wef,icent)) # used to ignore wbf,wef
    return (times[wbf], times[wef], (wb, we, wbf, wef, icent))
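A hedged usage sketch of get_flat_top: it assumes the module-level imports the excerpt does not show (np, pl, get_basic_diagnostics, debug_, pyfusion) and that the diagnostic data for the shot is reachable; the shot number is simply the function's default. debug=1 produces the diagnostic plot, but the hold= keyword in the plotting code requires an older matplotlib.

# hypothetical call - needs the data behind get_basic_diagnostics to be available
t_start, t_end, (wb, we, wbf, wef, icent) = get_flat_top(shot=54196)
print('flat top from {0:.2f}s to {1:.2f}s'.format(t_start, t_end))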
Example #16
import os, sys
import pyfusion

""" Find binaries and compile if required

   The binaries are different for python2 and 3 etc, so name gethjdata accordingly
    We need to import different modules as gethjdata . To facilitata this, we need a 
   text-based import command that allows an 'as' cluase.
   This module exports either the standard import_module, or a home-made one, if the
   standard one will not do what we want, which seems to be the case

"""

# this is the right way, but I can't load the module under another name this way
#    from importlib import import_module
if pyfusion.DBG() > 0: 
    print("Can't load via official import_module, trying a workaround using exec()")

def import_module(modstr, alt_name=None, dict1=None):
    if alt_name is None: alt_name = modstr
    if dict1 is None:
        raise Exception('need a dictionary in dict1 (usually locals())')
    else:
        exec('from . import {m} as {a}'.format(m=modstr, a=alt_name), globals(), dict1)

        


"""
#works
exec('import pyfusion.acquisition.HeliotronJ.gethjdata2_7 as gethjdata')
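A minimal standalone sketch of the same exec()-based "import ... as ..." trick, using a stdlib module so it runs anywhere; the real import_module above uses a package-relative 'from . import', so it only works from inside the HeliotronJ package with the compiled extension present. The names import_as and ns below are illustrative only.

def import_as(modstr, alt_name, dict1):
    # bind the module named modstr under alt_name inside dict1 (e.g. locals())
    exec('import {m} as {a}'.format(m=modstr, a=alt_name), globals(), dict1)

ns = {}
import_as('os.path', 'osp', ns)
print(ns['osp'].join('tmp', 'data'))   # -> 'tmp/data' on POSIX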
Example #17
def try_discretise_array(arr,
                         eps=0,
                         bits=0,
                         deltar=None,
                         verbose=0,
                         delta_encode=False,
                         unique=False):
    """
    Return an integer array and scales etc in a dictionary 
    - the dictionary form allows for added functionality.
    If bits=0, find the natural accuracy.  eps defaults to 1e-6
    """
    if verbose > 2: import pylab as pl
    if eps == 0: eps = 1e-6
    mono = (diff(arr) > 0).all()  # maybe handy later? esp. debugging
    if deltar is None:
        # don't sort if the data is a timebase (unique)
        data_sort = arr if unique else np.unique(arr)
        diff_sort = sort(
            diff(data_sort))  # don't want uniques because of noise
        if size(diff_sort) == 0: diff_sort = [0]  # in case all the same
        # with real representation, there will be many diffs ~ eps - 1e-8
        # or 1e-15*max - try to skip over these
        #  will have at least three cases
        #    - timebase with basically one diff and all diffdiffs in the noise
        #    - data with lots of diffs and lots of diffdiffs at a much lower level

        min_real_diff_ind = (diff_sort > max(diff_sort) / 1e4).nonzero()
        if size(min_real_diff_ind) == 0: min_real_diff_ind = [[0]]
        #      min_real_diff_ind[0] is the array of indices satisfying that condition
        if verbose > 1:
            print("min. real difference indices = ", min_real_diff_ind)
        #discard all preceding this
        diff_sort = diff_sort[min_real_diff_ind[0][0]:]
        deltar = diff_sort[0]
        diff_diff_sort = diff(diff_sort)
        # now look for the point where the diff of differences first exceeds half the current estimate of difference

        # the diff of differences should just be the discretization noise
        # by looking further down the sorted diff array and averaging over
        # elements which are close in value to the min real difference, we can
        # reduce the effect of discretization error.
        large_diff_diffs_ind = (abs(diff_diff_sort) > deltar / 2).nonzero()
        if size(large_diff_diffs_ind) == 0:
            last_small_diff_diffs_ind = len(diff_sort) - 1
        else:
            first_large_diff_diffs_ind = large_diff_diffs_ind[0][0]
            last_small_diff_diffs_ind = first_large_diff_diffs_ind - 1

        # When the step size is within a few orders of representation
        # accuracy, problems appear if there a systematic component in
        # the representational noise.

        # Could try to limit the number of samples averaged over,
        # which would be very effective when the timebase starts from
        # zero.  MUST NOT sort the difference first in this case!
        # Better IF we can reliably detect single rate timebase, then
        # take (end-start)/(N-1) if last_small_diff_diffs_ind>10:
        # last_small_diff_diffs_ind=2 This limit would only work if
        # time started at zero.  A smarter way would be to find times
        # near zero, and get the difference there - this would work
        # with variable sampling rates provided the different rates
        # were integer multiples.  another trick is to try a power of
        # 10 times an integer. (which is now implemented in the calling routine)

        # Apr 2010 - fixed bug for len(diff_sort) ==  1  +1 in four places
        # like [0:last_small_diff_diffs_ind+1] - actually a bug for all, only
        # obvious for len(diff_sort) ==  1
        if pyfusion.DBG():
            print('last_small_diff_diffs_ind', last_small_diff_diffs_ind)
        debug_(pyfusion.DEBUG, 2, key='last_small')
        if last_small_diff_diffs_ind < 0:
            print('last_small_diff_diffs_ind = {lsdd} - error?  continuing...'.
                  format(lsdd=last_small_diff_diffs_ind))
            deltar, peaknoise, rmsnoise = 0, 0, 0
        else:
            deltar = mean(diff_sort[0:last_small_diff_diffs_ind + 1])
            peaknoise = max(
                abs(diff_sort[0:last_small_diff_diffs_ind + 1] - deltar))
            rmsnoise = std(diff_sort[0:last_small_diff_diffs_ind + 1] - deltar)
        pktopk = max(arr) - min(arr)
        if (verbose > 0) or (peaknoise / pktopk > 1e-7):
            print(
                'over averaging interval relative numerical noise ~ %.2g pk, %.2g RMS'
                % (peaknoise / pktopk, rmsnoise / pktopk))

        if verbose > 2:
            st = str(
                "save_compress trying to discretise\naveraging over %d diff diffs meeting criterion < %g "
                % (last_small_diff_diffs_ind, deltar / 2))
            print(st)
            pl.plot(diff_sort, hold=0)
            pl.title(st)
            pl.show()
        if verbose > 10:
            dbg = 0
            dbg1 = 1 / dbg  # deliberate divide-by-zero - acts as a crude breakpoint

    if verbose > 1: print('seems like minimum difference is %g' % deltar)
    iarr = (0.5 + (arr - min(arr)) / deltar).astype('i')
    remain = iarr - ((arr - min(arr)) / deltar)
    remainck = mod((arr - min(arr)) / deltar, 1)

    # remain is relative to unit step, need to scale back down, over whole array
    maxerr = max(abs(remain)) * deltar / (max(arr) - min(arr))
    # not clear what the max expected error is - small for 12 bits, gets larger quickly
    if (verbose > 2) and maxerr < eps: print("appears to be successful")
    if verbose > 0:
        print('maximum error with eps = %g, is %g, %.3g x eps' %
              (eps, maxerr, maxerr / eps))

    # only use unsigned ints if we are NOT delta_encoding and signal >0
    if (not delta_encode and min(iarr) >= 0):
        if max(iarr) < 256:
            iarr = iarr.astype(uint8)
            if verbose > 1: print('using 8 bit uints')

        elif max(iarr) < 16384:
            iarr = iarr.astype(uint16)
            if verbose > 1: print('using 16 bit uints')

    else:
        if max(iarr) < 128:
            iarr = iarr.astype(int8)
            if verbose > 1: print('using 8 bit ints')

        elif max(iarr) < 8192:  # why is this so conservative?  I would think 32766
            iarr = iarr.astype(int16)
            if verbose > 1: print('using 16 bit ints')
    # if not any of the above, stays as an int32

    return ({
        'iarr': iarr,
        'maxerror': maxerr,
        'deltar': deltar,
        'minarr': min(arr),
        'intmax': max(iarr)
    })
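A hedged usage sketch for try_discretise_array: the body above relies on numpy names (diff, sort, size, mean, std, mod, uint8, ...) being imported at module level in the original source, which this excerpt does not show. The test signal below is illustrative only - a ramp with a 0.01 step plus a little representation-scale noise.

import numpy as np

arr = 0.01 * np.arange(4096) + 1e-12 * np.random.standard_normal(4096)
res = try_discretise_array(arr, verbose=1)
print(res['deltar'], res['iarr'].dtype, res['maxerror'])   # expect deltar ~ 0.01, uint16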
Example #18
def read_text_pyfusion(files, target=b'^Shot .*', ph_dtype=None, plot=pl.isinteractive(), ms=100, hold=0, debug=0, quiet=1,  maxcpu=1, exception = Exception):
    """ Accepts a file or a list of files, returns a list of structured arrays
    See merge ds_list to merge and convert types (float -> pyfusion.prec_med)
    """
    regulator = pyfusion.utils.Regulator(maxcpu)
    st = seconds(); last_update=seconds()
    file_list = files
    if len(np.shape(files)) == 0: file_list = [file_list]
    f='f8'
    if ph_dtype is None: ph_dtype = [('p12',f),('p23',f),('p34',f),('p45',f),('p56',f)]
    #ph_dtype = [('p12',f)]
    ds_list =[]
    comment_list =[]
    count = 0
    for (i,filename) in enumerate(file_list):
        regulator.wait()
        if seconds() - last_update > 10:
            last_update = seconds()
            tot = len(file_list)
            print('read {n}/{t}: ETA {m:.1f}m {f}'
                  .format(f=filename, n=i, t=tot,
                          m=(seconds()-st)*(tot-i)/float(60*max(i, 1))))

        try:
            if (isinstance(target,str) or isinstance(target,bytes)): 
                skip = 1+find_data(filename, target, debug=debug)
            elif isinstance(target, int): 
                skip = target
            else:
                raise Exception('target ({target}) is not recognised'.format(target=target))
            if quiet == 0:
                print('{t:.1f} sec, loading data from line {s} of {f}'
                      .format(t = seconds()-st, s=skip, f=filename))
            #  this little bit to determine layout of data
            # very inefficient to read twice, but in a hurry!
            if debug>2: print('skiprows = \n', skip-1)
            txt = np.loadtxt(fname=filename, skiprows=skip-1, dtype=bytes, 
                             delimiter='FOOBARWOOBAR',ndmin=1)
            header_toks = txt[0].split()
            # look for a version number first
            if header_toks[-1][-1] in b'0123456789.':
                version = float(header_toks.pop())
                if b'ersion' not in header_toks.pop():
                    raise ValueError('Error reading header in {f}'
                                     .format(f=filename))
            else: version=-1  # pre Aug 12 2013
            # noticed that the offset moved in 2015 - when did it happen?
            phase_offs = -4 if sys.version > '3.' else -2
            # is the first character of the 2nd last a digit?
            if header_toks[phase_offs][0] in b'0123456789': 
                if pyfusion.VERBOSE > 0: 
                    print('header toks', header_toks)
                    print('found new header including number of phases')
                n_phases = int(header_toks[phase_offs])
                ph_dtype = [('p{n}{np1}'.format(n=n,np1=n+1), f) for n in range(n_phases)]
                
            if 'frlow' in header_toks:  # add the two extra fields
                fs_dtype= [ ('shot','i8'), ('t_mid','f8'), 
                            ('_binary_svs','u8'),    # f16 - really want u8 here,  but npyio 
                                                      #has problem converting 10000000000000000000000000
                                                      #OverflowError: Python int too large to convert to C long
                                                      # doesn't happen if text is read in directly with loadtxt
                            ('freq','f8'), ('amp', 'f8'), ('a12','f8'),
                            ('p', 'f8'), ('H','f8'), 
                            ('frlow','f8'), ('frhigh', 'f8'),('phases',ph_dtype)]
            else:
                fs_dtype= [ ('shot','i8'), ('t_mid','f8'), 
                            ('_binary_svs','u8'), 
                            ('freq','f8'), ('amp', 'f8'), ('a12','f8'),
                            ('p', 'f8'), ('H','f8'), ('phases',ph_dtype)]

            if version > 0.69:  # don't rely on precision
                fs_dtype.insert(-1,('cpkf', 'f8'))  # -1 is 1 before the end
                fs_dtype.insert(-1,('fpkf', 'f8'))  # they appear in this order
                
            if pyfusion.VERBOSE > 0: 
                print(version, fs_dtype, '\n')

            ds = np.loadtxt(fname=filename, skiprows = skip, 
                            dtype= fs_dtype, ndmin=1)  # ENSURE a 1D array

            if len(ds) > 0:
                ds_list.append(ds)
                count += 1
                # npz reads in python 2 can't cope with unicode - don't report errors unless really debugging
                comment_list.append(filename.encode(errors=['ignore','strict'][pyfusion.DBG() > 5]))
            else:
                print('no data in {f}'.format(f=filename))

        except ValueError as reason:
            print('Conversion error while processing {f} with loadtxt - {reason} {args}'
                  .format(f=filename, reason=reason, args=reason.args))
            traceback.print_exc()

        except exception as info:
            print('Other exception while reading {f} with loadtxt - {info} {a}'.format(f=filename, info=info, a=info.args))
            traceback.print_exc()
    print("{c} out of {t} files".format(c=count, t=len(file_list)))
    if plot>0 and len(ds_list)>0: 
        plot_fs_DA(ds_list[0], ms=ms)
    return(ds_list, comment_list)
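A hedged usage sketch for read_text_pyfusion: the excerpt assumes module-level imports (np, pl, sys, traceback) and pyfusion helpers (seconds, find_data, plot_fs_DA, pyfusion.utils.Regulator) that are not shown, and the file name below is a placeholder, not a real data file.

# hypothetical call on one pre-existing flucstruc text file
ds_list, comments = read_text_pyfusion(['PF2_flucstrucs.txt'], plot=0, quiet=0)
if len(ds_list) > 0:
    print(ds_list[0].dtype.names)   # e.g. ('shot', 't_mid', '_binary_svs', ...)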