Example #1
File: sheq.py Project: pdvanmeter/meSXR
def sheq_averages(shot, time, return_all=False):
    """
    Returns flux-surface averaged quantities of physical significance.
    
    Inputs:
        - shot = MST shot number, corresponding to an existing NCT run.
        - time = Time point in ms, corresponding to an existing NCT run.
        - return_all = Set to True to instead return the full sh_averages.pro structure.
    """
    idl = pidly.IDL()
    idl('.r /home/pdvanmeter/lib/idl/startup.pro')
    idl('.r run_sheq')
    idl('averages = sheq_flux_avg({0:}, {1:})'.format(shot, time))
    avg = AttrDict(idl.ev('averages'))
    idl.close()
    
    if return_all:
        return avg
    else:
        return AttrDict({
            'rhop':avg.rhop,
            'rhoh':avg.rhoh,
            'J_pol':avg.jpol/1e6,
            'J_tor':avg.jtor/1e6,
            'B_pol':avg.bpol,
            'B_tor':avg.btor,
            'B':avg.b,
            'B2':avg.b2,
            'JB':-avg.jb/1e6,
            'J_para':-avg.jb/avg.b/1e6,
            'mua':-avg.mua,
            'q':-avg.q,
            'delta':float(avg.delta_axis)
        })
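A minimal usage sketch, assuming the pidly/IDL environment used above and an existing NCT run; the shot number and time point below are placeholders:

shot, time = 1160726001, 22.0                        # hypothetical shot number and time [ms]
avg = sheq_averages(shot, time)
print(avg.q[:5], avg.delta)                          # safety factor profile and axis shift
full = sheq_averages(shot, time, return_all=True)    # full sh_averages.pro structure instead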
Example #2
    def __init__(self,
                 shot,
                 time_ms,
                 delta_t=1.0,
                 diagnostics=None,
                 load_shot=True,
                 woutfile='/home/boguski/IDSII_local/wout_inb.nc'):
        # Load the VMEC reconstruction file
        self.woutfile = woutfile
        grid = vmec.get_VMEC_grid(woutfile=self.woutfile)
        self.R0 = 1.5  #grid['R0']

        # Set up the VMEC grid
        self.R_flux = grid['R_flux']
        self.Z_flux = grid['Z_flux']
        self.v_arr = grid['v_arr']
        self.u_arr = grid['u_arr'][0, :, 0]
        self.s_arr = grid['s_arr']

        # Get the magnetic phase
        mags = ops.get_magnetics(shot, delta_t=delta_t)
        tn = np.argmin(np.abs(mags['Time'] - time_ms))
        self.delta_n = mags['BP']['N05']['Phase'][tn]

        # Check for default diagnostics
        self.diagnostics = {}
        if diagnostics is None:
            diagnostics = default_diagnostics

        # Add flux surface profiles
        self.flux = AttrDict({})
        for name in diagnostics:
            self.add_diagnostic(name, diagnostics[name])
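A constructor sketch. The enclosing class is not shown in this excerpt, so "Equilibrium" below is a purely hypothetical stand-in, and the shot and time are placeholders; the VMEC wout file and ops/vmec modules from the project are assumed to be available:

eq = Equilibrium(1160726001, 22.0, delta_t=1.0)   # 'Equilibrium' is a hypothetical class name
print(eq.delta_n, eq.R0)                          # n=5 magnetic phase at the requested time, major radius
print(eq.s_arr.shape)                             # VMEC radial grid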
Example #3
File: nickal2.py Project: pdvanmeter/meSXR
def load_brightness(shot_num, t_start=8.0, t_end=28.0, delta=0.1, smooth=10.0):
    """
    Function: st = load_brightness(shot_num, t_start, t_end, delta, smooth)
        This version of load_brightness interfaces directly with the IDL implementation, via the pidly interface.
    Inputs:
        - shot_num = [INT] The MST shot ID for the desired set of data
        - t_start = [FLOAT] The start time for the desired interval of SXR data.
        - t_end = [FLOAT] The end time for the desired interval of SXR data.
        - delta = [FLOAT] The desired sampling window for SXR data.
        - smooth = [FLOAT] The size of the smoothing window (10.0 is standard).
    Outputs:
        - st['key'] = [DICT] Nested dictionary containing the SXR tomography diagnostic data, indexed by camera label.
    """
    # Access (and initialize, if needed) the pidly object and assemble the command string
    idl = pidly.IDL()
    idl_str = "n2d = NICKAL2_signal(" + str(shot_num) + ", tstart=" + str(
        t_start)
    idl_str += ", tend=" + str(t_end) + ", delta=" + str(
        delta) + ", sm=" + str(smooth) + ")"
    idl('cd, "/home/pdvanmeter/lib/idl"')
    idl('.r NICKAL2_signal')
    idl(idl_str)

    # Extract the data from IDL and format
    data = {
        'AlBe': idl.ev('n2d.data.al'),
        'SiBe': idl.ev('n2d.data.si'),
        'ZrMylar': idl.ev('n2d.data.zr')
    }

    error = {
        'AlBe': idl.ev('n2d.err.al'),
        'SiBe': idl.ev('n2d.err.si'),
        'ZrMylar': idl.ev('n2d.err.zr')
    }

    noise = {
        'AlBe': idl.ev('n2d.noise.al'),
        'SiBe': idl.ev('n2d.noise.si'),
        'ZrMylar': idl.ev('n2d.noise.zr')
    }

    tiempo = idl.ev('n2d.time')

    # Put the result together
    st = {'bright': data, 'sigma': error, 'noise': noise, 'time': tiempo}

    idl.close()
    return AttrDict(st)
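A usage sketch, assuming the pidly/IDL environment with NICKAL2_signal compiled; the shot number is a placeholder:

n2 = load_brightness(1160726001, t_start=8.0, t_end=28.0, delta=0.1, smooth=10.0)
print(n2.time[:5])                     # resampled time base [ms]
print(n2.bright['AlBe'][:5])           # brightness for the Al/Be filter
print(n2.sigma['SiBe'][:5], n2.noise['ZrMylar'])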
Example #4
File: sheq.py Project: pdvanmeter/meSXR
def get_flux_grid(shot, time, phi=222.5*deg2rad):
    """
    Use SHEq to directly access the NCT output to evaluate rho on a grid of (R,Z) points.
    
    Inputs:
        - shot = MST shot number, corresponding to an existing NCT run.
        - time = Time point in ms, corresponding to an existing NCT run.
        - phi = Toroidal angle in radians.
    """
    idl = pidly.IDL()
    idl('.r /home/pdvanmeter/lib/idl/startup.pro')
    idl('.r run_sheq')
    idl('flux = run_sheq({0:}, {1:}, phi0={2:})'.format(shot, time, phi))
    flux_grid = idl.ev('flux')
    idl.close()
    
    flux_grid['rho'] = flux_grid['rho'].T
    return AttrDict(flux_grid)
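A usage sketch; the shot and time are placeholders, and the toroidal angle is supplied in radians as the docstring specifies:

import numpy as np
phi = 250.0 * np.pi / 180.0                  # toroidal angle in radians
flux = get_flux_grid(1160726001, 22.0, phi=phi)
print(flux.rho.shape)                        # rho evaluated on the (R, Z) grid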
Example #5
File: nickal2.py Project: pdvanmeter/meSXR
def get_NickAl2_data(shot,
                     t_start=0.0,
                     t_end=60.0,
                     off_start=75.0,
                     off_end=80.0,
                     dt=0.5):
    """
    """
    nickal2_data = {'bright': {}, 'sigma': {}}
    mstTree = MDSplus.Tree('MST', shot, 'READONLY')

    for filt in filters:
        node_str = '\\MST_MISC::DD_{0:}_{1:02d}'.format(*nickal2_probes[filt])
        #node_str = nickal2_nodes[filt]
        sxr_node = mstTree.getNode(node_str)
        sxr_data = sxr_node.getData().data()
        sxr_time = sxr_node.getData().dim_of().data() * 1000.
        sxr_gain = mstTree.getNode(node_str + '_AMP').getData().data()
        #sxr_gain = mstTree.getNode(n2amp_nodes[filt]).getData().data()

        # Convert to brightness units [W / m^2 /sr]
        sxr_data = ((sxr_data / sxr_gain) * 3.63) / eta

        # Subtract off the offset using the end of the shot (much cleaner than before)
        ns = np.argmin(np.abs(sxr_time - off_start))
        nf = np.argmin(np.abs(sxr_time - off_end))
        sxr_data -= np.average(sxr_data[ns:nf + 1])

        # Restrict data to desired interval
        ns = np.argmin(np.abs(sxr_time - t_start))
        nf = np.argmin(np.abs(sxr_time - t_end))
        sxr_data = sxr_data[ns:nf + 1]
        sxr_time = sxr_time[ns:nf + 1]

        # Smooth appropriately
        data_rs, t_rs, sigma = smooth_signal(sxr_data, sxr_time, dt=dt)
        nickal2_data['bright'][filt] = data_rs
        nickal2_data['sigma'][filt] = sigma

    # Time should be the same for each signal
    nickal2_data['time'] = t_rs
    return AttrDict(nickal2_data)
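A usage sketch, assuming MDSplus access to the MST tree and the module-level filters, nickal2_probes, eta, and smooth_signal definitions; the shot number is a placeholder:

n2 = get_NickAl2_data(1160726001, t_start=0.0, t_end=60.0, dt=0.5)
for filt in n2.bright:
    print(filt, n2.bright[filt][:3], n2.sigma[filt][:3])
print(n2.time[:3])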
Example #6
def load_raw_data(shot,
                  avg_bad_pix=True,
                  avg_axis=0,
                  remove_edges=False,
                  key='MST',
                  center=False):
    """
    This function loads the raw image data for a shot from the tree. If projection=True then a
    summation is also performed along the specified axis.
    
    TODO: Allow additional masking of specified pixels (i.e. outer pixels).
    """
    # Get the image data
    mesxr = mds.Tree('mst_me_sxr', shot, 'READONLY')
    mesxr_ext = mds.Tree('me_sxr_ext', shot, 'READONLY')
    images_node = mesxr_ext.getNode(r'.ME_SXR_EXT.IMAGES')
    images = images_node.getData().data()
    time = images_node.dim_of().data()

    # Get the config data
    exp_time = mesxr.getNode(r'.CONFIG:EXPOSUR_TIME').getData().data()
    exp_period = mesxr.getNode(r'.CONFIG:EXPOSUR_PER').getData().data()
    exp_delay = mesxr.getNode(r'.CONFIG:DELAY').getData().data()
    n_images = mesxr.getNode(r'.CONFIG:N_IMAGES').getData().data()
    thresholds = mesxr.getNode(r'.CONFIG:E_THRESH_MAP').getData().data()
    thresholds = np.around(thresholds.astype(float), decimals=1)

    vtrm = mesxr.getNode(r'.CONFIG:V_TRM').getData().data()
    vcmp = mesxr.getNode(r'.CONFIG:V_COMP').getData().data()
    vcca = mesxr.getNode(r'.CONFIG:V_CCA').getData().data()
    vrf = mesxr.getNode(r'.CONFIG:V_RF').getData().data()
    vrfs = mesxr.getNode(r'.CONFIG:V_RFS').getData().data()
    vcal = mesxr.getNode(r'.CONFIG:V_CAL').getData().data()
    vdel = mesxr.getNode(r'.CONFIG:V_DEL').getData().data()
    vadj = mesxr.getNode(r'.CONFIG:V_ADJ').getData().data()

    # Deal with bad pixels - set to zero or average with surrounding pixels
    bad_pixels = mesxr.getNode(r'.CONFIG:BAD_PX_MAP').getData().data()
    bad_pixels[155, 21] = 1
    bad_pixels[179, 76] = 1

    if avg_bad_pix:
        for x in range(NUM_PIX_X):
            for y in range(NUM_PIX_Y):
                if bad_pixels[x, y]:
                    if avg_axis == 0:
                        for frame in range(len(time)):
                            if x == 0:
                                images[x, y, frame] = images[x + 1, y, frame]
                            elif x == NUM_PIX_X - 1:
                                images[x, y, frame] = images[x - 1, y, frame]
                            else:
                                images[x, y, frame] = np.average([
                                    images[x - 1, y, frame], images[x + 1, y,
                                                                    frame]
                                ])
                    elif avg_axis == 1:
                        for frame in range(len(time)):
                            if y == 0:
                                images[x, y, frame] = images[x, y + 1, frame]
                            elif y == NUM_PIX_Y - 1:
                                images[x, y, frame] = images[x, y - 1, frame]
                            else:
                                images[x, y, frame] = np.average([
                                    images[x, y - 1, frame], images[x, y + 1,
                                                                    frame]
                                ])
                    else:
                        print('ERROR: averaging axis not recognized.')

    # Zero out the boundary pixels
    for coords in get_boundary_coords():
        images[coords[0], coords[1], :] = 0

    # Zero out edge pixels if requested
    if remove_edges:
        for coords in get_edge_coords():
            images[coords[0], coords[1], :] = 0

    # Zero out everything but central pixels, if requested
    if center:
        mask = np.zeros([487, 195])
        mask[:, 65:130] = 1
        for frame in range(len(time)):
            images[:, :, frame] *= mask

    settings = {
        'vtrm': vtrm,
        'vcmp': vcmp,
        'vcca': vcca,
        'vrf': vrf,
        'vrfs': vrfs,
        'vcal': vcal,
        'vdel': vdel,
        'vadj': vadj
    }
    config = {
        'exp_time': exp_time,
        'exp_period': exp_period,
        'exp_delay': exp_delay,
        'n_images': n_images,
        'bad_pix': bad_pixels,
        'setdacs': settings
    }

    data_dict = {
        'images': images,
        'time': time,
        'shot': shot,
        'config': config,
        'thresholds': thresholds,
        'rm_edges': remove_edges
    }

    # Load in the impact parameters
    impact_p, impact_phi = get_impact_params()
    data_dict['impact_p'] = impact_p
    data_dict['impact_phi'] = impact_phi

    return AttrDict(data_dict)
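A usage sketch, assuming MDSplus access to the mst_me_sxr and me_sxr_ext trees; the shot number is a placeholder:

raw = load_raw_data(1160726001, avg_bad_pix=True, avg_axis=0, center=True)
print(raw.images.shape, raw.time.shape)   # (x pixels, y pixels, frames) and frame times
print(raw.thresholds.shape)               # per-pixel energy threshold map
print(raw.config['exp_time'], raw.config['n_images'])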
Example #7
def invert_brightness(shot_num,
                      t_start=8.0,
                      t_end=28.0,
                      delta=0.1,
                      smooth=10.0,
                      exclude=[20, 59, 60],
                      thick=False,
                      thin=False,
                      **kwargs):
    """
    Perform a tomographic inversion using the Cormack-Bessel technique. Makes use of Paolo's IDL library.
    Inputs:
        - shot_num = [INT] The MST shot ID for the desired set of data
    Optional:
        - t_start = [FLOAT] The start time for the desired interval of SXR data.
        - t_end = [FLOAT] The end time for the desired interval of SXR data.
        - delta = [FLOAT] The desired sampling window for SXR data.
        - smooth = [FLOAT] The size of the smoothing window (10.0 is standard).
        - exclude = [[INT]] List of logicals which were excluded in the data file. By default excludes the NickAl2
            channels. For older data make sure to still exclude 69.
        - thick = [BOOL] Include only thick filter data.
        - thin = [BOOL] Include only thin filter data.
        - kwargs = Additional keyword arguments are used to change the inversion options (see below).

    Inversion keywords: Other keyword arguments will be used to alter the inversion options array passed
        directly to the IDL routine. The allowed arguments, according to the IDL documentation, are:
        ka = $
        [ $
          'name=Cormack'          # inversion method
          'base=Bessel'           # radial function base
          'matname=matrix.dat'    # not important
          'mc=1'                  # n. of angular (poloidal) cos components
          'ms=1'                  # n. of angular (poloidal) sin components
          'ls=6'                  # n. of radial components
          'svd_tol=0.100'         # svd threshold
          'p_ref=[0.0,0,0,0.0]'   # coordinates of the origin of the axis
                                  # where the inversion will be performed
          'n_nch=5'               # n. of added edge lines of sight
          'mst'=1                 # specifies that we are working on Mst SXR data
        ]
        
        Note: The values of p_ref are [x0, y0, z0], signifying the location of the magnetic axis. These
            values are measured in meters, and z0 will typically be set to zero. The default is
            p_ref = [0.06, 0.0, 0.0], signifying a 6 cm Shafranov shift.
        Note: The value of svd_tol is the tolerance for stopping the iterative SVD inversion process. The
            default is 0.06, but can be increased (up to 0.1) as needed to smooth the reconstruction.
    """
    # Load the data into idl
    idl = pidly.IDL()
    idl_str = "staus = sxr_mst_get_signals(St, /ms, shot=" + str(
        shot_num) + ", tst = " + str(t_start)
    idl_str += ", excl = " + str(exclude) + ", tend = " + str(t_end)
    idl_str += ", delta = " + str(delta) + ", sm = " + str(smooth) + ')'
    idl(idl_str)

    # Format keywords - check for supplied arguments and change if necessary
    ka = {
        'name': 'Cormack',
        'base': 'bessel',
        'matname': 'matrix.dat',
        'mc': 1,
        'ms': 1,
        'ls': 6,
        'svd_tol': 0.06,
        'p_ref': [0.06, 0.00, 0.0]
    }

    if len(kwargs) > 0:
        for key, value in kwargs.items():
            ka[key] = value

    ka_str = 'ka = ' + str(
        ['{0:}={1:}'.format(key, value) for key, value in ka.items()])
    idl(ka_str)
    idl_str = "st_out = sxr_MST_get_emiss(st, st_emiss=st_emiss, status=status, ka=ka"
    if thick:
        idl_str += ', /thick'
    elif thin:
        idl_str += ', /thin'
    idl_str += ')'

    # Do the inversion
    idl(idl_str)

    # Import the data into python
    results = {
        'emiss': idl.ev('st_emiss.emiss', use_cache=True).T,
        'time': idl.ev('st_emiss.t', use_cache=True),
        'xs': idl.ev('st_emiss.x_emiss', use_cache=True),
        'ys': idl.ev('st_emiss.y_emiss', use_cache=True),
        'major': idl.ev('st_emiss.majr'),
        'radius': idl.ev('st_emiss.radius'),
        'kwargs': ka
    }

    # Close the IDL instance to prevent runaway processes
    idl.close()

    return AttrDict(results)
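A usage sketch showing how the extra keyword arguments flow into the inversion options; the shot number is a placeholder and the IDL routines named above must be available:

inv = invert_brightness(1160726001, t_start=10.0, t_end=25.0,
                        svd_tol=0.1, p_ref=[0.06, 0.0, 0.0], thick=True)
print(inv.emiss.shape)           # emissivity on the (xs, ys) grid versus time
print(inv.kwargs['svd_tol'])     # inversion options actually passed to the IDL routine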
Example #8
def load_brightness(shot_num,
                    t_start=8.0,
                    t_end=28.0,
                    delta=0.1,
                    smooth=10.0,
                    exclude=[20, 59, 60],
                    adj=None,
                    cmd_str=''):
    """
    Function: st = load_brightness(shot_num, t_start, t_end, delta, smooth, exclude, adj, cmd_str)
    This version of load_brightness interfaces directly with the IDL implementation, via the pidly interface
        (see the signals README for instructions on setting this up).
    Inputs:
        - shot_num = [INT] The MST shot ID for the desired set of data
        - t_start = [FLOAT] The start time for the desired interval of SXR data.
        - t_end = [FLOAT] The end time for the desired interval of SXR data.
        - delta = [FLOAT] The desired sampling window for SXR data. Set to None to get the full signal. Note that
            sampling error will not be available in that case.
        - smooth = [FLOAT] The size of the smoothing window (10.0 is standard).
        - exclude = [[INT]] List of logicals which were excluded in the data file. By default excludes the NickAl2
            channels. For older data make sure to also exclude 69.
        - adj = [[FLOAT]] The time adjustment due to SXR trigger settings. Set to 0 for data past mid-2016
        - cmd_str = [STR] Additional string to append to the IDL command.
    Outputs:
        - st['key'] = [DICT] Nested dictionary containing the SXR tomography diagnostic data, indexed by camera label.
    """
    # Access (and initialize, if needed) the pidly object and assemble the command string
    idl = pidly.IDL()
    idl_str = "staus = sxr_mst_get_signals(St, /ms, shot=" + str(
        shot_num) + ", tst = " + str(t_start) + ", excl = " + str(exclude)
    idl_str += ", tend = " + str(t_end)

    # Optional keywords
    if delta is not None:
        idl_str += ", delta = " + str(delta)

    if smooth is not None:
        idl_str += ", sm = " + str(smooth)

    if len(cmd_str) == 0:
        idl_str += ")"
    else:
        idl_str += ", " + cmd_str + ")"

    # Create the structure in IDL and begin exporting the contents
    idl(idl_str)
    sxr_data = idl.ev('st.bright.data', use_cache=True)
    sxr_impact = idl.ev('st.bright.prel', use_cache=True)
    sxr_angles = idl.ev('st.bright.phi', use_cache=True)
    sxr_noise = idl.ev('st.bright.off_str.sxr_r_noise', use_cache=True)
    sxr_time = idl.ev('st.bright.time', use_cache=True)

    if delta is not None:
        sxr_error = idl.ev('st.bright.err', use_cache=True)

    # Organize the data by probe label and thick/thin convention. Now automated to account for excluded probes
    filt_list = [
        'A thick', 'B thick', 'C thick', 'D thick', 'A thin', 'B thin',
        'C thin', 'D thin'
    ]

    brightness = {}
    impact_p = {}
    impact_angle = {}
    sigma = {}
    off_noise = {}
    logicals_inc = {}

    # Manual index in order to allow exclusion of specified logicals
    index = 0

    for filt in filt_list:
        base_logical = filt_list.index(filt) * 10 + 1

        data = []
        error = []
        impact = []
        angles = []
        noise = []
        logs = []
        for logical in range(base_logical, base_logical + 10):
            if logical not in exclude:
                data.append(np.transpose(sxr_data[:, index]))
                impact.append(sxr_impact[index])
                angles.append(sxr_angles[index])
                noise.append(sxr_noise[index])
                logs.append(logical)

                if delta is not None:
                    error.append(np.transpose(sxr_error[:, index]))
                else:
                    error.append([])

                index += 1

        # Store into the dictionary
        brightness[filt] = np.array(data)
        sigma[filt] = np.array(error)
        impact_p[filt] = np.array(impact)
        impact_angle[filt] = np.array(angles)
        off_noise[filt] = np.array(noise)
        logicals_inc[filt] = logs

    # Also store same basic configuration info - logical indexing is fine
    config = {
        'filters': idl.ev('st.bright.FILTERBE_THICK', use_cache=True),
        'alpha': idl.ev('st.bright.alfa', use_cache=True),
        'gain': idl.ev('st.bright.gain', use_cache=True),
        'insertion': idl.ev('st.bright.insertion', use_cache=True)
    }

    # Assemble these into a single dictionary
    st = {
        'bright': brightness,
        'p': impact_p,
        'phi': impact_angle,
        'sigma': sigma,
        'noise': off_noise,
        'logical': logicals_inc,
        'shot': shot_num,
        'time': sxr_time,
        'config': config
    }

    # Close the IDL instance to prevent runaway processes
    idl.close()

    return AttrDict(st)
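A usage sketch; the shot number is a placeholder, and passing delta=None returns the full-rate signal (with empty sigma arrays, as noted in the docstring):

st = load_brightness(1160726001, t_start=8.0, t_end=28.0, delta=None)
print(st.bright['A thick'].shape)   # one row per included logical for camera A, thick filter
print(st.logical['A thick'])        # which logicals were kept after exclusions
print(st.time[:5])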
Example #9
def get_magnetics(shot,
                  t_start=10.0,
                  t_end=60.0,
                  delta_t=0.1,
                  components=['BP', 'BT'],
                  modes=range(5, 16),
                  dom_mode='N05'):
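    """
    Load mode-resolved magnetics (phase, amplitude, velocity) for the requested components and
    toroidal mode numbers from the MST tree, downsampled to delta_t over [t_start, t_end].
    Also computes the spectral index, the total dominant/secondary mode amplitudes, and the
    equilibrium BP/BT toroidal-array signals.
    Inputs:
        - shot = [INT] The MST shot ID for the desired set of data.
        - t_start, t_end = [FLOAT] Start and end of the desired time interval, in ms.
        - delta_t = [FLOAT] Desired sample spacing, in ms.
        - components = [[STR]] Magnetic field components to load.
        - modes = [[INT]] Toroidal mode numbers to load.
        - dom_mode = [STR] Mode treated as dominant when computing 'Dominant' and 'Secondary'.
    """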
    # Container for the results
    mag = {}

    # Format mode numbers to string form: 'N05', 'N06', etc.
    modes_str = ['N' + str(x).zfill(2) for x in modes]

    # Connect to the tree
    try:
        mstTree = MDSplus.Tree('mst', shot, 'READONLY')
    except:
        print('ERROR: Shot not found')
        raise

    # Variables to assist with loading the time base
    time_loaded = False
    n_start = 0
    n_end = 0
    delta_n = 0

    # Iterate over the mode strings and load the desired data
    for comp in components:
        mag[comp] = {}
        for mode in modes_str:
            # Load in the data
            try:
                phs_node = mstTree.getNode('\\MST_MAG::' + comp + '_' + mode +
                                           '_PHS')
                amp_node = mstTree.getNode('\\MST_MAG::' + comp + '_' + mode +
                                           '_AMP')
                vel_node = mstTree.getNode('\\MST_MAG::' + comp + '_' + mode +
                                           '_VEL')
            except:
                print('Error loading node. Please check shot number.')
            else:
                mag[comp][mode] = {}
                phs = phs_node.getData().data()
                amp = amp_node.getData().data()
                vel = vel_node.getData().data()

                # Record the time base for the first iteration only, and downsample it
                if not time_loaded:
                    t_mag = phs_node.getData().dim_of().data() * 1000.  # in ms
                    t_smooth = np.around(np.float64(t_mag),
                                         3)  # Deal with rounding problems
                    n_start = np.argmin(np.abs(t_smooth -
                                               t_start))  # Find starting index
                    n_end = np.argmin(np.abs(t_smooth -
                                             t_end))  # Find the ending index
                    delta_n = int((delta_t) /
                                  (t_smooth[1] -
                                   t_smooth[0]))  # The spacing between samples

                    mag['Time'] = t_smooth[n_start:n_end + 1:delta_n]
                    time_loaded = True

                # Downsample the magnetic field data
                mag[comp][mode]['Phase'] = phs[n_start:n_end + 1:delta_n]
                mag[comp][mode]['Amplitude'] = amp[n_start:n_end + 1:delta_n]
                mag[comp][mode]['Velocity'] = vel[n_start:n_end + 1:delta_n]

    # Compute the spectral index
    total_mode_energy = 0.0
    for mode in modes_str:
        total_mode_energy += (mag['BP'][mode]['Amplitude']**2 +
                              mag['BT'][mode]['Amplitude']**2)

    spec = 0.0
    for mode in modes_str:
        spec += ((mag['BP'][mode]['Amplitude']**2 +
                  mag['BT'][mode]['Amplitude']**2) / total_mode_energy)**2

    # Include this into the magnetics dictionary
    mag['Index'] = 1. / spec
    mag['Avg Index'] = np.average(mag['Index'])

    # Compute the total dominant and secondary mode amplitudes
    if dom_mode in modes_str:
        mag['Dominant'] = np.sqrt(mag['BP'][dom_mode]['Amplitude']**2 +
                                  mag['BT'][dom_mode]['Amplitude']**2)
        mag['Secondary'] = np.zeros(len(mag['Dominant']))
        for mode in modes_str:
            if mode != dom_mode:
                mag['Secondary'] += mag['BP'][mode]['Amplitude']**2 + mag[
                    'BT'][mode]['Amplitude']**2
        mag['Secondary'] = np.sqrt(mag['Secondary'])

    # Include equilibrium measurements
    bp_node = mstTree.getNode('\\MST_MAG::BP_TORARR_EQUIL')
    bp = bp_node.data()
    bp_time = bp_node.dim_of().data() * 1000.
    mag['BP']['EQ'] = bp[n_start:n_end + 1:delta_n]

    bt_node = mstTree.getNode('\\MST_MAG::BT_TORARR_EQUIL')
    bt = bt_node.data()
    bt_time = bt_node.dim_of().data() * 1000.
    mag['BT']['EQ'] = bt[n_start:n_end + 1:delta_n]

    return AttrDict(mag)
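A usage sketch; the shot number is a placeholder and MDSplus access to the MST tree is assumed:

mags = get_magnetics(1160726001, t_start=10.0, t_end=60.0, delta_t=0.1)
print(mags['Time'][:5], mags['Avg Index'])
print(mags['BP']['N05']['Amplitude'][:5])        # n=5 poloidal mode amplitude
print(mags['Dominant'][:5], mags['Secondary'][:5])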