Example #1
    def test_remove_mean_dataset(self):
        multichannel_data_1 = get_multimode_test_data(channels=get_n_channels(10),
                                                      timebase = Timebase(np.arange(0.0,0.01,1.e-5)),
                                                      modes = [mode_1, mode_2],
                                                      noise = 0.2)
        multichannel_data_2 = get_multimode_test_data(channels=get_n_channels(15),
                                                      timebase = Timebase(np.arange(0.0,0.01,1.e-5)),
                                                      modes = [mode_4, mode_5],
                                                      noise = 0.7)
        multichannel_data_3 = get_multimode_test_data(channels=get_n_channels(13),
                                                      timebase = Timebase(np.arange(0.0,0.01,1.e-5)),
                                                      modes = [mode_4, mode_5],
                                                      noise = 0.7)
        # add some non-zero offset
        multichannel_data_1.signal += np.random.rand(*multichannel_data_1.signal.shape)
        multichannel_data_2.signal += np.random.rand(*multichannel_data_2.signal.shape)

        test_dataset = pyfusion.data.base.DataSet('test_dataset')

        test_dataset.add(multichannel_data_1)
        test_dataset.add(multichannel_data_2)


        filtered_data = test_dataset.subtract_mean()
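        # after subtract_mean, each channel of every dataset item should have ~zero time-average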
        for d in filtered_data:
            mean_filtered_data = np.mean(d.signal, axis=1)
            assert_array_almost_equal(mean_filtered_data, np.zeros_like(mean_filtered_data))
Example #2
    def do_fetch(self):
        chan_name = (self.diag_name.split('-'))[-1]  # take the part after any '-' prefix in the diagnostic name
        filename_dict = {'diag_name': chan_name, 'shot': self.shot}

        self.basename = path.join(pf.config.get('global', 'localdatapath'),
                                  data_filename % filename_dict)

        files_exist = path.exists(self.basename)
        if not files_exist:
            raise Exception("file " + self.basename + " not found.")
        else:
            signal_dict = newload(self.basename)
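            # newload() returns a dict holding the saved 'timebase' and 'signal' arrays (used below)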

        if chan_name in ('MP5', 'HMP13', 'HMP05'): flip = -1.
        else: flip = 1.
        if self.diag_name[0] == '-': flip = -flip
        #        coords = get_coords_for_channel(**self.__dict__)
        ch = Channel(self.diag_name, Coords('dummy', (0, 0, 0)))
        output_data = TimeseriesData(
            timebase=Timebase(signal_dict['timebase']),
            signal=Signal(flip * signal_dict['signal']),
            channels=ch)
        output_data.meta.update({'shot': self.shot})

        return output_data
Example #3
    def test_stored_metadata_datasets(self):
        """Make sure metadata attached to dataset classes is saved to sql."""
        n_ch = 3
        n_samples = 1024
        multichannel_data = get_multimode_test_data(channels=get_n_channels(n_ch),
                                                    timebase = Timebase(np.arange(n_samples)*1.e-6),
                                                    noise = 0.01)
        # put in some fake metadata 
        multichannel_data.meta = {'hello':'world'}
        #print multichannel_data.meta


        # produce a dataset of flucstrucs
        fs_data = multichannel_data.flucstruc(min_dphase = -2*np.pi)

        # check that metadata is carried to the flucstrucs

        self.assertEqual(fs_data.meta, multichannel_data.meta)

        # save our dataset to the database
        fs_data.save()

        if pyfusion.orm_manager.IS_ACTIVE:
            session = pyfusion.orm_manager.Session()
            some_ds = session.query(DataSet).all().pop()
            self.assertEqual(some_ds.meta, multichannel_data.meta)
Example #4
    def test_sp_filter_butterworth_bandpass(self):
        n_ch = 3
        n_samples = 1024
        sample_period = 1.e-6
        # Let's generate a test signal with strong peaks at 20kHz and
        # 60kHz and a weaker peak at 40kHz.

        mode_20kHz = {
            'amp': 10.0,
            'freq': 20.0e3,
            'mode_number': 3,
            'phase': 0.2
        }
        mode_40kHz = {
            'amp': 2.0,
            'freq': 40.0e3,
            'mode_number': 4,
            'phase': 0.3
        }
        mode_60kHz = {
            'amp': 10.0,
            'freq': 60.0e3,
            'mode_number': 5,
            'phase': 0.4
        }

        multichannel_data = get_multimode_test_data(
            channels=get_n_channels(n_ch),
            timebase=Timebase(np.arange(n_samples) * sample_period),
            modes=[mode_20kHz, mode_40kHz, mode_60kHz],
            noise=0.1)

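        # The arguments below are assumed (not verified here) to be: passband
        # edges [Hz], stopband edges [Hz], max passband ripple (dB) and min
        # stopband attenuation (dB), as in a standard Butterworth design.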
        filtered_data = multichannel_data.sp_filter_butterworth_bandpass(
            [35.e3, 45.e3], [25.e3, 55.e3], 1.0, 10.0)
Example #5
    def test_stored_metadata_data(self):
        """ metadata should be stored to data instances, rather than datasets - this might be slower, but more likely to guarantee data is kept track of."""
        n_ch = 3
        n_samples = 1024
        multichannel_data = get_multimode_test_data(channels=get_n_channels(n_ch),
                                                    timebase = Timebase(np.arange(n_samples)*1.e-6),
                                                    noise = 0.01)

        # put in some fake metadata 
        multichannel_data.meta = {'hello':'world'}
        print(multichannel_data.meta)

        # produce a dataset of flucstrucs
        fs_data = multichannel_data.flucstruc(min_dphase = -2*np.pi)

        # check that metadata is carried to the individual flucstrucs
        for fs in fs_data:
            self.assertEqual(fs.meta, multichannel_data.meta)

        # save our dataset to the database
        fs_data.save()

        ## now test to make sure metadata is saved in database

        if pyfusion.orm_manager.IS_ACTIVE:
            session = pyfusion.orm_manager.Session()
            some_fs = session.query(FlucStruc).all().pop()
            self.assertEqual(some_fs.meta, multichannel_data.meta)
Example #6
def change_time_base(input_data, new_time_base):
    '''Return a copy of input_data interpolated onto new_time_base.
    Applied item-by-item if input_data is a DataSet. (New from SH.)
    '''
    from pyfusion.data.base import DataSet
    from pyfusion.data.timeseries import Signal, Timebase
    if isinstance(input_data, DataSet):
        #output_dataset = input_data.copy()
        #output_dataset.clear()
        output_dataset = DataSet(input_data.label+'_new_time_base')
        for data in input_data:
            try:
                output_dataset.add(data.change_time_base(new_time_base))
            except AttributeError:
                pyfusion.logger.warning("Data filter 'change_time_base' not applied to item in dataset")
        return output_dataset

    # build a new signal/timebase of the target size and interpolate each channel onto it
    new_data = copy.deepcopy(input_data)
    n_channels = input_data.signal.shape[0]
    new_data.signal = Signal(np.zeros((n_channels,new_time_base.shape[0]),dtype=np.float32))
    new_data.timebase = Timebase(new_time_base) 
    for i in range(input_data.signal.shape[0]):
        new_data.signal[i,:] = np.interp(new_time_base, input_data.timebase, input_data.signal[i,:])

    #if input_data.signal.ndim == 1:
    #    input_data.signal = input_data.signal[new_time_args[0]:new_time_args[1]]
    #else:
    #    input_data.signal = input_data.signal[:,new_time_args[0]:new_time_args[1]]
    return new_data
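
A minimal usage sketch (hypothetical, not from the original source), assuming an existing multi-channel TimeseriesData instance named data:

    from pyfusion.data.timeseries import Timebase
    # resample onto a 10x coarser timebase by keeping every 10th sample point
    coarse_tb = Timebase(data.timebase[::10])
    resampled = change_time_base(data, coarse_tb)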
Example #7
    def test_flucstruc_phases(self):
        n_ch = 10
        n_samples = 1024
        multichannel_data = get_multimode_test_data(channels=get_n_channels(n_ch),
                                                    timebase = Timebase(np.arange(n_samples)*1.e-6),
                                                    noise = 0.01)
        fs_data = multichannel_data.flucstruc(min_dphase = -2*np.pi)
        self.assertTrue(isinstance(fs_data, DataSet))
        self.assertTrue(len([i for i in fs_data.data]) > 0)
        E = 0.7**2 + 0.5**2
        for fs in fs_data:
            self.assertTrue(isinstance(fs, FlucStruc))
            # fs_data is not ordered, so we identify flucstrucs by the sv indices
            if fs.svs == [0,1]:
                # check that freq is correct to within 1kHz
                self.assertAlmostEqual(1.e-4*fs.freq, 1.e-4*24.e3, 1)
                # expected channel phases for the (default) mode_number 3 test mode
                fake_phases = -3.0*2*np.pi*np.arange(n_ch+1)[:-1]/(n_ch)
                fake_dphases = fake_phases[1:]-fake_phases[:-1]

                test_dphase = fs.dphase
                # check phases within 0.5 rad
                assert_array_almost_equal(test_dphase, fake_dphases, 1)
                # check fs energy is correct to 3 decimal places
                self.assertAlmostEqual(fs.p, 0.7**2/E, 3)
            if fs.svs == [2,3]:
                self.assertAlmostEqual(1.e-4*fs.freq, 1.e-4*37.e3, 1)
                fake_phases = -4.0*2*np.pi*np.arange(n_ch+1)[:-1]/(n_ch)
                fake_dphases = fake_phases[1:]-fake_phases[:-1]

                test_dphase = fs.dphase
                # check phases within 0.5 rad
                assert_array_almost_equal(test_dphase, fake_dphases, 1)
                # check fs energy is correct to 3 decimal places
                self.assertAlmostEqual(fs.p, 0.5**2/E, 3)
Example #8
    def test_correlate(self):
        multichannel_data = get_multimode_test_data(channels=get_n_channels(2),
                                                    timebase = Timebase(np.arange(0.0,0.01,1.e-5)),
                                                    noise = 0.2)
        numpy_corr = np.correlate(multichannel_data.signal[0], multichannel_data.signal[1])

        pyfusion_corr = multichannel_data.correlate(0,1)
        assert_array_almost_equal(numpy_corr, pyfusion_corr)
Example #9
 def test_timebase_and_coords(self):
     n_ch = 10
     n_samples = 1024
     timebase = Timebase(np.arange(n_samples)*1.e-6)
     channels = ChannelList(*(Channel('ch_%d' %i, Coords('cylindrical',(1.0,i,0.0))) for i in 2*np.pi*np.arange(n_ch)/n_ch))
     multichannel_data = get_multimode_test_data(channels = channels,
                                                 timebase = timebase,
                                                 noise = 0.5)
Example #10
 def test_peak_freq(self):
     timebase = Timebase(np.arange(0.0, 0.01, 1.e-6))
     single_mode_signal = get_multimode_test_data(
         channels=get_n_channels(1), timebase=timebase, modes=[mode_3])
     p_f, p_f_elmt = peak_freq(single_mode_signal.signal[0],
                               single_mode_signal.timebase)
     # Check that we get mode_3 frequency of 27.0 kHz (to 1 decimal place).
     self.assertAlmostEqual(1.e-3 * p_f, 1.e-3 * mode_3['freq'], 1)
Example #11
    def test_svd_plot(self):
        n_ch = 4
        n_samples = 256
        multichannel_data = get_multimode_test_data(channels=get_n_channels(n_ch),
                                                    timebase = Timebase(np.arange(n_samples)*1.e-6),
                                                    noise = 0.5)

        test_svd = multichannel_data.svd()
        self.assertTrue(hasattr(test_svd, 'svdplot'))
Example #12
def fetch_data_from_file(fetcher):
    prm_dict = read_prm_file(fetcher.basename + ".prm")
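    # the .prm file is a text header of key/value lists describing the layout of the binary .dat file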
    n_bytes = int(prm_dict['DataLength(byte)'][0])
    bits = int(prm_dict['Resolution(bit)'][0])
    if 'ImageType' not in prm_dict:  # if absent, assume unsigned 16-bit data
        bytes_per_sample = 2
        dat_arr = Array.array('H')
        offset = 2**(bits - 1)
        dtype = np.dtype('uint16')
    else:
        if prm_dict['ImageType'][0] == 'INT16':
            bytes_per_sample = 2
            if prm_dict['BinaryCoding'][0] == 'offset_binary':
                dat_arr = Array.array('H')
                offset = 2**(bits - 1)
                dtype = np.dtype('uint16')
            elif prm_dict['BinaryCoding'][0] == "shifted_2's_complementary":
                dat_arr = Array.array('h')
                offset = 0
                dtype = np.dtype('int16')
            else:
                raise NotImplementedError(' binary coding ' +
                                          str(prm_dict['BinaryCoding']))

    fp = open(fetcher.basename + '.dat', 'rb')
    dat_arr.fromfile(fp, n_bytes // bytes_per_sample)
    fp.close()

    clockHz = None

    if 'SamplingClock' in prm_dict:
        clockHz = double(prm_dict['SamplingClock'][0])
    if 'SamplingInterval' in prm_dict:
        clockHz = clockHz / double(prm_dict['SamplingInterval'][0])
    if 'ClockSpeed' in prm_dict:
        if clockHz is not None:
            pyfusion.utils.warn(
                'Apparent duplication of clock speed information')
        clockHz = double(prm_dict['ClockSpeed'][0])
        clockHz = LHD_A14_clk(fetcher.shot)  # see above
    if clockHz is not None:
        timebase = arange(len(dat_arr)) / clockHz
    else:
        raise NotImplementedError("timebase not recognised")

    ch = Channel("%s-%s" % (fetcher.diag_name, fetcher.channel_number),
                 Coords('dummy', (0, 0, 0)))
    if fetcher.gain is not None:
        gain = fetcher.gain
    else:
        gain = 1
    output_data = TimeseriesData(timebase=Timebase(timebase),
                                 signal=Signal(gain * np.asarray(dat_arr)),
                                 channels=ch)
    output_data.meta.update({'shot': fetcher.shot})

    return output_data
Example #13
 def test_dataset_filter_nocopy(self):
     n_ch = 10
     n_samples = 640
     timebase = Timebase(np.arange(n_samples)*1.e-6)
     channels = ChannelList(*(Channel('ch_%d' %i, Coords('cylindrical',(1.0,i,0.0))) for i in 2*np.pi*np.arange(n_ch)/n_ch))
     multichannel_data = get_multimode_test_data(channels = channels,
                                                 timebase = timebase,
                                                 noise = 0.5)
     dataset = multichannel_data.segment(64, copy=False)
     new_dataset = dataset.segment(16, copy=False)
Example #14
    def test_remove_mean_multichanel(self):
        multichannel_data = get_multimode_test_data(channels=get_n_channels(10),
                                                    timebase = Timebase(np.arange(0.0,0.01,1.e-5)),
                                                    noise = 0.2)
        # add some non-zero offset
        multichannel_data.signal += np.random.rand(*multichannel_data.signal.shape)

        filtered_data = multichannel_data.subtract_mean()
        mean_filtered_data = np.mean(filtered_data.signal, axis=1)
        assert_array_almost_equal(mean_filtered_data, np.zeros_like(mean_filtered_data))
Example #15
    def do_fetch(self):
        # TODO support non-signal datatypes
        if self.fetch_mode == 'thin client':
            ch = Channel(self.mds_path_components['nodepath'],
                         Coords('dummy', (0, 0, 0)))
            data = self.acq.connection.get(
                self.mds_path_components['nodepath'])
            dim = self.acq.connection.get('dim_of(%s)' %
                                          self.mds_path_components['nodepath'])
            # TODO: fix this hack (same hack as when getting signal from node)
            if len(data.shape) > 1:
                data = np.array(data)[0, ]
            if len(dim.shape) > 1:
                dim = np.array(dim)[0, ]
            output_data = TimeseriesData(timebase=Timebase(dim),
                                         signal=Signal(data),
                                         channels=ch)
            output_data.meta.update({'shot': self.shot})
            return output_data

        elif self.fetch_mode == 'http':
            data_url = self.acq.server + '/'.join([
                self.mds_path_components['tree'],
                str(self.shot), self.mds_path_components['tagname'],
                self.mds_path_components['nodepath']
            ])

            data = mdsweb.data_from_url(data_url)
            ch = Channel(self.mds_path_components['nodepath'],
                         Coords('dummy', (0, 0, 0)))
            t = Timebase(data.data.dim)
            s = Signal(data.data.signal)
            output_data = TimeseriesData(timebase=t, signal=s, channels=ch)
            output_data.meta.update({'shot': self.shot})
            return output_data

        else:
            node = self.tree.getNode(self.mds_path)
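            # dtype 195 is the MDSplus SIGNAL descriptor type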
            if int(node.dtype) == 195:
                return get_tsd_from_node(self, node)
            else:
                raise Exception('Unsupported MDSplus node type')
Example #16
 def test_timeseries_filter_nocopy(self):
     # Use reduce_time filter for testing...
     n_ch = 10
     n_samples = 5000
     timebase = Timebase(np.arange(n_samples)*1.e-6)
     channels = ChannelList(*(Channel('ch_%d' %i, Coords('cylindrical',(1.0,i,0.0))) for i in 2*np.pi*np.arange(n_ch)/n_ch))
     multichannel_data = get_multimode_test_data(channels = channels,
                                                 timebase = timebase,
                                                 noise = 0.5)
     new_data = multichannel_data.reduce_time([0,1.e-3], copy=False)
     self.assertTrue(new_data is multichannel_data)
Example #17
    def test_ORM_flucstrucs(self):
        """ check that flucstrucs can be saved to database"""
        n_ch = 10
        n_samples = 1024
        multichannel_data = get_multimode_test_data(
            channels=get_n_channels(n_ch),
            timebase=Timebase(np.arange(n_samples) * 1.e-6),
            noise=0.01)
        # produce a dataset of flucstrucs
        #print ">> ", multichannel_data.channels
        fs_data = multichannel_data.flucstruc(min_dphase=-2 * np.pi)
        print(type(fs_data))
        #print list(fs_data)[0].dphase[0].channel_1
        #print '---'
        # save our dataset to the database
        fs_data.save()
        if pyfusion.orm_manager.IS_ACTIVE:
            session = pyfusion.orm_manager.Session()
            d1 = DataSet('test_dataset_1')
            d1.save()
            d2 = DataSet('test_dataset_2')
            d2.save()

            # get our dataset from database
            our_dataset = session.query(DataSet).order_by("id").first()
            self.assertEqual(our_dataset.created, fs_data.created)

            self.assertEqual(len([i for i in our_dataset.data]),
                             len(our_dataset))

            #check flucstrucs have freq, t0 and d_phase..
            #for i in our_dataset.data:
            #    print i
            #print 'w'
            #assert False

            #our guinea pig flucstruc:
            test_fs = our_dataset.pop()
            self.assertTrue(isinstance(test_fs.freq, float))
            self.assertTrue(isinstance(test_fs.t0, float))

            # now, are the phase data correct?

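            # dphase should contain one phase difference per adjacent channel pair (n_ch - 1 entries)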
            self.assertTrue(isinstance(test_fs.dphase, BaseOrderedDataSet))
            self.assertEqual(len(test_fs.dphase), n_ch - 1)

            # what if we close the session and try again?

            session.close()
            session = pyfusion.orm_manager.Session()

            ds_again = session.query(DataSet).order_by("id").first()
            fs_again = list(ds_again)[0]
            """
Example #18
    def test_flucstruc_signals(self):
        # make sure that flucstruc derived from all singular values
        # gives back the original signal
        n_ch = 10
        n_samples = 1024
        multichannel_data = get_multimode_test_data(channels=get_n_channels(n_ch),
                                                    timebase = Timebase(np.arange(n_samples)*1.e-6),
                                                    noise = 0.01)
        svd_data = multichannel_data.svd()
        test_fs = FlucStruc(svd_data, range(len(svd_data.svs)), multichannel_data.timebase)

        assert_almost_equal(test_fs.signal, multichannel_data.signal)
Example #19
def get_probe_angles(input_data, closed=False):
    """  
    Return a dict of Phi and Theta angles (radians) for a given signal (timeseries), or for a string that specifies one, e.g.
              get_probe_angles('W7X:W7X_MIRNOV_41_BEST_LOOP:(20180912,43)')

    This is a kludgey way to read coordinates.  Should be through acquisition.base or
    acquisition.'device' rather than looking up config directly
    """
    import pyfusion
    if isinstance(input_data, str):
        pieces = input_data.split(':')
        if len(pieces) == 3:
            dev_name, diag_name, shotstr = pieces
            shot_number = eval(shotstr)
            dev = pyfusion.getDevice(dev_name)
            data = dev.acq.getdata(shot_number, diag_name, time_range=[0, 0.1])
        else:
            from pyfusion.data.timeseries import TimeseriesData, Timebase, Signal
            from pyfusion.data.base import Channel, ChannelList, Coords
            input_data = TimeseriesData(Timebase([0, 1]), Signal([0, 1]))
            dev_name, diag_name = pieces
            # channels are amongst options
            opts = pyfusion.config.pf_options('Diagnostic', diag_name)
            chans = [
                pyfusion.config.pf_get('Diagnostic', diag_name, opt)
                for opt in opts if 'channel_' in opt
            ]
            # for now, assume config_name is the same as name
            input_data.channels = ChannelList(
                *[Channel(ch, Coords('?', [0, 0, 0])) for ch in chans])

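    # 'Coords_reduced' is stored as comma-separated degrees ("phi,theta,..."); convert the first two fields to radians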
    Phi = np.array([
        2 * np.pi / 360 * float(
            pyfusion.config.get(
                'Diagnostic:{cn}'.format(
                    cn=c.config_name if c.config_name != '' else c.name),
                'Coords_reduced').split(',')[0]) for c in input_data.channels
    ])

    Theta = np.array([
        2 * np.pi / 360 * float(
            pyfusion.config.get(
                'Diagnostic:{cn}'.format(
                    cn=c.config_name if c.config_name != '' else c.name),
                'Coords_reduced').split(',')[1]) for c in input_data.channels
    ])

    if closed:
        Phi = np.append(Phi, Phi[0])
        Theta = np.append(Theta, Theta[0])
    return (dict(Theta=Theta, Phi=Phi))
Example #20
    def do_fetch(self):
        channel_length = int(self.length)
        outdata = np.zeros(1024 * 2 * 256 + 1)
        ##  !! really should put a wrapper around gethjdata to do common stuff
        #  outfile is only needed if the direct passing of binary won't work
        #  with tempfile.NamedTemporaryFile(prefix="pyfusion_") as outfile:
        # get in two steps to make debugging easier
        allrets = gethjdata.gethjdata(self.shot,
                                      channel_length,
                                      self.path,
                                      verbose=VERBOSE,
                                      opt=1,
                                      ierror=2,
                                      isample=-1,
                                      outdata=outdata,
                                      outname='')
        ierror, isample, getrets = allrets
        if ierror != 0:
            raise LookupError(
                'hj Okada style data not found for {s}:{c}'.format(
                    s=self.shot, c=self.path))

        ch = Channel(self.path, Coords('dummy', (0, 0, 0)))

        # the intent statement causes the out var to be returned in the result list
        # looks like the time,data is interleaved in a 1x256 array
        # it is fed in as real*64, but returns as real*32! (as per fortran decl)
        debug_(pyfusion.DEBUG,
               4,
               key='Heliotron_fetch',
               msg='after call to getdata')
        # timebase in secs (ms in raw data) - could add a preferred unit?
        # this is partly allowed for in savez_compressed, newload, and
        # for plotting, in the config file.
        # important that the 1e-3 be inside the Timebase()
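        # getrets interleaves (time, sample) pairs: odd indices are times (ms), even indices (from 2) are samples; keep the first isample of each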
        output_data = TimeseriesData(timebase=Timebase(
            1e-3 * getrets[1::2][0:isample]),
                                     signal=Signal(getrets[2::2][0:isample]),
                                     channels=ch)
        output_data.meta.update({'shot': self.shot})
        if pyfusion.VERBOSE > 0: print('HJ config name', self.config_name)
        output_data.config_name = self.config_name
        stprms = get_static_params(shot=self.shot, signal=self.path)
        if len(list(stprms)) == 0:  # maybe this should be ignored - how do we use it?
            raise LookupError(
                ' failure to get params for {shot}:{path}'.format(
                    shot=self.shot, path=self.path))
        output_data.params = stprms
        return output_data
Example #21
    def do_fetch(self):
        delimiter = self.__dict__.get("delimiter", None)
        data = genfromtxt(self.filename.replace("(shot)", str(self.shot)),
                          unpack=True,
                          delimiter=delimiter)

        # len(data) is number of channels + 1 (timebase)
        n_channels = len(data) - 1

        ch_generator = (generic_ch(i) for i in range(n_channels))
        ch = ChannelList(*ch_generator)

        return TimeseriesData(timebase=Timebase(data[0]),
                              signal=Signal(data[1:]),
                              channels=ch)
Example #22
     def do_fetch(self):
         channel_length = int(self.length)
         outdata=np.zeros(1024*2*256+1)
         with tempfile.NamedTemporaryFile(prefix="pyfusion_") as outfile:
             getrets=gethjdata.gethjdata(self.shot,channel_length,self.path,
                                         VERBOSE, OPT,
                                         outfile.name, outdata)
         ch = Channel(self.path,
                      Coords('dummy', (0,0,0)))

         output_data = TimeseriesData(timebase=Timebase(getrets[1::2]),
                                 signal=Signal(getrets[2::2]), channels=ch)
         output_data.meta.update({'shot':self.shot})
         
         return output_data
Example #23
 def do_fetch(self):
     print(self.pointname)
     print(self.shot)
     if self.NC is not None:
         print(self.NC)
         t_name = '{}_time'.format(self.pointname)
         NC_vars = self.NC.variables.keys()
         if self.pointname in NC_vars:
             print('Reading cache!!!!')
             t_axis = self.NC.variables[t_name].data[:].copy()
             data = self.NC.variables[self.pointname].data[:].copy()
     else:
         tmp = self.acq.connection.get('ptdata2("{}",{})'.format(self.pointname, self.shot))
         data = tmp.data()
         tmp = self.acq.connection.get('dim_of(ptdata2("{}",{}))'.format(self.pointname, self.shot))
         t_axis = tmp.data()
         self.write_cache = True
     print(t_axis)
     print(data)
     coords = get_coords_for_channel(**self.__dict__)
     ch = Channel(self.pointname, coords)
     # con=MDS.Connection('atlas.gat.com::')
     # pointname = 'MPI66M067D'
     # shot = 164950
     # tmp = con.get('ptdata2("{}",{})'.format(pointname, shot))
     # dat = tmp.data()
     # tmp = con.get('dim_of(ptdata2("{}",{}))'.format(pointname, shot))
     # t = tmp.data()
     if self.NC is not None and self.write_cache:
         print(self.pointname)
         self.NC.createDimension(t_name, len(t_axis))
         f_time = self.NC.createVariable(t_name,'d',(t_name,))
         f_time[:] = +t_axis
         print('Wrote time')
         sig = self.NC.createVariable(self.pointname,'f',(t_name,))
         sig[:] = +data
         print('Wrote signal')
     output_data = TimeseriesData(timebase=Timebase(t_axis),
                             signal=Signal(data), channels=ch)
     # output_data = super(DIIIDDataFetcherPTdata, self).do_fetch()
     # coords = get_coords_for_channel(**self.__dict__)
     # ch = Channel(self.mds_path, coords)
     # output_data.channels = ch
     # output_data.meta.update({'shot':self.shot, 'kh':self.get_kh()})
     # print(ch)
     output_data.config_name = ch
     self.fetch_mode = 'ptdata'
     return output_data
Example #24
def get_tsd_from_node(fetcher, node):
    """Return pyfusion TimeSeriesData corresponding to an MDSplus signal node."""
    # TODO: load actual coordinates
    ch = Channel(fetcher.mds_path_components['nodepath'],
                 Coords('dummy', (0, 0, 0)))
    signal = Signal(node.data())
    dim = node.dim_of().data()
    # TODO: stupid hack,  the test signal has dim  of [[...]], real data
    # has [...].  Figure out  why. (...probably because  original signal
    # uses a build_signal function)
    if len(dim) == 1:
        dim = dim[0]
    timebase = Timebase(dim)
    output_data = TimeseriesData(timebase=timebase, signal=signal, channels=ch)
    output_data.meta.update({'shot': fetcher.shot})
    return output_data
Example #25
    def test_svd_data(self):
        n_ch = 10
        n_samples = 1024
        timebase = Timebase(np.arange(n_samples)*1.e-6)
        channels = ChannelList(*(Channel('ch_%02d' %i, Coords('cylindrical',(1.0,i,0.0))) for i in 2*np.pi*np.arange(n_ch)/n_ch))
        multichannel_data = get_multimode_test_data(channels = channels,
                                                    timebase = timebase,
                                                    noise = 0.5)

        test_svd = multichannel_data.svd()
        self.assertTrue(isinstance(test_svd, SVDData))
        self.assertEqual(len(test_svd.topos[0]), n_ch)
        self.assertEqual(len(test_svd.chronos[0]), n_samples)
        assert_array_almost_equal(test_svd.chrono_labels, timebase)
        for c_i, ch in enumerate(channels):
            self.assertEqual(ch, test_svd.channels[c_i])
Example #26
    def do_fetch(self):
        # evaluate filename list
        try:
            filenames = eval(self.__dict__.get("filenames", "[]"))
        except TypeError:
            # assume we have been given a list of filenames as a keyword argument, rather than
            # reading the config file.
            filenames = self.__dict__.get("filenames")

        data_array = []
        channel_names = []
        dtypes = []
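        # each file has a matching dtype_N config entry (1-based) giving its numpy record dtype; fields named channel_* carry the signals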
        for fn_i, fn in enumerate(filenames):
            dt = eval(self.__dict__.get("dtype_%d" % (fn_i + 1), None))
            dtypes.append(dt)
            if fn.endswith('.bz2'):
                f = bz2.BZ2File(fn.replace("(shot)", str(self.shot)))
                data_array.append(np.frombuffer(f.read(), dtype=dt))
                f.close()
            else:
                data_array.append(
                    np.fromfile(fn.replace("(shot)", str(self.shot)),
                                dtype=dt))
            channel_names.extend(
                [i for i in dt.names if i.startswith('channel_')])

        ch_generator = (named_ch(i) for i in channel_names)
        ch = ChannelList(*ch_generator)

        signal_data = np.zeros((len(channel_names), data_array[0].shape[0]),
                               dtype=dtypes[0][channel_names[0]])

        sig_counter = 0
        for d_i, d in enumerate(data_array):
            for ch_name in dtypes[d_i].names:
                if ch_name.startswith('channel_'):
                    signal_data[sig_counter, :] = d[ch_name]
                    sig_counter += 1

        tsd = TimeseriesData(timebase=Timebase(data_array[0]['timebase']),
                             signal=Signal(signal_data),
                             channels=ch)
        tsd.phase_pairs = self.__dict__.get("phase_pairs", None)
        if tsd.phase_pairs is not None:
            tsd.phase_pairs = eval(tsd.phase_pairs)

        return tsd
Example #27
    def test_flucstruc_phases(self):
        
        n_ch = 10
        n_samples = 5000
        timebase = Timebase(np.arange(n_samples)*1.e-6)
        channels = ChannelList(*(Channel('ch_%d' %i, Coords('cylindrical',(1.0,i,0.0))) for i in 2*np.pi*np.arange(n_ch)/n_ch))
        multichannel_data = get_multimode_test_data(channels = channels,
                                                    timebase = timebase,
                                                    noise = 0.5)

        data_reduced_time=multichannel_data.reduce_time([0,0.002]).subtract_mean().normalise(method='v',separate=True)

        fs_set=data_reduced_time.flucstruc()
        phases = []
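        # collect the phase difference (delta) of every channel pair from each flucstruc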
        for fs in fs_set:
            for j in range(0,len(fs.dphase)):
                phases.append(fs.dphase[j].delta)
Example #28
    def do_fetch(self):
        print("Shot: {}\nPoint Name: {}".format(self.shot, self.pointname))
        if not hasattr(self, 'NC'):
            self.NC = None
        if self.NC is not None:
            #print(self.NC)
            t_name = '{}_time'.format(self.pointname)
            NC_vars = self.NC.variables.keys()
        else:
            NC_vars = []
        if self.pointname in NC_vars:
            print('   Pointname in NC cache, Reading...\n')
            t_axis = self.NC.variables[t_name].data[:].copy()
            data = self.NC.variables[self.pointname].data[:].copy()
            self.write_cache = False
        else:
            print('   Fetching from ptdata')
            tmp = self.acq.connection.get('ptdata2("{}",{})'.format(
                self.pointname, self.shot))
            data = tmp.data()
            tmp = self.acq.connection.get('dim_of(ptdata2("{}",{}))'.format(
                self.pointname, self.shot))
            t_axis = tmp.data()
            self.write_cache = True
        coords = get_coords_for_channel(**self.__dict__)
        ch = Channel(self.pointname, coords)
        if self.NC is not None and self.write_cache:

            print("\t Writing to NC file disabled temporarily.")

            #print('   Writing pointname to NC file\n')
            #self.NC.createDimension(t_name, len(t_axis))
            #f_time = self.NC.createVariable(t_name,'d',(t_name,))
            #f_time[:] = +t_axis
            # sig = self.NC.createVariable(self.pointname,'f',(t_name,))
            #sig[:] = +data
        output_data = TimeseriesData(timebase=Timebase(t_axis),
                                     signal=Signal(data),
                                     channels=ch)
        output_data.config_name = ch
        self.fetch_mode = 'ptdata'
        return output_data
Example #29
    def do_fetch(self):
        print(self.shot, self.senal)
        data_dim = tjiidata.dimens(self.shot, self.senal)
        if data_dim[0] < MAX_SIGNAL_LENGTH:
            data_dict = tjiidata.lectur(self.shot, self.senal, data_dim[0],
                                        data_dim[0], data_dim[1])
        else:
            raise ValueError('Not loading data to avoid segmentation fault in tjiidata.lectur')
        ch = Channel(self.senal, Coords('dummy', (0, 0, 0)))

        if self.invert == 'true':  #yuk - TODO: use boolean type from config
            s = Signal(-np.array(data_dict['y']))
        else:
            s = Signal(np.array(data_dict['y']))

        output_data = TimeseriesData(timebase=Timebase(data_dict['x']),
                                     signal=s,
                                     channels=ch)
        output_data.meta.update({'shot': self.shot})
        return output_data
Example #30
 def do_fetch(self):
     sig = self.conn.get(self.mds_path)
     dim = self.conn.get('DIM_OF(' + self.mds_path + ')')
     scl = 1.0
     coords = get_coords_for_channel(**self.__dict__)
     ch = Channel(self.config_name, coords)
     timedata = dim.data()
     output_data = TimeseriesData(timebase=Timebase(1e-9 * timedata),
                                  signal=scl * Signal(sig),
                                  channels=ch)
     output_data.meta.update({'shot': self.shot})
     if hasattr(self, 'mdsshot'):  # intended for checks - not yet used.
         output_data.mdsshot = self.mdsshot
     output_data.config_name = self.config_name
     output_data.utc = [timedata[0], timedata[-1]]
     #output_data.units = dat['units'] if 'units' in dat else ''
     debug_(pyfusion.DEBUG,
            level=1,
            key='W7M_do_fetch',
            msg='entering W7X MDS do_fetch')
     return (output_data)