    def __init__(self, name, mclass='Measurement'):

        # TODO: option to load measurements

        self.name = name  # name to identify the measurement (meta information)

        # short token to identify the kind of measurement
        self.mclass = mclass

        # the prefix for all saved data
        self.save_filebase = self.mclass
        _bp = dh.dummy_qtlab_measurement(self.mclass + '_' + self.name)
        self.save_folder, _tmp = os.path.split(_bp)
        self.dataset_idx = 0

        self.measurement_devices = []
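
    # --- usage sketch (illustrative; not part of the original class) ---
    # Constructing a measurement might look like this; the names 'rabi' and
    # 'ESR' are hypothetical and chosen only for illustration:
    #
    #   m = Measurement('rabi', mclass='ESR')
    #   # m.save_filebase is now 'ESR', and m.save_folder is the directory
    #   # part of the path returned by dh.dummy_qtlab_measurement('ESR_rabi')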
    def save_dataset(self, name='', folder=None, data={}, formats=['npz'], do_plot=True,
            files=[], txt={}, idx_increment=True, script_save_stack_depth=2,
            index_pad=3):
        """
        Automatic data saving for measurements. For details on what is
        actually plotted, and how, see the tools.data_handling module.

        Arguments (all optional):

            folder (None) : path
                where to save the data. if not given, we use the previously set
                folder; if none has been set yet, a new one with the usual
                naming scheme is created.

            data ( {} ) : dict
                expects data in the form { 'array_name' : ndarray, }
                all arrays will be saved into one npz file

            formats ( [ 'npz', ] ) : list of format identifiers
                only 'npz' supported at the moment
                
            do_plot ( True ) : bool
                whether to autoplot the saved data. see the data_handling
                docs for details; we only pass this on to the respective
                format's save method there

            files ( [] ) : list of file paths
                files in this list are copied to the save folder

            txt ( {} ) : dict of text comments
                any entry in the form { 'filename' : 'content', }
                will be saved as txt file in the save folder
        """

        if folder is not None:
            if not os.path.isdir(folder):
                try:
                    os.makedirs(folder)
                    self.save_folder = os.path.abspath(folder)
                except OSError:
                    print('invalid save directory! will autodetermine one.')
            else:
                self.save_folder = os.path.abspath(folder)
        
        if self.save_folder == '':
            _bp = dh.dummy_qtlab_measurement(self.mclass + '_' + self.name)
            self.save_folder, _tmp = os.path.split(_bp)

        
        # this is the basepath for the files we're about to save
        self.basepath = os.path.join( self.save_folder, 
                (self.save_filebase+'-%.'+str(index_pad)+'d') % \
                        self.dataset_idx )

        # we'd like to have the option to save several datasets from one
        # measurement run, where they share the same statistics, parameters,
        # and meta information, and just the data is distributed over several
        # files
        if name != '':
            self.data_basepath = self.basepath + '_%s' % name    
        else:
            self.data_basepath = self.basepath        
        
        if 'npz' in formats:
            dh.save_npz_data(self.data_basepath, filepath=self.save_folder, 
                    do_plot=do_plot, **data)
        else:
            print('no supported save formats given, data not saved.')

        for txtfile in txt:
            with open(self.basepath + '_%s.txt' % txtfile, 'w') as t:
                t.write(txt[txtfile])

        # to keep our folders clean, save extra data in a supplementaries
        # folder; we usually assume this is only one set per measurement
        # (and thus folder)
        supplfolder = os.path.join(self.save_folder, (SUPPL_DIRNAME+'-%.'+ \
                str(index_pad)+'d') % (self.dataset_idx))
        if len(files) > 0 and not os.path.isdir(supplfolder):
            try:
                os.makedirs(supplfolder)
            except OSError:
                print('could not create separate folder for supplementals!')
                supplfolder = self.save_folder

        # auto copy the calling script files (up the call stack) to the save folder
        for i in range(script_save_stack_depth):
            shutil.copy(inspect.stack()[i][1], self.save_folder)

        # copy the given files to the folder
        for f in files:
            shutil.copy(f, supplfolder)

        # save params in a pickle file; we take all instance attributes of
        # type int, float, str, or numpy.ndarray
        params = [p for p in self.__dict__
                  if type(getattr(self, p)) in [int, float, str, np.ndarray]]

        params_dict = {p: getattr(self, p) for p in params}
        with open(self.basepath + '_%s.pkl' % PARAMS_FNAME, 'wb') as params_pickle:
            pickle.dump(params_dict, params_pickle)
  
        
        # save data from measurement devices
        for device in self.measurement_devices:
            devicefolder = os.path.join(self.save_folder, (device.name +
                    '-%.' + str(index_pad) + 'd') % (self.dataset_idx))
            try:
                os.makedirs(devicefolder)
            except OSError:
                print('could not create data folder for instrument %s' %
                        device.name)
                devicefolder = self.save_folder

            savdat = device.get_save_data()
            for data_set in savdat:
                np.savez(os.path.join(devicefolder, data_set), 
                        **savdat[data_set])
        
        
        if idx_increment:
            self.dataset_idx += 1
        return
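
    # --- usage sketch (illustrative; not part of the original code) ---
    # A hypothetical run saving one data array plus a note could look like
    # the following (using the measurement m from the sketch above); the
    # 'counts' array and the 'notes' entry are made up for this example:
    #
    #   m.save_dataset(
    #       name='sweep1',                    # -> <basepath>_sweep1 npz data
    #       data={'counts': np.arange(10)},   # saved into one npz file
    #       txt={'notes': 'example run'},     # written as <basepath>_notes.txt
    #       do_plot=False)
    #
    # The pickled parameters can later be restored with, e.g.:
    #
    #   with open(m.basepath + '_%s.pkl' % PARAMS_FNAME, 'rb') as f:
    #       params = pickle.load(f)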