Example #1
    def __init__(self,
                 rld_dir='',
                 out_dir='',
                 site_filter='',
                 encryption_pass='',
                 token='',
                 header_type='standard',
                 export_type='meas',
                 export_format='csv_zipped',
                 **kwargs):
        if check_platform() == 'win32':
            self.platform = 'win32'
            self.folder_split = '\\'
            self.rld_dir = windows_folder_path(rld_dir)
            self.out_dir = windows_folder_path(out_dir)
        else:
            self.platform = 'linux'
            self.folder_split = '/'
            self.rld_dir = linux_folder_path(rld_dir)
            self.out_dir = linux_folder_path(out_dir)
        self.encryption_pass = encryption_pass
        self.export_format = export_format
        self.export_type = export_type
        self.site_filter = site_filter
        self.header_type = header_type
        self.token = token

        # nrgApiUrl and tk (a stored token) are expected to be imported at module level elsewhere in the package
        self.NrgUrl = nrgApiUrl
        if len(tk) > 10 and len(self.token) < 10:
            self.token = tk
        if self.token == '':
            print(
                '\n\nA valid token is required to use the nrg_convert_api.\nPlease contact [email protected] for an API token'
            )
Example #2
    def _copy_txt_file(self):
        """copy TXT file from self.ScaledData to self.out_dir"""

        try:
            txt_file_name = os.path.basename(self._filename)[:-4] + '.txt'
            txt_file_path = os.path.join(self.ScaledData,txt_file_name)
            out_path = self.file_path_joiner.join([self.out_dir,txt_file_name])

        except Exception:
            print("could not build file paths for {0}".format(self._filename))

        if self.platform == 'linux':
            out_path = linux_folder_path(self.out_dir) + txt_file_name
            txt_file_path = ''.join([self.wine_folder, '/NRG/ScaledData/',txt_file_name])

        try:
            shutil.copy(txt_file_path, out_path)
            
            try:
                os.remove(txt_file_path)
            except:
                print("{0} remains in {1}".format(txt_file_name, self.ScaledData))

        except:
            import traceback
            print(traceback.format_exc())            
            print("Unable to copy {0} to {1}".format(txt_file_name,self.out_dir))
Example #3
 def __init__(self, rwd_dir='', out_dir='', filename='', encryption_pin='',
              sdr_path=r'C:/NRG/SymDR/SDR.exe',
              convert_type='meas', site_filter='', 
              wine_folder='~/.wine/drive_c/', **kwargs):
     if encryption_pin != '':
         self.command_switch = '/z'
     else:
         self.command_switch = '/q'
     self.encryption_pin = encryption_pin
     self.sdr_path = windows_folder_path(sdr_path)[:-1]
     self.root_folder = "\\".join(self.sdr_path.split('\\')[:-2])
     self.RawData = self.root_folder + '\\RawData\\'
     self.ScaledData = self.root_folder + '\\ScaledData\\'
     self.site_filter = site_filter
     self.rwd_dir = windows_folder_path(rwd_dir) # rwd_dir must be in Windows format, even if using Wine
     self.platform = check_platform()
     if self.platform == 'win32':
         self.out_dir = windows_folder_path(out_dir)
         self.file_path_joiner = '\\'
     else:
         self.out_dir = linux_folder_path(out_dir)
         self.check_sdr()
         self.file_path_joiner = '/'
     if filename != '':
         self.filename = filename
         self.single_file()
     self.wine_folder = wine_folder
Example #4
 def list_files(self):
     """
     get list of files in rwd_dir
     """
     self.dir_paths = []
     self.rwd_file_list = []
     if self.platform == 'win32':
         walk_path = self.rwd_dir
     else:
         walk_path = linux_folder_path(self.rwd_dir)
     for dirpath, subdirs, files in os.walk(walk_path):
         self.dir_paths.append(dirpath)
         for x in files:
             if x.startswith(self.site_filter) and x.lower().endswith('.rwd'):
                 self.rwd_file_list.append(x)
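The walk-and-filter logic in list_files is easy to check in isolation. A small standalone sketch of the same test (the directory and site number are made up):

import os

site_filter = '000110'            # hypothetical site number prefix
rwd_dir = '/path/to/rwd/files'    # hypothetical data directory

rwd_file_list = []
for dirpath, subdirs, files in os.walk(rwd_dir):
    for name in files:
        # same condition as list_files(): prefix match on the site filter,
        # case-insensitive match on the .rwd extension
        if name.startswith(site_filter) and name.lower().endswith('.rwd'):
            rwd_file_list.append(name)

print(len(rwd_file_list), 'RWD files matched')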
Example #5
    def __init__(self, rwd_dir='', out_dir='', filename='', encryption_pin='',
                 sdr_path=r'C:/NRG/SymDR/SDR.exe',
                 convert_type='meas', file_filter='', 
                 wine_folder='~/.wine/drive_c/', 
                 use_site_file=False, raw_mode=False, progress_bar=True, show_result=True, **kwargs):

        if encryption_pin != '':
            self.command_switch = '/z' # noqueue with pin
        else:
            self.command_switch = '/q' # noqueue (logger params)
        if use_site_file:
            self.command_switch = '/s' # silent (site file params)
        if raw_mode:
            self.command_switch = '/r' # raw mode export

        self.filename = filename
        self.progress_bar = progress_bar
        self.encryption_pin = encryption_pin
        self.sdr_path = windows_folder_path(sdr_path)[:-1]
        self.root_folder = "\\".join(self.sdr_path.split('\\')[:-2])
        self.RawData = self.root_folder + '\\RawData\\'
        self.ScaledData = self.root_folder + '\\ScaledData\\'
        self.file_filter = file_filter

        if 'site_filter' in kwargs and file_filter == '':
            self.file_filter = kwargs.get('site_filter')

        self.rwd_dir = windows_folder_path(rwd_dir) # rwd_dir must be in Windows format, even if using Wine
        self.show_result = show_result
        self.platform = check_platform()
        self.wine_folder = wine_folder
        self.check_sdr()

        if self.platform == 'win32':
            self.out_dir = windows_folder_path(out_dir)
            self.file_path_joiner = '\\'            
        else:
            self.out_dir = linux_folder_path(out_dir)
            self.file_path_joiner = '/'
        
        if self.filename:
            self.counter = 1
            self.rwd_dir = os.path.dirname(self.filename)
            self.file_filter = os.path.basename(self.filename)
            self.convert()
Example #6
 def copy_txt_file(self):
     """
     copy TXT file from self.ScaledData to self.out_dir
     """
     try:
         txt_file_name = self._filename.split('\\')[-1][:-4] + '.txt'
         txt_file_path = "\\".join([self.ScaledData,txt_file_name])
         out_path = self.file_path_joiner.join([self.out_dir,txt_file_name])
      except Exception:
          print("could not build file paths for {0}".format(self._filename))
     if self.platform == 'linux':
         out_path = linux_folder_path(self.out_dir) + txt_file_name
         txt_file_path = ''.join([self.wine_folder, 'NRG/ScaledData/',txt_file_name])
     try:
         shutil.copy(txt_file_path, out_path)
         try:
             os.remove(txt_file_path)
         except:
             print("{0} remains in {1}".format(txt_file_name, self.ScaledData))
     except:
         print("Unable to copy {0} to {1}".format(txt_file_name,self.out_dir))
Example #7
 def concat_txt(self,
                output_txt=False,
                txt_dir='',
                out_file='',
                file_type='meas',
                header='standard',
                site_filter='',
                **kwargs):
     """
     Will concatenate all text files in the txt_dir that match
     the site_filter argument. Note these are both blank by default.
     """
     self.site_filter = site_filter
     self.file_type = file_type
     if check_platform() == 'win32':
         self.txt_dir = windows_folder_path(txt_dir)
     else:
         self.txt_dir = linux_folder_path(txt_dir)
     first_file = True
     files = sorted(glob(self.txt_dir + '*.txt'))
     for f in files:
         if self.site_filter in f and self.file_type in f:
             print("Adding {0} ...\t\t".format(f), end="", flush=True)
             if first_file == True:
                 first_file = False
                 try:
                     base = sympro_txt_read(f)
                     print("[OK]")
                     pass
                 except IndexError:
                     print('Only standard SymPRO headertypes accepted')
                     break
             else:
                 file_path = f
                 try:
                     s = sympro_txt_read(file_path)
                     base.data = base.data.append(s.data, sort=False)
                     print("[OK]")
                 except:
                     print("[FAILED]")
                     print("could not concat {0}".format(file_path))
                     pass
         else:
             pass
     if output_txt == True:
         if out_file == "":
             out_file = datetime.today().strftime(
                 "%Y-%m-%d") + "_SymPRO.txt"
          base.data.to_csv(self.txt_dir + out_file, sep=',', index=False)
         self.out_file = out_file
     try:
         self.ch_info = s.ch_info
         self.ch_list = s.ch_list
         self.data = base.data.drop_duplicates(subset=['Timestamp'],
                                               keep='first')
         self.head = s.head
         self.site_info = s.site_info
      except UnboundLocalError:
          print("No files match to concatenate.")
          return None
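The concatenation pattern used here and in the later examples is: read the first file as the base, append each following file, then drop duplicate timestamps. A minimal pandas sketch of that idea, using pd.concat since DataFrame.append was removed in pandas 2.0:

import pandas as pd

# two exports that share one overlapping interval
a = pd.DataFrame({'Timestamp': ['2020-01-01 00:00', '2020-01-01 00:10'], 'ch1': [5.1, 5.3]})
b = pd.DataFrame({'Timestamp': ['2020-01-01 00:10', '2020-01-01 00:20'], 'ch1': [5.3, 5.0]})

combined = pd.concat([a, b], sort=False)
# keep the first occurrence of each timestamp, as concat_txt() does
combined = combined.drop_duplicates(subset=['Timestamp'], keep='first')
combined.reset_index(drop=True, inplace=True)
print(combined)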
Example #8
 def concat(self, output_txt=False, out_file='', file_filter=''):
     """
     combine exported rwd files (in txt format)
     """
     self.file_filter = file_filter
     if check_platform() == 'win32':
         self.txt_dir = windows_folder_path(self.txt_dir)
     else:
         self.txt_dir = linux_folder_path(self.txt_dir)
     first_file = True
     files = sorted(glob(self.txt_dir + '*.txt'))
     self.file_count = len(files)
     self.pad = len(str(self.file_count)) + 1
     # probably create dataframe of any channel changes
     ## presence of log file indicates sensor change? maybe?
     self.counter = 1
     for f in files:
         if self.file_filter in f:
             print("Adding  {0}/{1}  {2}  ...  ".format(
                 str(self.counter).rjust(self.pad),
                 str(self.file_count).ljust(self.pad), f),
                   end="",
                   flush=True)
             if first_file == True:
                 first_file = False
                 try:
                     base = read_text_data(filename=f,
                                           data_type=self.data_type,
                                           file_filter=self.file_filter,
                                           file_ext=self.file_ext,
                                           sep=self.sep)
                     print("[OK]")
                     pass
                 except IndexError:
                     print('Only standard headertypes accepted')
                     break
             else:
                 file_path = f
                 try:
                     s = read_text_data(filename=f,
                                        data_type=self.data_type,
                                        file_filter=self.file_filter,
                                        file_ext=self.file_ext,
                                        sep=self.sep)
                     base.data = base.data.append(s.data, sort=False)
                     print("[OK]")
                 except:
                     print("[FAILED]")
                     print("could not concat {0}".format(file_path))
                     pass
         else:
             pass
         self.counter += 1
     if output_txt == True:
         if out_file == "":
             out_file = datetime.today().strftime(
                 "%Y-%m-%d") + "_SymPRO.txt"
          base.data.to_csv(self.txt_dir + out_file, sep=',', index=False)
         self.out_file = out_file
     try:
         self.ch_info = s.ch_info
         self.ch_list = s.ch_list
         self.data = base.data.drop_duplicates(
             subset=[self.header_sections['data_header']], keep='first')
         #self.head = s.head
         self.site_info = s.site_info
      except UnboundLocalError:
          print("No files match to concatenate.")
          return None
Example #9
    def concat_txt(self,
                   txt_dir='',
                   output_txt=False,
                   out_file='',
                   file_filter='',
                   file_filter2='',
                   start_date='1970-01-01',
                   end_date='2150-12-31',
                   progress_bar=True):
        """
        """
        from glob import glob

        self.txt_dir = txt_dir
        self.output_txt = output_txt
        self.out_file = out_file
        self.file_filter = file_filter
        self.file_filter2 = file_filter2
        self.start_date = start_date
        self.end_date = end_date

        if check_platform() == 'win32':
            self.txt_dir = windows_folder_path(txt_dir)
        else:
            self.txt_dir = linux_folder_path(txt_dir)
        first_file = True
        files = [
            f for f in sorted(glob(self.txt_dir + "*"))\
            if self.file_filter in f and self.file_filter2 in f\
            and date_check(self.start_date, self.end_date, f)
        ]
        self.file_count = len(files)
        self.pad = len(str(self.file_count))
        self.counter = 1
        self.start_time = datetime.now()
        for f in files:
            if self.file_filter in f and self.file_filter2 in f:
                if progress_bar:
                    draw_progress_bar(self.counter, self.file_count,
                                      self.start_time)
                else:
                    print("Adding {0}/{1}  ...  {2} ".format(
                        str(self.counter).rjust(self.pad),
                        str(self.file_count).ljust(self.pad), f),
                          end="",
                          flush=True)
                if first_file == True:
                    first_file = False
                    try:
                        base = spidar_data_read(f)
                        if progress_bar == False: print("[OK]")
                        pass
                    except IndexError:
                        print('Only standard Spidar headertypes accepted')
                        break
                else:
                    file_path = f
                    try:
                        s = spidar_data_read(file_path)
                        base.data = base.data.append(s.data, sort=False)
                        if progress_bar == False: print("[OK]")
                    except Exception as e:
                        if progress_bar == False: print("[FAILED]")
                        print("could not concat {0}".format(file_path))
                        print(e)
                        pass
            else:
                pass
            self.counter += 1
        if out_file != "":
            self.out_file = out_file
        if output_txt == True:
            base.data.to_csv(self.txt_dir + out_file, sep=',', index=False)

        try:
            self.base = base
            self.heights = base.heights
            self.data = base.data.drop_duplicates(subset=['Timestamp'],
                                                  keep='first')
            self.data.reset_index(drop=True, inplace=True)
        except Exception as e:
            print("No files match to contatenate.")
            print(e)
            return None
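Examples #9 and #11 pre-filter candidate files with a date_check() helper before reading them. Its implementation is not shown in these snippets; the rough sketch below only illustrates the idea of a filename-based date filter and is not the real nrgpy function (the YYYY-MM-DD filename convention is an assumption):

import re
from datetime import datetime

def date_check_sketch(start_date, end_date, filename):
    """Illustrative only: keep the file if a YYYY-MM-DD date in its name is in range."""
    match = re.search(r'(\d{4}-\d{2}-\d{2})', filename)
    if not match:
        return True  # no date in the name: do not exclude the file
    file_date = datetime.strptime(match.group(1), '%Y-%m-%d')
    start = datetime.strptime(start_date, '%Y-%m-%d')
    end = datetime.strptime(end_date, '%Y-%m-%d')
    return start <= file_date <= end

print(date_check_sketch('2020-01-01', '2020-01-31', '000110_2020-01-15_meas.txt'))  # True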
Example #10
    def concat(self, output_txt=False, out_file='', file_filter='', filter2='', progress_bar=True):
        """combine exported rwd files (in txt format)

        parameters
        ----------
        output_txt : bool
            set to True to save a concatenated text file
        out_file : str
            filepath, absolute or relative
        file_filter : str
            text filter applied to file names (e.g. a site number)
        filter2 : str
            secondary text filter applied to file names
        progress_bar : bool
            show a progress bar [True] or list each file as it is added [False]
        """
        self.file_filter = file_filter

        if self.filter2 == '':
            self.filter2 = filter2

        if check_platform() == 'win32':
            self.txt_dir = windows_folder_path(self.txt_dir)
        else:
            self.txt_dir = linux_folder_path(self.txt_dir)

        first_file = True
        files = sorted(glob(self.txt_dir + '*.txt'))

        self.file_count = len(files)
        self.pad = len(str(self.file_count)) + 1

        self.counter = 1
        self.start_time = datetime.now()

        for f in files:

            if self.file_filter in f and self.filter2 in f:

                if progress_bar:
                    draw_progress_bar(self.counter, self.file_count, self.start_time)
                else:
                    print("Adding  {0}/{1}  {2}  ...  ".format(str(self.counter).rjust(self.pad),str(self.file_count).ljust(self.pad),f), end="", flush=True)

                if first_file == True:
                    first_file = False

                    try:
                        base = read_text_data(filename=f,data_type=self.data_type,
                                                   file_filter=self.file_filter, file_ext=self.file_ext,
                                                   sep=self.sep)
                        if progress_bar != True: print("[OK]")
                        pass

                    except IndexError:
                        print('Only standard headertypes accepted')
                        break

                else:
                    file_path = f
                    try:
                        s = read_text_data(filename=f,data_type=self.data_type,
                                           file_filter=self.file_filter, file_ext=self.file_ext,
                                           sep=self.sep)
                        base.data = base.data.append(s.data, sort=False)
                        if progress_bar != True: print("[OK]")
                    except:
                        if progress_bar != True: print("[FAILED]")
                        print("could not concat {0}".format(file_path))
                        pass
            else:
                pass
            self.counter += 1

        if output_txt == True:

            if out_file == "":
                out_file = f"{self.data_type}_" + datetime.today().strftime("%Y-%m-%d") + ".txt"
            base.data.to_csv(out_file, sep=',', index=False)
            self.out_file = out_file

        try:
            self.ch_info = s.ch_info
            self.ch_list = s.ch_list
            self.data = base.data.drop_duplicates(subset=[self.header_sections['data_header']], keep='first')
            self.head = s.head
            self.site_info = s.site_info
            self.filename = s.filename
            self.site_number = self.filename.split("\\")[-1][:4]
            self.format_rwd_site_data()

        except UnboundLocalError:
            print("No files match to contatenate.")
            return None
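Example #10 recovers the site number by splitting the last exported filename on backslashes and taking the first four characters. A quick standalone check of that parsing (the path is made up):

filename = r'C:\NRG\ScaledData\0042_2020-01-15_meas.txt'  # hypothetical export path
site_number = filename.split('\\')[-1][:4]
print(site_number)  # prints: 0042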
Example #11
    def concat_txt(self,
                   txt_dir='',
                   file_type='meas',
                   file_filter='',
                   filter2='',
                   start_date='1970-01-01',
                   end_date='2150-12-31',
                   ch_details=False,
                   output_txt=False,
                   out_file='',
                   progress_bar=True,
                   **kwargs):
        """Will concatenate all text files in the txt_dir
        
        files must match the file_filter argument (site_filter is still accepted via kwargs). Note these are both blank by default.

        Parameters
        ----------
        txt_dir : str
            directory holding txt files
        file_type : str
            type of export (meas, event, comm, sample, etc...)
        file_filter : str
            text filter for txt files, like site number, etc.
        filter2 : str
            secondary text filter
        start_date : str
            for filtering files to concat based on date "YYYY-mm-dd"
        end_date : str
            for filtering files to concat based on date "YYYY-mm-dd"
        ch_details : bool
            show additional info in ch_info dataframe
        output_txt : bool
            create a txt output of data df
        out_file : str
            filename to write data dataframe to if output_txt = True
        progress_bar : bool
            show bar on concat [True] or list of files [False]

        Returns
        ---------
        ch_info : obj
            pandas dataframe of ch_list (below) pulled out of file with sympro_txt_read.arrange_ch_info()
        ch_list : list 
            list of channel info; can be converted to json w/ import json ... json.dumps(fut.ch_info)
        data : obj
            pandas dataframe of all data
        head : obj
            lines at the top of the txt file..., used when rebuilding timeshifted files
        site_info : obj
            pandas dataframe of site information
        logger_sn : str
        ipack_sn : str
        logger_type : str
        ipack_type : str
        latitude : float
        longitude : float
        elevation : int
        site_number : str
        site_description : str
        start_date : str
        txt_file_names : list
            list of files included in concatenation

        Examples
        --------
        Read files into nrgpy reader object

        >>> import nrgpy
        >>> reader = nrgpy.sympro_txt_read()
        >>> reader.concat_txt(
                txt_dir='/path/to/txt/files/',
                file_filter='123456', # site 123456
                start_date='2020-01-01',
                end_date='2020-01-31',
            )
        Time elapsed: 2 s | 33 / 33 [=============================================] 100%	
        Queue processed
        >>> reader.logger_sn
        '820600019'
        >>> reader.ch_info
         	Bearing: 	Channel: 	Description: 	Effective Date: 	Height: 	Offset: 	Scale Factor: 	Serial Number: 	Type: 	Units:
        0 	50.00 	    1 	        NRG S1 	        2020-01-31 00:00:00 	33.00 	0.13900 	0.09350 	    94120000059 	Anemometer 	m/s
        1 	230.00 	    2 	        NRG S1 	        2020-01-31 00:00:00 	0.00 	0.13900 	0.09350 	    94120000058 	Anemometer 	m/s
        2 	50.00 	    3 	        NRG S1 	        2020-01-31 00:00:00 	22.00 	0.13900 	0.09350 	    94120000057 	Anemometer 	m/s
        3 	230.00 	    4 	        NRG 40C Anem 	2020-01-31 00:00:00 	22.00 	0.35000 	0.76500 	    179500324860 	Anemometer 	m/s
        4 	50.00 	    5 	        NRG 40C Anem 	2020-01-31 00:00:00 	12.00 	0.35000 	0.76500 	    179500324859 	Anemometer 	m/s
        5 	230.00 	    6 	        NRG S1 	        2020-01-31 00:00:00 	12.00 	0.13900 	0.09350 	    94120000056 	Anemometer 	m/s
        6 	320.00 	    13 	        NRG 200M Vane 	2020-01-31 00:00:00 	32.00 	-1.46020 	147.91100 	    10700000125 	Vane 	        Deg
        7 	320.00 	    14 	        NRG 200M Vane 	2020-01-31 00:00:00 	21.00 	-1.46020 	147.91100 	    10700000124 	Vane 	        Deg
        8 	0.00 	    15 	        NRG T60 Temp 	2020-01-31 00:00:00 	34.00 	-40.85550 	44.74360 	    9400000705          Analog          C
        9 	0.00 	    16 	        NRG T60 Temp 	2020-01-31 00:00:00 	2.00 	-40.85550 	44.74360 	    9400000xxx          Analog          C
        10 	0.00 	    17 	        NRG RH5X Humi 	2020-01-31 00:00:00 	0.00 	0.00000 	20.00000 	    NaN 	        Analog          %RH
        11 	0.00 	    20 	        NRG BP60 Baro 	2020-01-31 00:00:00 	0.00 	495.27700 	243.91400 	    NaN 	        Analog          hPa
        12 	0.00 	    21 	        NRG BP60 Baro 	2020-01-31 00:00:00 	2.00 	495.04400 	244.23900 	    9396FT1937          Analog  	hPa
        """

        if 'site_filter' in kwargs and file_filter == '':
            self.file_filter = kwargs.get('site_filter')
        else:
            self.file_filter = file_filter

        self.ch_details = ch_details
        self.start_date = start_date
        self.end_date = end_date
        self.filter2 = filter2
        self.file_type = file_type
        self.txt_file_names = []

        if check_platform() == 'win32':
            self.txt_dir = windows_folder_path(txt_dir)
        else:
            self.txt_dir = linux_folder_path(txt_dir)

        first_file = True

        files = [
            f for f in sorted(glob(self.txt_dir + '*.txt'))\
            if self.file_filter in f and self.filter2 in f\
            and date_check(self.start_date, self.end_date, f)
        ]

        self.file_count = len(files)
        self.pad = len(str(self.file_count))
        self.counter = 1
        self.start_time = datetime.now()

        for f in files:

            if self.file_filter in f and self.file_type in f and self.filter2 in f:

                if progress_bar:
                    draw_progress_bar(self.counter, self.file_count,
                                      self.start_time)
                else:
                    print("Adding {0}/{1} ... {2} ... ".format(
                        str(self.counter).rjust(self.pad),
                        str(self.file_count).ljust(self.pad),
                        os.path.basename(f)),
                          end="",
                          flush=True)

                if first_file == True:
                    first_file = False

                    try:
                        base = sympro_txt_read(f)
                        if progress_bar != True: print("[OK]")
                        self.txt_file_names.append(os.path.basename(f))
                    except IndexError:
                        print('Only standard SymPRO headertypes accepted')
                        break

                else:
                    file_path = f

                    try:
                        s = sympro_txt_read(file_path,
                                            ch_details=self.ch_details)
                        base.data = base.data.append(s.data, sort=False)
                        if progress_bar != True: print("[OK]")
                        self.txt_file_names.append(os.path.basename(f))

                    except:
                        if progress_bar != True: print("[FAILED]")
                        print("could not concat {0}".format(
                            os.path.basename(file_path)))
                        pass
            else:
                pass
            self.counter += 1

        if out_file != "":
            self.out_file = out_file

        if output_txt == True:
            base.data.to_csv(os.path.join(txt_dir, out_file),
                             sep=',',
                             index=False)

        try:
            self.ch_info = s.ch_info
            self.ch_list = s.ch_list
            self.array = s.array
            self.data = base.data.drop_duplicates(subset=['Timestamp'],
                                                  keep='first')
            self.data.reset_index(drop=True, inplace=True)
            self.head = s.head
            self.site_info = s.site_info
            self.format_site_data()
            print("\n")

        except UnboundLocalError:
            print("No files match to contatenate.")
            return None
Example #12
    def __init__(self, rld_dir='', out_dir='', filename='', site_filter='',
                 filter2 = '', start_date='1970-01-01', end_date='2150-12-31',
                 client_id='', client_secret='', token='', 
                 encryption_pass='', header_type='standard', nec_file='',
                 export_type='meas', export_format='csv_zipped', **kwargs):    
        
        if check_platform() == 'win32':
            self.platform = 'win32'
            self.folder_split = '\\'
            self.rld_dir = windows_folder_path(rld_dir)
            self.out_dir = windows_folder_path(out_dir)
        
        else:
            self.platform = 'linux'
            self.folder_split = '/'
            self.rld_dir = linux_folder_path(rld_dir)
            self.out_dir = linux_folder_path(out_dir)
        
        self.encryption_pass = encryption_pass
        self.export_format = export_format
        self.export_type = export_type
        self.site_filter = site_filter
        
        if 'file_filter' in kwargs and site_filter == '':
            self.file_filter = kwargs.get('file_filter')
            self.site_filter = self.file_filter
        
        self.filter2 = filter2
        self.start_date = start_date
        self.end_date = end_date
        self.header_type = header_type
        self.nec_file = nec_file
        self.token = token
        self.client_id = client_id
        self.client_secret = client_secret
        
        affirm_directory(self.out_dir)

        if self.client_id and self.client_secret:

            self.session_token, self.session_start_time = maintain_session_token(client_id=self.client_id, client_secret=self.client_secret)
        
            if self.session_token:
                self.convert_url = ConvertServiceUrl
            else:
                print("Unable to get session token for conversion")                

        else:
            self.convert_url = nrgApiUrl
            print("[Deprecation warning]\n------------------------------------------------------")
            print(" NRGPy Convert API will require a client_id and \n client_secret starting March 15, 2020.")
            print(" Please contact [email protected] or visit \n https://services.nrgsystems.com to sign up.")
            print("------------------------------------------------------\n")            
    
        if len(tk) > 10 and len(self.token) < 10:
            self.token = tk

        if (not self.token) and (not self.client_id) and (not self.client_secret):
            print('[Access error] Valid credentials are required.\nPlease contact [email protected] or visit \nhttps://services.nrgsystems.com for API access')

        if filename != '':
            self.pad = 1
            self.counter = 1
            self.raw_count = 1
            self.progress_bar=False
            self.start_time = datetime.now()
            self.single_file(filename)
Example #13
 def concat_txt(self,
                output_txt=False,
                txt_dir='',
                out_file='',
                file_type='meas',
                header='standard',
                file_filter='',
                **kwargs):
     """
     Will concatenate all text files in the txt_dir that match
     the site_filter argument. Note these are both blank by default.
     """
     if 'ch_details' in kwargs:
         self.ch_details = kwargs.get('ch_details')
     if 'site_filter' in kwargs and file_filter == '':
         self.file_filter = kwargs.get('site_filter')
     else:
         self.file_filter = file_filter
     self.file_type = file_type
     if check_platform() == 'win32':
         self.txt_dir = windows_folder_path(txt_dir)
     else:
         self.txt_dir = linux_folder_path(txt_dir)
     first_file = True
     files = [
         f for f in sorted(glob(self.txt_dir + '*.txt'))
         if self.file_filter in f
     ]
     self.file_count = len(files)
     self.pad = len(str(self.file_count))
     self.counter = 1
     for f in files:
         if self.file_filter in f and self.file_type in f:
             print("Adding {0}/{1}  ...  {2}".format(
                 str(self.counter).rjust(self.pad),
                 str(self.file_count).ljust(self.pad), f),
                   end="",
                   flush=True)
             if first_file == True:
                 first_file = False
                 try:
                     base = sympro_txt_read(f)
                     print("[OK]")
                     pass
                 except IndexError:
                     print('Only standard SymPRO headertypes accepted')
                     break
             else:
                 file_path = f
                 try:
                     s = sympro_txt_read(file_path,
                                         ch_details=self.ch_details)
                     base.data = base.data.append(s.data, sort=False)
                     print("[OK]")
                 except:
                     print("[FAILED]")
                     print("could not concat {0}".format(file_path))
                     pass
         else:
             pass
         self.counter += 1
     if out_file != "":
         self.out_file = out_file
     if output_txt == True:
          base.data.to_csv(self.txt_dir + out_file, sep=',', index=False)
     try:
         self.ch_info = s.ch_info
         self.ch_list = s.ch_list
         self.array = s.array
         self.data = base.data.drop_duplicates(subset=['Timestamp'],
                                               keep='first')
         self.data.reset_index(drop=True, inplace=True)
         self.head = s.head
         self.site_info = s.site_info
     except UnboundLocalError:
         print("No files match to contatenate.")
         return None