Example 1
    def doRollover(self):
        if self.stream and not self.stream.closed:
            self.stream.close()

        # Get the time that this sequence started at and make it a TimeTuple
        rollover_timestamp = self.rolloverAt - self.interval
        rollover_timetuple = (time.gmtime(rollover_timestamp)
                              if self.utc
                              else time.localtime(rollover_timestamp))

        rollover_filename = '{}-{}.log'.format(
            splitext(self.baseFilename)[0],
            time.strftime(self.suffix, rollover_timetuple),
        )

        if exists(self.baseFilename):
            # Backup current log file
            rename_file(self.baseFilename, rollover_filename + '.bak')

            # gzip backup log file
            self._deferred_save_archive(rollover_filename)

        # Reopen the stream in write mode and schedule the next rollover
        self.mode = 'w'
        self.stream = self._open()
        self._compute_next_rollover()
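
The attributes used above (rolloverAt, interval, suffix, baseFilename) come from logging.handlers.TimedRotatingFileHandler, so this looks like a doRollover override on a subclass; _deferred_save_archive and _compute_next_rollover are the project's own helpers. A minimal sketch of how such a handler could be wired up, with a hypothetical class name:

import logging
from logging.handlers import TimedRotatingFileHandler

class ArchivingTimedRotatingFileHandler(TimedRotatingFileHandler):  # hypothetical name
    def doRollover(self):
        ...  # body as in the example above

logger = logging.getLogger('app')
logger.addHandler(ArchivingTimedRotatingFileHandler('app.log', when='midnight', utc=True))
logger.warning('rollover fires automatically once the interval elapses')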
Example 2
    def merge(self, segment1, segment2):
        ''' (self, str, str) -> str
        Concatenates the contents of the files represented by segment1 and
        segment2, erases the second segment file, and returns the name of
        the first segment.
        '''
        path1 = self.segments_directory + segment1
        path2 = self.segments_directory + segment2
        new_path = self.segments_directory + 'temp'

        with open(new_path, 'w') as s0, \
                open(path1, 'r') as s1, \
                open(path2, 'r') as s2:
            line1, line2 = s1.readline(), s2.readline()
            while not (line1 == '' and line2 == ''):
                # At the end of a file stream readline() returns ''
                key1, key2 = line1.split(',')[0], line2.split(',')[0]

                if key1 == '' or key1 == key2:
                    # Segment 1 exhausted, or duplicate key: keep the
                    # second segment's line
                    s0.write(line2)
                    line1 = s1.readline()
                    line2 = s2.readline()
                elif key2 == '' or key1 < key2:
                    s0.write(line1)
                    line1 = s1.readline()
                else:
                    s0.write(line2)
                    line2 = s2.readline()

        # Remove the old segments and replace the first segment with the new one
        remove_file(path1)
        remove_file(path2)
        rename_file(new_path, path1)

        return segment1
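
Segments here evidently hold comma-separated key,value lines sorted by key, with a duplicate key resolved in favour of the second segment. A self-contained sketch of the same two-pointer merge over in-memory lists (data and names are illustrative only):

def merge_lines(lines1, lines2):
    # Two-pointer merge of sorted 'key,value' lines; on equal keys
    # the second list's line wins, mirroring the method above.
    out, i, j = [], 0, 0
    while i < len(lines1) or j < len(lines2):
        key1 = lines1[i].split(',')[0] if i < len(lines1) else None
        key2 = lines2[j].split(',')[0] if j < len(lines2) else None
        if key1 is None or key1 == key2:
            out.append(lines2[j])
            j += 1
            if key1 == key2:
                i += 1
        elif key2 is None or key1 < key2:
            out.append(lines1[i])
            i += 1
        else:
            out.append(lines2[j])
            j += 1
    return out

print(merge_lines(['a,1\n', 'b,2\n'], ['b,9\n', 'c,3\n']))
# -> ['a,1\n', 'b,9\n', 'c,3\n'] (the second segment's 'b' wins)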
Example 3
def backup(args=None):
    if not backups:
        return print('No backups available.')
    while True:
        filename, idx = choice(
            "Choose a backup file by a number on the left or type 'exit' to leave the menu.\n",
            backups, ChoiceReturnType.option_and_index)
        if filename is None:
            return print('No backup file chosen. Exited the menu.')
        print(f'File {filename} selected.')
        option = choice('Select an option', ['Load', 'Delete', 'Cancel'],
                        ChoiceReturnType.index)
        if option in [None, 2]:
            # cancel
            print('Operation cancelled.')
            continue
        if option == 1:
            # delete
            # ensure that 'trash' folder exists
            Path(data_('trash')).mkdir(parents=True, exist_ok=True)
            # move selected file to trash
            rename_file(data_(filename), join(DATA_DIR, 'trash', filename))
            del backups[idx]
            print("Selected file was moved to the app's trash folder.")
            continue
        # load
        return print('Data loaded successfully.' if load_ciphertext(
            data_(filename)) else 'Operation aborted.')
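
choice, ChoiceReturnType, data_, load_ciphertext, backups and DATA_DIR are project helpers defined elsewhere. A hypothetical sketch of the contract the menu above assumes from choice; the real implementation may differ:

from enum import Enum

class ChoiceReturnType(Enum):
    index = 0             # return the chosen index only
    option_and_index = 1  # return an (option, index) tuple

def choice(prompt, options, return_type):
    # Hypothetical: show a numbered menu, loop until a valid number
    # or 'exit' is typed; 'exit' yields None (or a (None, None) tuple).
    for i, option in enumerate(options, start=1):
        print('{}. {}'.format(i, option))
    while True:
        answer = input(prompt)
        if answer == 'exit':
            if return_type is ChoiceReturnType.option_and_index:
                return None, None
            return None
        if answer.isdigit() and 1 <= int(answer) <= len(options):
            idx = int(answer) - 1
            if return_type is ChoiceReturnType.option_and_index:
                return options[idx], idx
            return idx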
Example 4
    def download(self, year, force=False):
        filename = self.filename(year)
        if not force and filename.exists():  # File has already been downloaded
            return {"downloaded": False, "filename": filename}

        url = self.url(year)
        file_data = download_file(url, progress=True)
        rename_file(file_data.uri, filename)
        return {"downloaded": True, "filename": filename}
Example 5
    def _next_filename(self, count, extension):
        for i in range(self.backupCount - 1, 0, -1):
            sfn = '{}.{}.gz'.format(self.baseFilename, i)
            dfn = '{}.{}.gz'.format(self.baseFilename, i + 1)

            if exists(sfn):
                if exists(dfn):
                    delete_file(dfn)

                rename_file(sfn, dfn)

        # After the shift, the '.1' slot is free for the newest backup
        dfn = self.baseFilename + '.1.gz'
        return dfn
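
This is the classic shift-up rotation that logging.handlers.RotatingFileHandler performs, adapted to gzipped backups: each .i.gz becomes .i+1.gz, the oldest is dropped, and the '.1' slot is left free. A standalone sketch of one full pass (names are illustrative, stdlib calls only):

import gzip
import os

def rotate(base, backup_count=3):
    # Shift existing backups up one slot, dropping the oldest
    for i in range(backup_count - 1, 0, -1):
        sfn = '{}.{}.gz'.format(base, i)
        dfn = '{}.{}.gz'.format(base, i + 1)
        if os.path.exists(sfn):
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(sfn, dfn)
    # Gzip the current log into the now-free '.1' slot
    with open(base, 'rb') as src, gzip.open(base + '.1.gz', 'wb') as dst:
        dst.writelines(src)
    os.remove(base)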
Example 6
    def rename_segment_files(self, result):
        ''' (self, [str]) -> [str]
        Renames the segment files on disk to make sure that their suffixes
        are in proper ascending order.
        '''
        corrected_names = self.rename_segments(result)
        for idx, segment in enumerate(result):
            old_path = self.segments_dir_name + segment
            new_path = self.segments_dir_name + corrected_names[idx]
            rename_file(old_path, new_path)

        return corrected_names
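
rename_segments (the pure renumbering step) is defined elsewhere. A hypothetical sketch of what it presumably computes, assuming a 'name-N' suffix convention that may not match the real naming scheme:

def rename_segments(segments):
    # Hypothetical: renumber suffixes 1..n, preserving the current order
    prefix = segments[0].rsplit('-', 1)[0]
    return ['{}-{}'.format(prefix, i) for i in range(1, len(segments) + 1)]

print(rename_segments(['segment-4', 'segment-9', 'segment-12']))
# -> ['segment-1', 'segment-2', 'segment-3']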
Example 7
    def delete_keys_from_segment(self, deletion_keys, segment_path):
        ''' (self, set(str), str) -> None
        Removes the lines whose key is in deletion_keys from the file stored
        at segment_path.

        The method achieves this by writing the lines to keep to a new
        temporary file, then deleting the old version and replacing it with
        the temporary one. This strategy is chosen to avoid loading the
        whole segment into memory.
        '''
        temp_path = segment_path + '_temp'

        with open(segment_path, "r") as input:
            with open(temp_path, "w") as output:
                for line in input:
                    key, value = line.split(',')
                    if not key in deletion_keys:
                        output.write(line)

        remove_file(segment_path)
        rename_file(temp_path, segment_path)
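
A runnable end-to-end check of the same compaction step, with stdlib os calls standing in for the project's remove_file/rename_file and an illustrative file name:

import os

def delete_keys_standalone(deletion_keys, segment_path):
    # Same keep-the-rest rewrite as the method above
    temp_path = segment_path + '_temp'
    with open(segment_path) as infile, open(temp_path, 'w') as outfile:
        for line in infile:
            if line.split(',', 1)[0] not in deletion_keys:
                outfile.write(line)
    os.remove(segment_path)
    os.rename(temp_path, segment_path)

with open('segment-demo', 'w') as f:
    f.write('a,1\nb,2\nc,3\n')
delete_keys_standalone({'b'}, 'segment-demo')
with open('segment-demo') as f:
    print(f.read())  # -> "a,1\nc,3\n" (the 'b' line is gone)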
Example 8
def main():
    global progress_bar

    # get the list of resolvers
    res = requests.get(NS_LIST_URL)
    if res.status_code == 200:
        # perform a baseline test to compare against
        sanity_check = perform_lookup(config.baseline_server,
                                      config.query_domain,
                                      tries=5)

        if sanity_check is not None:
            sanity_check = set(sanity_check)

            all_resolvers = res.content.decode().splitlines()
            initial_resolvers = []

            if config.no_clean:
                # skip cleaning
                initial_resolvers = all_resolvers
            else:
                # remove false positives: drop lines that any clean_regex
                # reduces to an empty string
                for line in all_resolvers:
                    replace_result = [
                        bool(re.sub(regex, '', line))
                        for regex in config.clean_regex
                    ]
                    if all(replace_result):
                        initial_resolvers.append(line)

            # remove any existing output_file
            if path_exists(config.output_file):
                if config.keep_old:
                    name, ext = split_path(config.output_file)
                    backup_name = '{}-{}{}'.format(name, uuid4().hex, ext)
                    print('[*] Output file already exists, renaming {} to {}'.
                          format(config.output_file, backup_name))

                    rename_file(config.output_file, backup_name)

                    # path still exists, rename failed
                    if path_exists(config.output_file):
                        print('[!] Rename failed, outputting to {} instead!'.
                              format(backup_name))
                        config.output_file = backup_name
                else:
                    print('[*] Overwriting existing output file {}'.format(
                        config.output_file))
                    remove_file(config.output_file)

            # create progress bar if not verbose mode
            if not config.verbose:
                progress_bar = tqdm(total=len(initial_resolvers),
                                    unit='resolvers')

            # create a thread pool and start the workers
            thread_pool = ThreadPool(config.job_count)
            workers = []
            for resolver in initial_resolvers:
                w = thread_pool.apply_async(check_resolver,
                                            (resolver, sanity_check),
                                            callback=callback)
                workers.append(w)

            # ensure all workers complete
            for w in workers:
                w.get()

            thread_pool.close()
            thread_pool.join()

            if not config.verbose:
                progress_bar.close()
        else:
            print(
                'Error performing baseline sanity check! (DNS lookup {} using {})'
                .format(config.query_domain, config.baseline_server))
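
perform_lookup, check_resolver and callback are defined elsewhere in the script. A hypothetical sketch of perform_lookup using dnspython, guessing at the contract the code above relies on (return the answer records, or None when every attempt fails):

import dns.exception
import dns.resolver  # pip install dnspython

def perform_lookup(server, domain, tries=1):
    # Hypothetical: query `domain` against `server`, retrying up to
    # `tries` times; None signals that no attempt succeeded.
    resolver = dns.resolver.Resolver(configure=False)
    resolver.nameservers = [server]
    resolver.lifetime = 3  # per-query timeout in seconds
    for _ in range(tries):
        try:
            return [rr.to_text() for rr in resolver.resolve(domain)]
        except dns.exception.DNSException:
            continue
    return None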