Example #1
    def run(self):

        # EveLogHandler watches the configured Eve chat log channels
        self.eveloghandler_watchdog = eveloghandler.EveLogHandler(
            self.watched_channels, self.ignore_patterns,
            self.ignore_directories, self.case_sensitive, self.configuration,
            self.logger)
        self.eveloghandler_watchdog.message_ready.connect(
            self.test_catch_connection)
        # Observer
        self.watchdog_observer = Observer()
        self.watchdog_observer.schedule(
            self.eveloghandler_watchdog,
            self.configuration.value['eve_log_location'], False)
        self.watchdog_observer.start()

        while not self.event_stop.is_set():
            # Windows workaround: Eve doesn't reliably trigger the watchdog
            # modify event on Windows, so touch the log files to force it.
            if self.platform == "windows":
                path = Path(self.configuration.value["eve_log_location"])
                for file in path.iterdir():
                    Path(file).touch()
                time.sleep(2)
            else:
                time.sleep(1)

        # when stopping: persist state and shut the observer down cleanly
        self.eveloghandler_watchdog.pickle_dict()
        self.watchdog_observer.stop()
        self.watchdog_observer.join()
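The loop above is presumably stopped from another thread by setting the event; a hedged sketch of the caller-side shutdown (everything here except event_stop is an assumption):

    worker.event_stop.set()   # ends the while-loop in run()
    worker.wait()             # QThread-style; use join() for a plain Thread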
Example #2
    def convert_to_csv_from_folder(self,
                                   dat_folder: str,
                                   csv_folder: Optional[str] = None) -> None:
        """Convert all dat in a folder to csv

        Iterate over the given folder
        Then normalize each .dat file found to .csv

        :param dat_folder: folder containing .dat files
        :param csv_folder: folder in which store CSV
        :return: None
        """
        folder = Path(dat_folder)

        if not folder.exists() \
                or not folder.is_dir():
            raise BadFileFormatException

        if not csv_folder:
            csv_folder = self.DEFAULT_OUTPUT_FOLDER

        for file in folder.iterdir():
            if file.suffix != Dat.ext:
                continue

            self.convert_to_csv(
                dat_path=str(file),
                csv_path=f'{csv_folder}{file.name.replace(Dat.ext, Csv.ext)}')
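A minimal call sketch, assuming `converter` is an instance of the (unnamed) class defining this method; note that csv_path is built by direct string concatenation, so csv_folder should end with a separator:

    converter = DatConverter()  # hypothetical: the defining class is not shown above
    converter.convert_to_csv_from_folder('measurements/', csv_folder='out/')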
Example #3
    def _get_system_wide_trusted_ca_certificates(self):
        trusted_cas, errors = set(), []
        for p in self.system_wide_trusted_ca_search_paths:
            cert_path = Path(p)

            if not cert_path.is_dir():
                continue

            for entry in cert_path.iterdir():
                cert_file_path = entry.absolute()
                try:
                    if entry.suffix not in [".pem", ".crt"]:
                        continue

                    trusted_cas.update(self._get_certificates_from_file(cert_file_path))
                except IOError:
                    logger.exception("Failed to read certificate %s", cert_file_path)

                    # This error is shown to the user as warning message during "activate changes".
                    # We keep this message for the moment because we think that it is a helpful
                    # trigger for further checking web.log when a really needed certificate can
                    # not be read.
                    #
                    # We know a permission problem with some files that are created by default on
                    # some distros. We simply ignore these files because we assume that they are
                    # not needed.
                    if cert_file_path == Path("/etc/ssl/certs/localhost.crt"):
                        continue

                    errors.append("Failed to add certificate '%s' to trusted CA certificates. "
                                  "See web.log for details." % cert_file_path)

            break  # only the first existing search path is scanned; order sets precedence

        return list(trusted_cas), errors
Example #4
def get_roi_from_gt(data_path, roi_file):
    data_path = Path(data_path)
    
    cases = sorted([d for d in data_path.iterdir() if d.is_dir()])
    case_idx = 0
    rois = {}
    for case in tqdm(cases, ascii=True, dynamic_ncols=True):
        img_file = case / 'imaging.nii.gz'
        assert img_file.exists()
        img = nib.load(str(img_file)).get_data()
        total_z, total_y, total_x = img.shape
        vol = {'total_x': total_x, 'total_y': total_y, 'total_z': total_z}
        case_data = {'vol': vol}
        
        seg_file = case / 'segmentation.nii.gz'
        if seg_file.exists():
            seg = nib.load(str(seg_file)).get_data()
            kidney = calc(seg, idx=1)
            tumor = calc(seg, idx=2)
            case_data.update({'kidney': kidney, 'tumor': tumor})
        
        rois[f'case_{case_idx:05d}'] = case_data
        
        # dump after every case so partial results survive an interruption
        with open(roi_file, 'w') as f:
            json.dump(rois, f, indent=4, separators=(',', ': '))
        
        case_idx += 1
Example #5
def main(src, dest):
    """links configfiles from one folder to another

    if links exists it verifies content
    if files exist at the target side it errors

    Args:
        src: source folder
        dest: target folder
    """
    src = Path(src)
    if not src.exists():
        print("WARNING:", src, "does not exist, skipping linking")
        return

    dest = Path(dest)

    for element in filter(_is_yaml_file, src.iterdir()):
        _warn_on_unknown_encryption(element)
        target = dest.joinpath(element.name)
        # the following is fragile
        if target.is_symlink():
            _warn_on_missmatching_symlink(src=element, target=target)
        elif target.is_file():
            _warn_on_existing_file(target)
        else:
            target.symlink_to(element.resolve())
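The `_is_yaml_file` predicate is not shown in the snippet; a plausible minimal version (an assumption, not necessarily the project's actual helper):

    def _is_yaml_file(path):
        # hypothetical: accept regular files with a YAML suffix
        return path.is_file() and path.suffix in ('.yaml', '.yml')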
Example #6
def upload_img(request):
    # handle the request when the method is POST
    if request.method == "POST":
        # get the uploaded file; default to None if nothing was uploaded
        print 'in post...'
        File = request.FILES.get("myfile", None)
        if File is None:
            return HttpResponse("no files for upload!")
        else:
            LIB_PATH = ABS_PATH + 'heliwu'
            # read the list of all lib names
            lib_path = Path(ABS_PATH)
            lib_names = []
            for lib in lib_path.iterdir():
                lib_names.append(lib.name)
            # open the target file for binary writing
            image_file_path = LIB_PATH + "/%s" % File.name
            with open(image_file_path, 'wb+') as f:
                # write the file in chunks
                for chunk in File.chunks():
                    f.write(chunk)
                return render(request,
                              'predict.html',
                              context={
                                  "lib_names": lib_names,
                                  'image_file_path': image_file_path
                              })
    else:
        print 'in get...'
        return render(request, 'upload_img.html')
Example #7
    def scrape(self, path: pathlib2.Path, depth: int) -> Tuple[FileNode, bool]:
        """
        Use recursion to scrape a given path and return a tree structure
        :param path: target file path to scrape
        :param depth: depth of node with respect to the root node
        :return: the scraped file node tree and whether any target files set by filters were found
        """
        children = []
        found_any = False
        paths = list(path.iterdir())
        for filter_ in self.filters:
            paths = filter_(paths)
        if depth != self.depth_limit:
            for filepath in paths:
                node = FileNode(filepath, depth=depth + 1, root=self.root)
                if ((filepath.is_symlink() or filepath.is_dir())
                        and node.get_id() not in self.history):
                    subtree, found_any_ = self.scrape(filepath, depth + 1)
                    if found_any_:
                        found_any = True
                    if self._keep_empty_dir or found_any_:
                        children.append(subtree)
                elif filepath.is_file():
                    children.append(node)
                    found_any = True
                self.history.add(node.get_id())
        return FileNode(path, children=children, depth=depth,
                        root=self.root), found_any
Example #9
    def download_images(self, services, registry, tag=None, nopull=False):
        """Download images from a remote registry and save to kard

        Args:
          * services: the name of the images to download
          * registry: a DockerRegistry instance
          * tag: the tag of the version to download
          * nopull: if True, skip pulling from the registry before saving
        """
        tag = tag or self.kard.meta['tag']

        save_path = Path(self.kard.path) / 'images'
        write('Cleaning images destination {}'.format(save_path))
        save_path.mkdir(exist_ok=True)
        for child in save_path.iterdir():
            child.unlink()

        if not nopull:
            self.pull_images(services, registry, tag=tag)

        for service in services:
            image_path = save_path / "{}.tar".format(service)
            image_name = self.make_image_name(service, tag)
            write('Saving {} to {}'.format(image_name, image_path))
            sys.stdout.flush()

            with open(image_path, 'wb') as f:
                for chunk in self.docker.get_image(image_name):
                    f.write(chunk)

            write(' Done!\n')
        write('All images have been saved successfully!\n')
Example #10
def move_flattened_files(src_dir, out_dir, filt):
    p = Path(src_dir)
    if p.is_dir():
        for f in p.iterdir():
            if f.is_file() and filt(f.absolute()):
                copyfile(str(f.absolute()), str(out_dir + u'/' + f.name))
            else:
                move_flattened_files(str(f.absolute()), out_dir, filt)
Example #11
def conversion_all(data, output):
    data = Path(data)
    output = Path(output)

    cases = sorted([d for d in data.iterdir() if d.is_dir()])
    pool = mp.Pool()
    pool.map(conversion, zip(cases, [output] * len(cases)))
    pool.close()
    pool.join()
Example #12
def _get_data(request, data_type):
    data_dir = Path(DATA_PATH)
    result = None
    for file in data_dir.iterdir():
        parts = file.stem.split('__')
        test_name = request.node.name.split('test_')[-1]
        test_name = test_name[:test_name.index('[')] if '[' in test_name else test_name
        if parts[0] == data_type:
            if parts[1] == test_name or (parts[1] == 'default' and result is None):
                result = yaml.safe_load(file.read_text())
    return result
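The lookup relies on fixture files named `<data_type>__<test_name>`, with `default` as a fallback; an illustrative layout (the file names here are assumptions, only the naming pattern is implied by the parsing above):

    # DATA_PATH/
    #   response__default.yaml   # fallback when no test-specific file exists
    #   response__login.yaml     # picked for test_login (and test_login[param] variants)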
Example #13
        def _get_cache_file(self, remote_url):
            """
            :param remote_url: check if we have the remote url in our cache
            :return: full path to file name, current file size or None
            """
            folder = Path(get_cache_dir() /
                          CacheManager._storage_manager_folder / self._context)
            folder.mkdir(parents=True, exist_ok=True)
            local_filename = self._get_hashed_url_file(remote_url)
            new_file = folder / local_filename
            if new_file.exists():
                new_file.touch(exist_ok=True)

            # delete old files
            def sort_max_access_time(x):
                atime = x.stat().st_atime
                # noinspection PyBroadException
                try:
                    if x.is_dir():
                        dir_files = list(x.iterdir())
                        atime = (max(
                            atime, max(
                                s.stat().st_atime
                                for s in dir_files)) if dir_files else atime)
                except Exception:
                    pass
                return atime

            files = sorted(folder.iterdir(),
                           reverse=True,
                           key=sort_max_access_time)
            files = files[self._file_limit:]
            for f in files:
                if not f.is_dir():
                    f.unlink()
                else:
                    try:
                        shutil.rmtree(f)
                    except Exception as e:
                        # failed deleting folder
                        LoggerRoot.get_base_logger().warning(
                            "Exception {}\nFailed deleting folder {}".format(
                                e, f))

            # if file doesn't exist, return file size None
            return (
                new_file.as_posix(),
                new_file.stat().st_size if new_file.exists() else None,
            )
Example #14
def test_all_files_cleaned_up(builder):
    builder.assign('x', 1)

    @builder
    def x_plus_one(x):
        return x + 1

    flow = builder.build()
    assert flow.get('x_plus_one') == 2

    flow = builder.build()
    assert flow.get('x_plus_one') == 2

    tmp_dir_path = Path(flow.get('core__persistent_cache__flow_dir')) / 'tmp'
    assert list(tmp_dir_path.iterdir()) == []
Example #15
def test_update_and_build():
    if quick_tests_only:
        return

    source = Path(__file__).parent / 'Examples'
    destination = Path(tempfile.mkdtemp()) / 'Examples'

    shutil.copytree(source, destination)

    project_paths = [path for path in destination.iterdir() if path.is_dir()]

    for project_path in project_paths:

        with work_directory(project_path):

            output = runner.check_run('punic update')
Example #17
def py_git_stat():
    # get the current working directory
    cwd = os.getcwd()
    # status messages are friends not food
    print('Listing git statuses for ' + cwd)
    # set up Path object in cwd to start workin'
    p = Path(cwd)
    # get subdirs of the cwd
    subdirs = [item for item in p.iterdir() if item.is_dir()]
    # then, see if each subdir contains ".git" and put it in git_repos list
    git_repos = []
    for subdir in subdirs:
        q = subdir / '.git'
        if q.exists():
            git_repos.append(subdir)

    # now that we have only top-level git repos, let's execute git status on all of them, then store the
    # command output

    status_results = {}
    for git_repo in git_repos:
        os.chdir(str(git_repo.resolve()))
        result = subprocess.check_output(['git', 'status'])
        status_results.update({git_repo.name: result.split('\n')})

    # let's print out some headers for the table. first, get width of the terminal
    if get_terminal_size()[0] != 0:
        term_width = get_terminal_size()[0]
    else:
        term_width = 80

    print_row_separators(term_width)

    # let's get the longest repo name and make that the width of the first column in the table.
    longest_repo_name = (get_longest_element(status_results)) + 1
    longest_repo_branch = (get_longest_element(status_results.values()[1])) + 1
    print('{0: <{longest_repo_name}}|{1}'.format(
        'git repository', 'repo branch', longest_repo_name=longest_repo_name))
    print_row_separators(term_width)
    for repo_name, repo_info in status_results.iteritems():
        print('{0: <{longest_repo_name}}|{1: <{longest_repo_branch}}'.format(
            repo_name,
            repo_info[0],
            longest_repo_name=longest_repo_name,
            longest_repo_branch=longest_repo_branch))
Example #18
def update_journal(rrdfile, rrdfilenew):
    journaldir = Path(cmk.utils.paths.omd_root, 'var/rrdcached/')
    for filepath in journaldir.iterdir():
        logger.info('- Updating journal file %s', filepath)
        new_file = filepath.with_suffix(filepath.suffix + '.new')
        try:
            with filepath.open('r') as old_jou, new_file.open('w') as new_jou:
                for line in old_jou:
                    if rrdfile in line:
                        line = line.replace(rrdfile, rrdfilenew)
                    new_jou.write(line)
        except Exception:
            new_file.unlink()
            raise
        finally:
            if new_file.exists():
                filepath.unlink()
                new_file.rename(filepath)
Example #19
def main(src, dest, force):
    """links configfiles from one folder to another

    if links exists it verifies content
    if files exist at the target side it errors

    Args:
        src: source folder
        dest: target folder
        force: override existing symlinks
    """
    src = Path(src)
    if not src.exists():
        print("WARNING:", src, "does not exist, skipping linking")
        return

    dest = Path(dest)

    for element in filter(_is_yaml_file, src.iterdir()):
        _warn_on_unknown_encryption(element)
        target = dest.joinpath(element.name)

        if force:
            try:
                target.symlink_to(element.resolve())
            except OSError as e:
                if e.errno == errno.EEXIST:
                    backup_target = Path(dest.joinpath(element.name + "_bak"))
                    print("Replacing", target.name, "and saving backup as", backup_target.name)
                    # Would use 'backup_target.replace()' here but that's only supported in py3
                    if backup_target.exists():
                        os.remove(str(backup_target))
                    target.rename(backup_target)

                    target.symlink_to(element.resolve())
                else:
                    raise
        else:
            if target.is_symlink():
                # If symlink already exists and points to same src, do nothing.
                _check_missmatching_symlink(src=element, target=target)
            elif _check_existing_file(target):
                target.symlink_to(element.resolve())
                print("Symlink created for", target.name)
Example #20
    def update(self, path):
        if type(path) != str:
            index = path[1]
            path = path[0]
        config = Path(path)
        tknzr = TweetTokenizer()
        lex, doc = dict(), dict()
        if config.is_file():  # if just a file
            with config.open() as f:
                sentences = sent_tokenize(f.read())
                document = path.split('/')[-1]
                doc[document] = list(sentences)
                for i, sentence in enumerate(sentences):
                    token_list = tknzr.tokenize(sentence)
                    for word in token_list:
                        if (word not in self.stopwords
                                and self.is_not_number(word)
                                and len(word) > 1):
                            if pos_tag([word])[0][1] != 'NNP':
                                word = word.lower()
                            if word not in lex:
                                lex[word] = Word(word, document, i)
                            else:
                                lex[word].update(document, i)
            return (lex, doc)
        elif config.is_dir():  # if a directory, recursively read each directory and file
            if path[-1] != '/':
                path = path + '/'
            iterdir = list()
            for d in config.iterdir():
                iterdir.append(path + d.name)
            self.outputs = [0] * len(iterdir)
            p = Pool(self.maxPoolSize)
            outputs = p.map(self.update, zip(iterdir,
                                             list(range(len(iterdir)))))
            for (lex, doc) in outputs:
                for k, v in lex.items():
                    if k not in self.lexicon:
                        self.lexicon[k] = Word(k, v.documents)
                    self.lexicon[k].count += v.count
                    self.lexicon[k].documents.update(v.documents)
            self.documents.update(doc)
        else:
            print('{} doesn\'t exist.'.format(path))
Example #21
    def create_tables(self,
                      sql_folder: str = DatabaseUtils.sql_tables_path) -> None:
        """Create all tables from their schemas

        :see DatabaseUtils.sql_tables:
        """
        # reach the sql sources folder
        folder = Path(sql_folder)
        if not folder.exists() \
                or not folder.is_dir():
            raise BadFileFormatException

        # for each file in the folder
        for file in folder.iterdir():
            # if it is an sql file, execute it
            if file.suffix != DatabaseUtils.sql_extension:
                continue
            self._cursor.execute(file.read_text())

        # commit the transaction
        self._connection.commit()
Example #22
def copy_dir(source, target):
    """Remove existing files/folders before copying."""

    source = Path(source)
    if not source.exists():
        return 0
    target = Path(target)
    if not target.exists():
        target.mkdir(parents=True)
    for s in source.iterdir():
        t = target.joinpath(s.name)
        if t.exists():
            if t.is_dir():
                shutil.rmtree(str(t))
            else:
                os.remove(str(t))

        if s.is_dir():
            shutil.copytree(str(s), str(t))
        else:
            shutil.copyfile(str(s), str(t))
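A short usage sketch (paths are illustrative):

    # mirror a build folder, replacing anything already at the destination
    copy_dir('build/assets', 'dist/assets')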
Example #23
    def import_images(self, services, tag=None):
        """Import images from kard to local docker

        Args:
          * services: the name of the images to load
          * tag: the tag of the version to load
        """
        tag = tag or self.kard.meta['tag']

        save_path = Path(self.kard.path) / 'images'
        for child in save_path.iterdir():
            service = child.name[:-4]  # strip the '.tar' suffix
            if service not in services:
                continue
            write('Importing {} ...'.format(child))
            with open(child, 'rb') as f:
                rsp = self.docker.load_image(f.read())
            for message in rsp:
                write(message.get('stream', ''))
            write('\n')
        write('All images have been loaded successfully!\n')
Example #24
        def _get_cache_file(self, remote_url):
            """
            :param remote_url: check if we have the remote url in our cache
            :return: full path to file name, current file size or None
            """
            folder = Path(get_cache_dir() /
                          CacheManager._storage_manager_folder / self._context)
            folder.mkdir(parents=True, exist_ok=True)
            local_filename = self._get_hashed_url_file(remote_url)
            new_file = folder / local_filename
            if new_file.exists():
                new_file.touch(exist_ok=True)

            # delete old files
            files = sorted(folder.iterdir(),
                           reverse=True,
                           key=lambda x: x.stat().st_atime)
            files = files[self._file_limit:]
            for f in files:
                f.unlink()

            # if the file doesn't exist, return None as its size
            return (new_file.as_posix(),
                    new_file.stat().st_size if new_file.exists() else None)
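The eviction above is LRU by access time: sort entries newest-first, keep the first `_file_limit`, delete the rest. The same idea as a standalone sketch (function and parameter names are illustrative):

    from pathlib import Path

    def evict_lru(folder, limit):
        # keep the `limit` most recently accessed entries; unlink the stale files
        entries = sorted(Path(folder).iterdir(), reverse=True,
                         key=lambda p: p.stat().st_atime)
        for stale in entries[limit:]:
            if stale.is_file():
                stale.unlink()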
Example #25
        def get_cache_file(self, remote_url=None, local_filename=None):
            # type: (Optional[str], Optional[str]) -> Tuple[str, Optional[int]]
            """
            :param remote_url: check if we have the remote url in our cache
            :param local_filename: if local_file is given, search for the local file/directory in the cache folder
            :return: full path to file name, current file size or None
            """
            def safe_time(x):
                # noinspection PyBroadException
                try:
                    return x.stat().st_mtime
                except Exception:
                    return 0

            def sort_max_access_time(x):
                atime = safe_time(x)
                # noinspection PyBroadException
                try:
                    if x.is_dir():
                        dir_files = list(x.iterdir())
                        atime = (max(atime, max(
                            safe_time(s)
                            for s in dir_files)) if dir_files else atime)
                except Exception:
                    pass
                return atime

            folder = Path(get_cache_dir() /
                          CacheManager._storage_manager_folder / self._context)
            folder.mkdir(parents=True, exist_ok=True)
            local_filename = local_filename or self.get_hashed_url_file(
                remote_url)
            new_file = folder / local_filename
            new_file_exists = new_file.exists()
            if new_file_exists:
                # noinspection PyBroadException
                try:
                    new_file.touch(exist_ok=True)
                except Exception:
                    pass

            # first exclude lock files
            lock_files = dict()
            files = []
            for f in sorted(folder.iterdir(),
                            reverse=True,
                            key=sort_max_access_time):
                if f.name.startswith(
                        CacheManager._lockfile_prefix) and f.name.endswith(
                            CacheManager._lockfile_suffix):
                    # parse the lock filename
                    name = f.name[len(CacheManager._lockfile_prefix
                                      ):-len(CacheManager._lockfile_suffix)]
                    num, _, name = name.partition(".")
                    lock_files[name] = lock_files.get(name,
                                                      []) + [f.as_posix()]
                else:
                    files.append(f)

            # remove new lock files from the list (we will delete them when time comes)
            for f in files[:self._file_limit]:
                lock_files.pop(f.name, None)

            # delete old files
            files = files[self._file_limit:]
            for i, f in enumerate(files):
                if i < self._file_limit:
                    continue

                # check if the file is in the lock folder list:
                folder_lock = self._folder_locks.get(f.absolute().as_posix())
                if folder_lock:
                    # pop from lock files
                    lock_files.pop(f.name, None)
                    continue

                # check if someone else holds the lock file
                locks = lock_files.get(f.name, [])
                for lck in locks:
                    try:
                        a_lock = FileLock(filename=lck)
                        a_lock.acquire(timeout=0)
                        a_lock.release()
                        a_lock.delete_lock_file()
                        del a_lock
                    except LockException:
                        # someone else holds the lock; skip this file
                        continue

                # if we got here we need to pop from the lock_files, later we will delete the leftover entries
                lock_files.pop(f.name, None)

                # if we are here we can delete the file
                if not f.is_dir():
                    # noinspection PyBroadException
                    try:
                        f.unlink()
                    except Exception:
                        pass
                else:
                    try:
                        shutil.rmtree(f.as_posix(), ignore_errors=False)
                    except Exception as e:
                        # failed deleting folder
                        LoggerRoot.get_base_logger().debug(
                            "Exception {}\nFailed deleting folder {}".format(
                                e, f))

            # cleanup old lock files
            for leftover_locks in lock_files.values():
                for f in leftover_locks:
                    # noinspection PyBroadException
                    try:
                        os.unlink(f)
                    except BaseException:
                        pass

            # if file doesn't exist, return file size None
            # noinspection PyBroadException
            try:
                size = new_file.stat().st_size if new_file_exists else None
            except Exception:
                size = None

            return new_file.as_posix(), size
Example #26
from lisbonpose.lisbonpose import Lisbon
import cv2
import numpy as np
from pathlib2 import Path

lisbon = Lisbon()

vidspath = Path('Data/Videos/')
vidnames = [e for e in vidspath.iterdir()]

for i in vidnames:
    #print(i.suffix)
    #print(i.stem)
    info = i.stem.split('_')
    info.pop()
    separator = '/'
    new_path = 'Data/clean/' + separator.join(info)
    Path(new_path).mkdir(parents=True, exist_ok=True)
    new_vidpath = Path(new_path) / i.name
    if not new_vidpath.exists():
        with new_vidpath.open(mode='xb') as fid:
            fid.write(i.read_bytes())
Example #27
        def get_cache_file(self, remote_url=None, local_filename=None):
            """
            :param remote_url: check if we have the remote url in our cache
            :param local_filename: if local_file is given, search for the local file/directory in the cache folder
            :return: full path to file name, current file size or None
            """
            def safe_time(x):
                # noinspection PyBroadException
                try:
                    return x.stat().st_mtime
                except Exception:
                    return 0

            def sort_max_access_time(x):
                atime = safe_time(x)
                # noinspection PyBroadException
                try:
                    if x.is_dir():
                        dir_files = list(x.iterdir())
                        atime = (max(atime, max(
                            safe_time(s)
                            for s in dir_files)) if dir_files else atime)
                except Exception:
                    pass
                return atime

            folder = Path(get_cache_dir() /
                          CacheManager._storage_manager_folder / self._context)
            folder.mkdir(parents=True, exist_ok=True)
            local_filename = local_filename or self._get_hashed_url_file(
                remote_url)
            new_file = folder / local_filename
            new_file_exists = new_file.exists()
            if new_file_exists:
                # noinspection PyBroadException
                try:
                    new_file.touch(exist_ok=True)
                except Exception:
                    pass

            # delete old files
            files = sorted(folder.iterdir(),
                           reverse=True,
                           key=sort_max_access_time)
            files = files[self._file_limit:]
            for f in files:
                if not f.is_dir():
                    # noinspection PyBroadException
                    try:
                        f.unlink()
                    except Exception:
                        pass
                else:
                    try:
                        shutil.rmtree(f)
                    except Exception as e:
                        # failed deleting folder
                        LoggerRoot.get_base_logger().debug(
                            "Exception {}\nFailed deleting folder {}".format(
                                e, f))

            # if file doesn't exist, return file size None
            # noinspection PyBroadException
            try:
                size = new_file.stat().st_size if new_file_exists else None
            except Exception:
                size = None
            return new_file.as_posix(), size
Example #28
#!/usr/bin/python2

import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from pathlib2 import Path

MIN_MATCH_COUNT = 10

path = Path('./Sample Banana Plant/Images')

img_path_list = sorted([str(x) for x in path.iterdir()])

# Initiate ORB detector
orb = cv.ORB_create()

img_info = []

print("Obtaining keypoints and descriptors.")

for img_path in img_path_list:

    print(img_path)

    img = cv.imread(img_path, 1)

    # find the keypoints and descriptors with ORB
    kp, des = orb.detectAndCompute(cv.cvtColor(img, cv.COLOR_RGB2GRAY), None)

    img_info.append({
        "img": img,
        "kp": kp,    # assumed completion; the original snippet is truncated here
        "des": des,
    })
Example #29
from Bio.Blast import NCBIWWW
from pathlib2 import Path
import logging
from datetime import datetime

data_in_catalog = "DataInExample"
data_out_catalog = 'DataOutExample'

logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)

startTime = datetime.now()

data_in = Path('.') / data_in_catalog
bacteria_paths = [x for x in data_in.iterdir() if x.is_file()]

for microbe_path in bacteria_paths:
    logging.info("Processing: {0}".format(microbe_path))

    with microbe_path.open() as f:
        rna = f.read()

        if rna == "":
            logging.warning("EMPTY FILE !!!")
            continue

        microbe = microbe_path.stem

        p = Path('.')
        path = p / data_out_catalog / microbe

        path.mkdir(parents=True, exist_ok=True)
Example #30
    def handle(self, *filenames, **options):
        """

         * All yang modules from published RFCs should be extracted and be
           available in an rfc-yang repository.

         * All valid yang modules from active, not replaced, internet drafts
           should be extracted and be available in a draft-valid-yang repository.

         * All, valid and invalid, yang modules from active, not replaced,
           internet drafts should be available in a draft-all-yang repository.
           (Actually, given precedence ordering, it would be enough to place
           non-validating modules in a draft-invalid-yang repository instead).

         * In all cases, example modules should be excluded.

         * Precedence is established by the search order of the repository as
           provided to pyang.

         * As drafts expire, models should be removed in order to catch cases
           where a module being worked on depends on one which has slipped out
           of the work queue.

        """

        verbosity = int(options.get('verbosity'))

        def extract_from(file, dir, strict=True):
            saved_stdout = sys.stdout
            saved_stderr = sys.stderr
            xymerr = StringIO()
            xymout = StringIO()
            sys.stderr = xymerr
            sys.stdout = xymout
            model_list = []
            try:
                model_list = xym.xym(str(file),
                                     str(file.parent),
                                     str(dir),
                                     strict=strict,
                                     debug_level=verbosity - 2)
                for name in model_list:
                    modfile = moddir / name
                    mtime = file.stat().st_mtime
                    os.utime(str(modfile), (mtime, mtime))
                    if '"' in name:
                        name = name.replace('"', '')
                        modfile.rename(str(moddir / name))
                model_list = [n.replace('"', '') for n in model_list]
            except Exception as e:
                print("** Error when extracting from %s: %s" % (file, str(e)))
            sys.stdout = saved_stdout
            sys.stderr = saved_stderr
            #
            if verbosity > 1:
                outmsg = xymout.getvalue()
                self.stdout.write(outmsg)
            if verbosity > 2:
                errmsg = xymerr.getvalue()
                self.stderr.write(errmsg)
            return model_list

        # Extract from new RFCs

        rfcdir = Path(settings.RFC_PATH)

        moddir = Path(settings.SUBMIT_YANG_RFC_MODEL_DIR)
        if not moddir.exists():
            moddir.mkdir(parents=True)

        latest = 0
        for item in moddir.iterdir():
            if item.stat().st_mtime > latest:
                latest = item.stat().st_mtime

        print("Extracting to %s ..." % moddir)
        for item in rfcdir.iterdir():
            if item.is_file() and item.name.startswith(
                    'rfc') and item.name.endswith(
                        '.txt') and item.name[3:-4].isdigit():
                if item.stat().st_mtime > latest:
                    model_list = extract_from(item, moddir)
                    for name in model_list:
                        if name.startswith('ietf') or name.startswith('iana'):
                            if verbosity > 1:
                                print("  Extracted from %s: %s" % (item, name))
                            else:
                                sys.stdout.write('.')
                                sys.stdout.flush()
                        else:
                            modfile = moddir / name
                            modfile.unlink()
                            if verbosity > 1:
                                print("  Skipped module from %s: %s" %
                                      (item, name))
        print("")

        # Extract valid modules from drafts

        six_months_ago = time.time() - 6 * 31 * 24 * 60 * 60

        def active(item):
            return item.stat().st_mtime > six_months_ago

        draftdir = Path(settings.INTERNET_DRAFT_PATH)

        moddir = Path(settings.SUBMIT_YANG_DRAFT_MODEL_DIR)
        if not moddir.exists():
            moddir.mkdir(parents=True)
        print("Emptying %s ..." % moddir)
        for item in moddir.iterdir():
            item.unlink()

        print("Extracting to %s ..." % moddir)
        for item in draftdir.iterdir():
            try:
                if item.is_file() and item.name.startswith(
                        'draft') and item.name.endswith('.txt') and active(
                            item):
                    model_list = extract_from(item, moddir)
                    for name in model_list:
                        if not name.startswith('example'):
                            if verbosity > 1:
                                print("  Extracted valid module from %s: %s" %
                                      (item, name))
                            else:
                                sys.stdout.write('.')
                                sys.stdout.flush()
                        else:
                            modfile = moddir / name
                            modfile.unlink()
                            if verbosity > 1:
                                print("  Skipped module from %s: %s" %
                                      (item, name))
            except UnicodeDecodeError as e:
                sys.stderr.write('\nError: %s\n' % (e, ))
                sys.stderr.write(item.name)
                sys.stderr.write('\n')
        print("")

        # Extract invalid modules from drafts
        valdir = moddir
        moddir = Path(settings.SUBMIT_YANG_INVAL_MODEL_DIR)
        if not moddir.exists():
            moddir.mkdir(parents=True)
        print("Emptying %s ..." % moddir)
        for item in moddir.iterdir():
            item.unlink()

        print("Extracting to %s ..." % moddir)
        for item in draftdir.iterdir():
            try:
                if item.is_file() and item.name.startswith(
                        'draft') and item.name.endswith('.txt') and active(
                            item):
                    model_list = extract_from(item, moddir, strict=False)
                    for name in model_list:
                        modfile = moddir / name
                        if (valdir / name).exists():
                            modfile.unlink()
                            if verbosity > 1:
                                print("  Skipped valid module from %s: %s" %
                                      (item, name))
                        elif not name.startswith('example'):
                            if verbosity > 1:
                                print(
                                    "  Extracted invalid module from %s: %s" %
                                    (item, name))
                            else:
                                sys.stdout.write('.')
                                sys.stdout.flush()
                        else:
                            modfile.unlink()
                            if verbosity > 1:
                                print("  Skipped module from %s: %s" %
                                      (item, name))
            except UnicodeDecodeError as e:
                sys.stderr.write('\nError: %s\n' % (e, ))
                sys.stderr.write(item.name)
                sys.stderr.write('\n')

        print("")
Example #31
    args['model_type'] = 'tfv2-MobileNetV2'
    #  mlflow.log_params(args)
    print('Writing out params...', end='')
    with open(str(params), 'w') as f:
        json.dump(args, f)

    print(' Saved to {}'.format(str(params)))

    info('Log Training Parameters')
    parameters = pd.read_json(str(params), typ='series')
    metadata = {
        'outputs': [{
            'type': 'table',
            'storage': 'inline',
            'format': 'csv',
            'header': ['Name', 'Value'],
            'source': parameters.to_csv()
        }]
    }

    # Path(output_path).parent.mkdir(parents=True, exist_ok=True)
    with open(str(ui_metadata_file), 'w') as f:
        json.dump(metadata, f)

    model_output_content = []
    for filename in target_path.iterdir():
        model_output_content.append(str(filename))

    with open(str(output_model_file), 'w+') as f:
        f.write('\n'.join(model_output_content))
Example #32
from pathlib2 import Path
from termcolor import colored  # assumed source of colored()

from pyjoplin.models import Note, NoteIndex, database as db
from pyjoplin.configuration import config
from pyjoplin import notification

path_exportdir = Path('/run/media/jesusbriales/KINGSTON/joplin-ThinkPad/')

printc = lambda x: print(colored(x, 'cyan'))

notes = Note.select().order_by(Note.title)
print("Listing empty notes:")
for note in notes:
    if not note.body:
        printc('Empty: %s %s' % (note.id, note.title))
        notefile = '%s.md' % note.id
        if path_exportdir / notefile in path_exportdir.iterdir():
            note.from_file(str(path_exportdir / notefile))
            filtered_lines = list()
            for line in note.body.split('\n'):
                if line.startswith('id: '):
                    break
                filtered_lines.append(line)
            note.body = '\n'.join(filtered_lines)
            if not note.body:
                continue
            printc('Found non-empty export file:')
            print(note.title)
            print('')
            print(note.body)
            yn = raw_input('Save loaded note content? (Y/n)')
            if yn == 'y' or yn == '':
                note.save()  # assumed completion; the original snippet is truncated here
Example #33
class InventoryHousekeeping(object):
    def __init__(self):
        super(InventoryHousekeeping, self).__init__()
        self._inventory_path = Path(cmk.utils.paths.var_dir) / "inventory"
        self._inventory_archive_path = Path(
            cmk.utils.paths.var_dir) / "inventory_archive"
        self._inventory_delta_cache_path = Path(
            cmk.utils.paths.var_dir) / "inventory_delta_cache"

    def run(self):
        if (not self._inventory_delta_cache_path.exists()
                or not self._inventory_archive_path.exists()):  # pylint: disable=no-member
            return

        last_cleanup = self._inventory_delta_cache_path / "last_cleanup"
        # TODO: remove with pylint 2
        if (last_cleanup.exists()
                and time.time() - last_cleanup.stat().st_mtime < 3600 * 12):  # pylint: disable=no-member
            return

        # TODO: remove with pylint 2
        inventory_archive_hosts = {
            x.name
            for x in self._inventory_archive_path.iterdir() if x.is_dir()  # pylint: disable=no-member
        }
        inventory_delta_cache_hosts = {
            x.name
            for x in self._inventory_delta_cache_path.iterdir() if x.is_dir()  # pylint: disable=no-member
        }

        folders_to_delete = inventory_delta_cache_hosts - inventory_archive_hosts
        for foldername in folders_to_delete:
            shutil.rmtree(str(self._inventory_delta_cache_path / foldername))

        inventory_delta_cache_hosts -= folders_to_delete
        for hostname in inventory_delta_cache_hosts:
            available_timestamps = self._get_timestamps_for_host(hostname)
            for filename in [
                    x.name for x in (self._inventory_delta_cache_path /
                                     hostname).iterdir() if not x.is_dir()
            ]:
                delete = False
                try:
                    first, second = filename.split("_")
                    if first not in available_timestamps or second not in available_timestamps:
                        delete = True
                except ValueError:
                    delete = True
                if delete:
                    (self._inventory_delta_cache_path / hostname /
                     filename).unlink()

        # TODO: remove with pylint 2
        last_cleanup.touch()  # pylint: disable=no-member

    def _get_timestamps_for_host(self, hostname):
        timestamps = {"None"}  # 'None' refers to the histories start
        try:
            timestamps.add("%d" %
                           (self._inventory_path / hostname).stat().st_mtime)
        except OSError:
            pass

        for filename in [
                x for x in (self._inventory_archive_path / hostname).iterdir()
                if not x.is_dir()
        ]:
            timestamps.add(filename.name)
        return timestamps
Example #34
import sys
from pathlib2 import Path

if __name__ == '__main__':
    TESTCASE_DIR = Path(__file__).resolve().parent.joinpath('testcase')
    sys.path.append(str(TESTCASE_DIR))

    for p in TESTCASE_DIR.iterdir():
        if p.name.startswith('test') and p.name.endswith('.py'):
            if sys.version_info[0] == 3:
                import importlib.util
                spec = importlib.util.spec_from_file_location(p.name, str(p))
                testcase = importlib.util.module_from_spec(spec)
                spec.loader.exec_module(testcase)
                testcase.run()
            else:
                import imp
                testcase = imp.load_source(p.name, str(p))
                testcase.run()