Code Example #1
File: os_study.py  Project: incoging/python_study

import pathlib


def pathlib_func():
    '''
    Classes for manipulating filesystem paths in an OS-appropriate way.
    '''

    # === PurePath (pure paths) ===
    # PurePath(*pathsegments) // joins the segments using the system's separator
    path_pure = pathlib.PurePath('hello.py')
    path_pure = pathlib.PurePath('python', 'hello.py')
    path_pure = pathlib.PurePath(pathlib.Path('python'),
                                 pathlib.Path('hello.py'))

    # PurePosixPath(*pathsegments) // non-Windows filesystem paths
    # PureWindowsPath(*pathsegments) // Windows filesystem paths

    # Paths are immutable and hashable; paths of the same flavor are comparable and orderable
    pathlib.PurePosixPath('hello') == pathlib.PurePosixPath(
        'Hello')  # => False (PurePosixPath is case-sensitive)
    pathlib.PureWindowsPath('HELLO') == pathlib.PureWindowsPath(
        'hello')  # => True (PureWindowsPath is case-insensitive)
    pathlib.PureWindowsPath('hello') in {pathlib.PureWindowsPath('Hello')
                                         }  # => True
    pathlib.PureWindowsPath('C:') < pathlib.PureWindowsPath('d:')  # => True

    # Paths of different flavors never compare equal and cannot be ordered
    pathlib.PureWindowsPath('hello') == pathlib.PurePosixPath(
        'hello')  # => False
    # pathlib.PureWindowsPath('hello') < pathlib.PurePosixPath('hello')  # => TypeError

    # Operators
    path_pure = pathlib.PurePath('c:/')
    path_pure = path_pure / 'python' / 'hello.py'  # join paths

    tups = path_pure.parts  # tuple of components => ('c:\\', 'python', 'hello.py')
    drive = path_pure.drive  # drive letter => 'c:'
    root = path_pure.root  # root => '\\'
    driroot = path_pure.anchor  # drive + root => 'c:\\'
    parents = path_pure.parents  # ancestor paths; parents[0] => PureWindowsPath('c:/python')
    parent = path_pure.parent  # parent path => PureWindowsPath('c:/python')
    name = path_pure.name  # file (or directory) name
    suffix = path_pure.suffix  # extension
    suffixs = path_pure.suffixes  # list of extensions
    name = path_pure.stem  # file (or directory) name without its suffix => 'hello'

    path = path_pure.as_posix()  # return the path using / as the separator
    path = path_pure.as_uri(
    )  # return the path as a file URI; the path must be absolute => 'file:///c:/python/hello.py'
    path = path_pure.joinpath(
        "a.txt",
        "b.txt")  # join paths => PureWindowsPath('c:/python/hello.py/a.txt/b.txt')
    path = path_pure.with_name(
        "world.py")  # replace the file (or directory) name => PureWindowsPath('c:/python/world.py')
    path = path_pure.with_suffix(".txt")  # replace the extension, adding one if missing

    boolean = path_pure.match("*.py")  # match a glob pattern (matched from the right; case sensitivity follows the comparisons above)
    boolean = path_pure.is_absolute()  # whether the path is absolute (/, //, and c:/ prefixes all count)

    # === Path (concrete paths) ===
    # Path(*pathsegments)
    path = pathlib.Path('hello.py')

    # PosixPath(*pathsegments)
    # WindowsPath(*pathsegments)

    path_s = path.cwd()  # current working directory
    home = path.home()  # user home directory

    state = path.stat()  # status information for the path, same as os.stat()
    state = path.lstat()  # like stat(), but reports on the symlink itself if the target is one
    path.chmod(0o777)  # change the permission mode, same as os.chmod()
    path.lchmod(0o777)  # like chmod(), but changes the symlink's mode if the target is one
    lists = path.glob(r'*.py')  # iterator over all files matching the pattern
    group = path.group()  # group owning the file (not available on Windows)
    iter = path.iterdir()  # iterate over the directory (the path must be a directory)
    # mkdir(mode=0o777, parents=False, exist_ok=False)
    path.mkdir()  # create a directory
    path.rmdir()  # remove a directory
    # open(mode='r', buffering=-1, encoding=None, errors=None, newline=None)  // open the file
    f = path.open()
    bytes = path.read_bytes()  # read the file contents as bytes
    num = path.write_bytes(b"hello")  # (over)write the file contents
    # read_text(encoding=None, errors=None)
    strs = path.read_text(encoding='utf-8')
    # write_text(data, encoding=None, errors=None)
    num = path.write_text("hello")
    ow = path.owner()  # owner of the file (not available on Windows)
    path.rename("new.txt")  # rename; on Windows this fails if the target exists
    path.replace("new.txt")  # rename, replacing the target if it exists
    path_s = path.resolve()  # absolute path
    path.symlink_to(
        "hello.py", target_is_directory=False
    )  # create a symlink; on Windows set target_is_directory=True when the target is a directory (may require elevated privileges)
    path.unlink()  # remove a file or file symlink
    # touch(mode=0o666, exist_ok=True)
    path.touch()  # create a file

    boolean = path.exists()  # whether the file (or directory) exists
    boolean = path.is_dir()  # whether it is a directory
    boolean = path.is_file()  # whether it is a regular file
    boolean = path.is_symlink()  # whether it is a symbolic link
    boolean = path.is_block_device()  # whether it is a block device
    boolean = path.is_char_device()  # whether it is a character device
    boolean = path.samefile(path)  # whether both paths refer to the same file
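
Because pure paths never touch the filesystem, they are handy for building paths meant for another OS. A minimal sketch (the file names are hypothetical):

import pathlib

p = pathlib.PureWindowsPath('C:/Users/alice/report.docx')  # hypothetical path
print(p.with_suffix('.pdf'))           # => C:\Users\alice\report.pdf
print(p.parent / 'archive' / p.name)   # => C:\Users\alice\archive\report.docx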
Code Example #2
def resolve_wdir(wdir, path):
    rel_wdir = relpath(wdir, os.path.dirname(path))
    return pathlib.PurePath(rel_wdir).as_posix() if rel_wdir != "." else None
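
For illustration, assuming relpath above is os.path.relpath, the helper returns the working directory relative to the file's directory in POSIX form, or None when the two coincide (the arguments below are hypothetical):

print(resolve_wdir("project", "project/sub/dir/dvc.yaml"))  # => '../..'
print(resolve_wdir("project/sub", "project/sub/dvc.yaml"))  # => None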
Code Example #3
def initialize_config_and_execute(config_values):
    global configs
    configs = config_values
    # Applying checks for paths
    
    p1 = pathlib.PurePath(configs['DICOMHome'])
    dicom_home = p1.as_posix() # the folder containing your dicom files

    p2 = pathlib.PurePath(configs['OutputDirectory'])
    output_directory = p2.as_posix()

    print_images = configs['PrintImages']
    print_only_common_headers = bool(configs['CommonHeadersOnly'])
    depth = int(configs['Depth'])
    processes = int(configs['UseProcesses']) # how many processes to use.
    flattened_to_level = configs['FlattenedToLevel']
    email = configs['YourEmail']
    send_email = configs['SendEmail']
    no_splits = int(configs['SplitIntoChunks'])
    is16Bit = bool(configs['is16Bit']) 
    
    metadata_col_freq_threshold = 0.1

    nifti_destination = output_directory + '/extracted-images/'
    failed = output_directory + '/failed-dicom/'
    maps_directory = output_directory + '/maps/'
    meta_directory = output_directory + '/meta/'

    LOG_FILENAME = output_directory + '/ImageExtractor.out'
    pickle_file = output_directory + '/ImageExtractor.pickle'
    dict_pickle_file = output_directory + '/ImageExtractork_dict.pickle'

    # record the start time
    t_start = time.time()

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)

    for directory in (maps_directory, meta_directory, nifti_destination,
                      failed, failed + "/1", failed + "/2", failed + "/3",
                      failed + "/4"):
        if not os.path.exists(directory):
            os.makedirs(directory)

    logging.info("------- Values Initialization DONE -------")
    final_res = execute(pickle_file, dicom_home, output_directory, print_images, print_only_common_headers, depth,
                        processes, flattened_to_level, email, send_email, no_splits, is16Bit, nifti_destination,
        failed, maps_directory, meta_directory, LOG_FILENAME, metadata_col_freq_threshold, t_start,dict_pickle_file)
    return final_res
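
A minimal pathlib sketch of the same directory scaffolding: Path.mkdir(parents=True, exist_ok=True) replaces each exists()/makedirs() pair (output_directory is assumed to be the string computed above):

import pathlib

def make_output_tree(output_directory):
    out = pathlib.Path(output_directory)
    for sub in ("extracted-images", "maps", "meta", "failed-dicom"):
        (out / sub).mkdir(parents=True, exist_ok=True)
    for chunk in ("1", "2", "3", "4"):
        (out / "failed-dicom" / chunk).mkdir(parents=True, exist_ok=True)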
Code Example #4
    def pre_spray_info(self):
        """
        Display spray config table
        """
        spray_info = Table(
            show_header=False,
            show_footer=False,
            min_width=61,
            title=f"Module: {self.module.upper()}",
            title_justify="left",
            title_style="bold reverse",
        )

        spray_info.add_row("Target", f"{self.target.url}")

        if self.domain:
            spray_info.add_row("Domain", f"{self.domain}")

        if self.attempts:
            spray_info.add_row("Interval", f"{self.interval} minutes")
            spray_info.add_row("Attempts", f"{self.attempts} per interval")

        if self.jitter:
            spray_info.add_row("Jitter",
                               f"{self.jitter_min}-{self.jitter} seconds")

        if self.notify:
            spray_info.add_row("Notify", f"True ({self.notify})")

        log_name = pathlib.PurePath(self.log_name)
        out_name = pathlib.PurePath(self.output)
        spray_info.add_row("Logfile", f"{log_name.name}")
        spray_info.add_row("Results", f"{out_name.name}")

        console.print(spray_info)

        print()
        Confirm.ask(
            "[blue]Press enter to begin",
            default=True,
            show_choices=False,
            show_default=False,
        )
        print()

        if self.module == "Smb":
            console.print(f"[*] Initiaing SMB connection to {self.host} ...",
                          style="warning")
            if self.target.get_conn():
                console.print(
                    f'[+] Connected to {self.host} over {"SMBv1" if self.target.smbv1 else "SMBv3"}',
                    style="good",
                )

                console.print(f"\t[>] Hostname: {self.target.hostname} ",
                              style="info")
                console.print(f"\t[>] Domain: {self.target.domain} ",
                              style="info")
                console.print(f"\t[>] OS: {self.target.os} ", style="info")
                print()

            else:
                console.print(f"[!] Failed to connect to {self.host} over SMB",
                              style="danger")
                exit()

        self.target.print_headers(self.output)
Code Example #5
def prepare_repository(
    package,
    version,
    package_path,
    source_repo,
    dest_repo,
    http_root,
    skip_images,
    skip_cli
):
    dest_path = dest_repo / package_path.relative_to(source_repo)
    shutil.copytree(str(package_path), str(dest_path))

    dest_resource = dest_path / 'resource.json'
    with dest_resource.open('w', encoding='utf-8') as dest_file:
        resource = load_json(package_path / 'resource.json')

        # Change the root for images (ignore screenshots)
        if not skip_images and 'images' in resource:
            resource["images"] = {
                n: urllib.parse.urljoin(
                    http_root, str(pathlib.PurePath(
                        package, version, "images", pathlib.Path(uri).name)))
                for n, uri in resource.get("images", {}).items() if 'icon' in n}

        # Change the root for asset uris.
        if 'assets' in resource:
            resource["assets"]["uris"] = {
                n: urllib.parse.urljoin(
                    http_root, str(pathlib.PurePath(
                        package, version, "uris", pathlib.Path(uri).name)))
                for n, uri in resource["assets"].get("uris", {}).items()}

        # Change the root for cli uris.
        if not skip_cli and 'cli' in resource:
            for os_type, arch_dict in \
                    resource.get('cli', {}).get('binaries', {}).items():
                for arch_name, arch_info in arch_dict.items():
                    uri = arch_info["url"]
                    arch_info["url"] = urllib.parse.urljoin(
                        http_root,
                        str(
                            pathlib.PurePath(
                                package,
                                version,
                                "uris",
                                os_type,
                                pathlib.Path(uri).name)))

        # Add the local docker repo prefix.
        if 'assets' in resource:
            if 'container' in resource["assets"]:
                resource["assets"]["container"]["docker"] = {
                    n: format_image_name(DOCKER_ROOT, image_name)
                    for n, image_name in resource["assets"]["container"].get(
                        "docker", {}).items()}

        json.dump(resource, dest_file, indent=4)

    command_path = (package_path / 'command.json')
    if not command_path.exists():
        return

    dest_command = dest_path / 'command.json'
    with dest_command.open('w', encoding='utf-8') as dest_file:
        command = load_json(command_path)

        command['pip'] = [
            urllib.parse.urljoin(
                http_root,
                str(
                    pathlib.PurePath(
                        package,
                        version,
                        "commands",
                        pathlib.Path(uri).name
                    )
                )
            )
            for uri in command.get("pip", [])
        ]
        json.dump(command, dest_file, indent=4)
Code Example #6
File: ilastik.py  Project: jburel/ilastik
 def issubdir(path):
     """Whether path is equal to or is a subdirectory of root."""
     path = pathlib.PurePath(path)
     return path == root or any(parent == root for parent in path.parents)
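
issubdir() closes over a root variable defined elsewhere in ilastik.py; with a hypothetical root it behaves like this:

import pathlib

root = pathlib.PurePath("/opt/ilastik")  # stand-in for the real closure variable

print(issubdir("/opt/ilastik"))          # => True (equal to root)
print(issubdir("/opt/ilastik/bin/run"))  # => True (root is among its parents)
print(issubdir("/usr/local/bin"))        # => False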
Code Example #7
import os, pathlib
from pwd import getpwuid
from pathlib import Path

# Walk the filesystem and print the owner of every 33-byte regular file.
for path, subdirs, files in os.walk("/"):
    for name in files:
        current = str(pathlib.PurePath(path, name))
        if Path(current).is_file():
            size = os.stat(current).st_size
            if size == 33:
                # os.stat (not the stat module) carries st_uid
                print(getpwuid(os.stat(current).st_uid).pw_name)
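
The same scan reads more naturally with pathlib alone; a sketch using Path.rglob in place of os.walk (same 33-byte filter):

from pathlib import Path
from pwd import getpwuid

for p in Path("/").rglob("*"):
    # may need a try/except PermissionError when run as a non-root user
    if p.is_file() and p.stat().st_size == 33:
        print(getpwuid(p.stat().st_uid).pw_name)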
Code Example #8
from pathlib import Path
import pathlib

# List subdirectories
p = Path(".")
a = [x for x in p.iterdir() if x.is_dir()]
# print(a)

# List all .py files under the current directory, recursively
abc = list(p.glob("**/*.py"))
# print(abc)

# Inspect a shortcut
p = Path(r"C:\Users\Administrator\Desktop\Everything.exe")
# print(p)
# print("="*30)
# print(p.resolve())

# Query path attributes
q = Path(r'F:\PythonProject')
# print(q.exists())
# print(q.is_dir())

# Join paths
pin = pathlib.PurePath("test", "abc/dfa", "tes")
# print(pin)
# print(pathlib.PurePath(Path("dir1"), Path("subdir1")))

# Use the current directory
# currentpath = pathlib.PurePath()
Code Example #9
def get_root():
    root = pathlib.PurePath(
        input("What's the full path where you'd like the project? "))
    if not root.is_absolute():
        # normalize to an absolute path; wrapping keeps the return type consistent
        return pathlib.PurePath(os.path.abspath(root))
    return root
Code Example #10
File: hacks.py  Project: descriptinc/descript-cerbero
def join(*args):
    return pathlib.PurePath(oldjoin(*args)).as_posix()
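
hacks.py presumably saves the original os.path.join as oldjoin before patching it; a minimal stand-in shows the effect:

import os
import pathlib

oldjoin = os.path.join  # assumed: the original join kept before monkeypatching

print(join("C:\\cerbero", "build", "logs"))  # => 'C:/cerbero/build/logs' on Windows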
Code Example #11
ALL_LOGS = LOGS_DIR + "/logs"
shutil.rmtree(ALL_LOGS, ignore_errors=True)
LOG_DIR = ALL_LOGS + "/" + EXP_ID
os.makedirs(LOG_DIR)

CONTROLLER_PID = "CONTROLLER_PID"

### By default, we allocate 3 CPUs per run.
### Recall that we are running 3 processes, and they are multithreaded.
CPUS = 3

TIMEOUT_SUT_START_MINUTES = 20

if not CLUSTER:
    REPORT_DIR = str(pathlib.PurePath(REPORT_DIR).as_posix())
    SCRIPT_DIR = str(pathlib.PurePath(SCRIPT_DIR).as_posix())
    TEST_DIR = str(pathlib.PurePath(TEST_DIR).as_posix())
    LOG_DIR = str(pathlib.PurePath(LOG_DIR).as_posix())



def createRunallScript():
    script_path = BASE_DIR + "/runall.sh"
    script = open(script_path, "w")

    script.write("#!/bin/bash \n\n")

    script.write("DIR=`dirname \"$0\"` \n\n")

    script.write("for s in `ls $DIR/scripts/*.sh`; do\n")
Code Example #12
def get_root():
    root = pathlib.PurePath(
        input("What's the full path where you'd like the project? "))
    if not root.is_absolute():
        return get_root()  # we recall the function till we get what we want
    return root
Code Example #13
    def __init__(self, parent = None):
        """Create a wizard or the mainwindow"""
        self._parent = parent

        super().__init__(self._parent)

        print("runner/Main parent: ", self._parent, " -> self: ", self) if oPB.PRINTHIER else None

        self.logger = None
        self.args = self.get_args()
        self._log_level = None
        self._log_file = None
        self.translator = None

        # make it really quiet, part 1
        if self.args.quiet:
            self.args.nogui = True

        # instantiate configuration class
        confighandler.ConfigHandler(oPB.CONFIG_INI)

        # redirect system exception hook
        if not self.args.noexcepthook:
            sys.excepthook = self.excepthook

        # pre-instantiating the application, avoid some nasty OpenGL messages
        QApplication.setAttribute(QtCore.Qt.AA_UseOpenGLES, on = True)
        # create new application and install stylesheet
        self.app = QApplication(sys.argv)
        #self.install_stylesheet()

        # Create and display the splash screen, if in ui mode
        if not self.args.nogui:
            splash_pix = QPixmap(':/images/splash.png')
            self.splash = QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
            self.splash.setMask(splash_pix.mask())
            # splash.showMessage("opsi Package Builder " + oPB.PROGRAM_VERSION + " " + translate("Main", "is loading..."), QtCore.Qt.AlignCenter, QtCore.Qt.white)
            self.splash.show()
            self.app.processEvents()

        # Application name
        self.app.setOrganizationName("opsi Package Builder")
        self.app.setApplicationName("opsi Package Builder " + oPB.PROGRAM_VERSION)

        # save ourselves in main instance property to be easily accessed via qApp
        # i.e.
        # from PyQt5.QtWidgets import qApp
        # main = qApp.property("main")
        self.app.setProperty("main", self)

        if confighandler.ConfigHandler.cfg.log_always == "True":
            if self.args.log_file is not None:
                self._log_file = self.args.log_file
            else:
                self._log_file = confighandler.ConfigHandler.cfg.log_file

            if self.args.log_level.upper() != "NOTSET":
                self._log_level = self.args.log_level.upper()
            else:
                self._log_level = confighandler.ConfigHandler.cfg.log_level
        else:
            self._log_file = self.args.log_file
            if self.args.log_level.upper() == "NOTSET":
                self._log_level = "CRITICAL"
            else:
                self._log_level = self.args.log_level.upper()

        if self._log_file is not None:
            if not pathlib.Path(self._log_file).is_absolute():
                if platform.system() == "Windows":
                    self._log_file = str(pathlib.PurePath(oPB.WIN_TMP_PATH, self._log_file))
                if platform.system() in ["Darwin", "Linux"]:
                    self._log_file = str(pathlib.PurePath(oPB.UNIX_TMP_PATH, self._log_file))

        # overwrite development directory from config with command line arg
        if self.args.dev_dir is not None:
            confighandler.ConfigHandler.cfg.dev_dir = self.args.dev_dir

        # Initialize the logger and reroute QtCore messages to it
        self.logWindow = oPB.gui.logging.LogDialog(None, self, self._log_level)
        self.instantiate_logger(False)
        QtCore.qInstallMessageHandler(self.qt_message_handler)

        # log program version and user
        self.logger.info(80 * "-")
        self.logger.info("opsi PackageBuilder (MIT licensed) " + oPB.PROGRAM_VERSION)
        self.logger.info("Current user: "******"Command line arguments given:")
        for key, val in vars(self.args).items():
            self.logger.info("  " + key + ": " + str(val))
        if self._log_level not in ["DEBUG", "INFO", "SSHINFO", "WARNING", "ERROR", "CRITICAL", "SSH"]:
            self.logger.error("  Undefined log level: " + self._log_level)
            self.logger.error("  Log level has been set to ERROR")

        for elem in self.app.libraryPaths():
            self.logger.debug("QT5 library path: " + elem)

        # write config to log, if necessary
        confighandler.ConfigHandler.cfg.log_config()

        self.check_online_status()

        # -----------------------------------------------------------------------------------------
        # main ui dispatching

        # startup gui variant
        if not self.args.nogui:
            # install stylesheet
            self.install_stylesheet()

            # hide console window, but only under Windows and only if app is frozen
            if sys.platform.lower().startswith('win'):
                if getattr(sys, 'frozen', False):
                    hideConsole()

            # installing translators
            self.translator = Translator(self.app, "opsipackagebuilder")
            self.translator.install_translations(confighandler.ConfigHandler.cfg.language)

            # retranslate logWindow, as it is loaded before the translations
            self.logWindow.retranslateUi(self.logWindow)

            # create app icon
            app_icon = QtGui.QIcon()
            app_icon.addFile(':images/prog_icons/opb/package_16x16.png', QtCore.QSize(16, 16))
            app_icon.addFile(':images/prog_icons/opb/package_24x24.png', QtCore.QSize(24, 24))
            app_icon.addFile(':images/prog_icons/opb/package_32x32.png', QtCore.QSize(32, 32))
            app_icon.addFile(':images/prog_icons/opb/package_48x48.png', QtCore.QSize(48, 48))
            app_icon.addFile(':images/prog_icons/opb/package_64x64.png', QtCore.QSize(64, 64))
            app_icon.addFile(':images/prog_icons/opb/package_92x92.png', QtCore.QSize(92, 92))
            app_icon.addFile(':images/prog_icons/opb/package_128x128.png', QtCore.QSize(128, 128))
            app_icon.addFile(':images/prog_icons/opb/package_256x256.png', QtCore.QSize(256, 256))
            self.app.setProperty("prog_icon", app_icon)

            # startup program window
            self.mainWindow = main.MainWindowController(self.args)
            self.mainWindow.ui.showLogRequested.connect(self.logWindow.show)
            self.mainWindow.closeAppRequested.connect(self.logWindow.close)

            self.splash.finish(self.mainWindow.ui)

            # check for updates if configured
            if confighandler.ConfigHandler.cfg.updatecheck == "True":
                self.mainWindow.update_check()

            # run main app loop
            self.app.exec_()

        # only process commandline
        else:
            self.logger.info("No-GUI parameter set")

            # startup program window
            self.console = console.ConsoleController(self.args)

        # -----------------------------------------------------------------------------------------

        # unmount drive (if exist) after end of program
        if (oPB.NETDRV is not None) and oPB.NETDRV != "offline":
            ret = MapDrive.unMapDrive(oPB.NETDRV)
            if ret[0] != 0:
                self.logger.error("Error unmounting path: " + str(ret))
            else:
                self.logger.info("Network drive successfully unmounted")

        # exit and set return code
        self.logger.info("Exit code: " + str(oPB.EXITCODE))

        # show console window
        if not self.args.nogui:
            if sys.platform.lower().startswith('win'):
                if getattr(sys, 'frozen', False):
                    showConsole()

        sys.exit(oPB.EXITCODE)
Code Example #14
import os
import requests
import pathlib
from win32com import client as win_client

# Working directory (append .parent when debugging from the current path)
BASE_DIR = str(pathlib.Path.cwd())
# BASE_DIR = str(pathlib.Path.cwd().parent)

CHROME_DRIVER_BASE_URL = "https://chromedriver.storage.googleapis.com"
EDGE_DRIVER_BASE_URL = "https://msedgedriver.azureedge.net"
CHROME_DRIVER_ZIP = "chromedriver_win32.zip"
EDGE_DRIVER_ZIP = "edgedriver_win64.zip"
CHROME_DRIVER = "chromedriver.exe"
EDGE_DRIVER = "msedgedriver.exe"

BROWSER_DRIVER_DIR = str(pathlib.PurePath(BASE_DIR, "driver"))
DRIVER_MAPPING_FILE = os.path.join(BASE_DIR, "config", "mapping.json")


def get_browser_version(file_path):
    """
    获取浏览器版本
    :param file_path: 浏览器文件路径
    :return: 浏览器大版本号
    """
    # 判断路径文件是否存在
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"{file_path} is not found.")
    win_obj = win_client.Dispatch('Scripting.FileSystemObject')
    version = win_obj.GetFileVersion(file_path)
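
The example is truncated here; given the docstring, the function presumably ends by reducing the dotted version string to its major component. A sketch with a hypothetical value:

version = "96.0.4664.45"  # hypothetical value returned by GetFileVersion
major = version.split(".")[0]  # => "96"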
Code Example #15
class DataAnalyser(object):
    '''
    Helper object for reading some common data types and exploring them.
    '''

    __readTypes = {
            'csv'       : _loadCSV
            ,'hdf5'     : _loadHDF
            ,'hdf'      : _loadHDF
            ,'rand'     : _loadRAND
            }

    __writeTypes = {
            'csv'   : _writeCSV
            ,'hdf'  : _writeHDF
            ,'hdf5' : _writeHDF
            }

    configDef = { # Configuration Definition
            'chopDirectory'         : {
                'default'   : pathlib.PurePath(os.path.curdir)
                ,'func'     : lambda p: isinstance(p,pathlib.PurePath)
                }
            ,'chopFilenamePrefix'   : {
                'default'   : 'chop'
                ,'func'     : lambda p: isinstance(p,str) and (len(p) >= 1 and len(p) <= 256)
                }
            ,'hdfKey'       : {
                'default'   : 'chop'
                ,'func'     : lambda p: isinstance(p,str) and (len(p) >= 1 and len(p) <= 64)
                }
            ,'chopFileFormat'   : {
                'default'   : 'csv'
                ,'func'     : lambda p: isinstance(p,str) and (p in DataAnalyser.getSupprtedFormats())
                }
            ,'indexSpecialName' : {
                'default'   : 'Internal Index'
                ,'func'     : lambda s: isinstance(s,str) and (len(s) >= 1 and len(s) <= 32)
                }
            }

    """Transformation Options"""
    __transforms = {
            'fixed'         : {
                'label'         : 'Fixed Translation Transform'
                ,'xTrans'       : 0.0
                ,'yTrans'       : 0.0
                ,'Enabled'      : False
                ,'func'         : fixedTranslation
                }
            ,'tagcentered'  : {
                'label'         : 'Active View-Centered Translation'
                ,'Enabled'      : False
                ,'func'         : viewCenteredTranslation
                }
            ,'angle'        : {
                'label'         : 'Active Angle Translation'
                ,'Enabled'      : False
                ,'func'         : angleTransform
                }
            }

    """ Curve Fit Options """
    __fits = {
            'linear'        : {
                'label'         : 'Linear Curve Fit'
                ,'Enabled'      : False
                ,'func'         : fitLinear
            }
        }

    def getDefaultConfig(self) :
        defaultDict = dict()
        for key in self.configDef :
            defaultDict[key] = self.configDef[key]['default']
        debug("DataAnalyser: getDefaultConfig: "+str(defaultDict))
        return defaultDict

    @staticmethod
    def getSupprtedFormats() :
        """ Get a list of supported file extensions that DataAnalyser can read
        and write.
        """
        return DataAnalyser.__readTypes.keys()

    @staticmethod
    def getFitOptions() :
        return DataAnalyser.__fits

    @staticmethod
    def getTransformOptions() :
        return DataAnalyser.__transforms

    def __init__(self, initObj=None, *args, **kwargs):
        self.isLoaded = False
        self.__config = configer.Configer(DataAnalyser.configDef)
        if initObj :
            ''' Initialize the data object with the first passed argument '''
            self.df = pd.DataFrame(initObj)
            self.isLoaded = True
            self.__setDefaultView()
        else :
            ''' Otherwise, empty'''
            pass
        if kwargs is not None :
            for key,val in kwargs.items() :
                self.__config[key] = val
        self.indexName = self.__config['indexSpecialName']

    def __setDefaultView(self):
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: __setDefaultView: no data loaded")
        self.windowStart = 0
        self.windowSize = self.df.shape[0]
        self.windowType = 'index'
        self.altIndexCol = 'index'

        """ Create a default view of the fist column only.  One column is
        technically okay, but, rest of implementation may assume otherwise.
        """
        """At minimum, there must be and index and one column, so we know
        that a good default is:"""
        currentView = [ [self.indexName, self.plotColumns[0] ] ]
        """If there is more than one plot-able column, choose the first two"""
        if len(self.plotColumns) > 2 :
            currentView = [self.plotColumns[0:2] ]
        """Keep forgetting the view is a list of lists!"""
        assert(isinstance(currentView[0],list))
        self.currentView = currentView

    def getConfig(self) :
        return self.__config.getConfig()

    def load(self, filetype='csv', filename=None, *args, **kwargs):
        """ Determine which file to load """
        if filename == None :
            raise Exception('No filename provided.')
        else :
            self.filename = filename

        ''' Load the file '''
        loadFunc = DataAnalyser.__readTypes[filetype]
        self.df = loadFunc(filename, *args, **kwargs)
        debug("load:Loaded new data file: head:"+str(self.df.head()))
        self.indexCol = kwargs['index_col']
        self.isLoaded = True
        self.cleanData()
        self.__setDefaultView()
        debug("load:default view:"+str(self.currentView))

    def cleanData(self) :
        #data.dropna(inplace=True)
        #data.reset_index(drop=True)
        """Pull out column names for plot-able column data"""
        #self.plotColumns = self.df.columns[ self.columnDataIsPlotable(e) for e in self.df.columns.tolist() ]
        self.plotColumns = list()
        for label in self.df.columns.tolist() :
            """For each lable, check against function, store"""
            if self.columnDataIsPlotable(label):
                self.plotColumns.append(label)
        debug("Got {} plot-able columns: {}".format(len(self.plotColumns),self.plotColumns))
        info("Not yet fully implemented.")

    def getColumnList(self) :
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: getColumnList: no data loaded")
        return self.getLabels()

    def getLabels(self):
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: getLabels: no data loaded")
        return self.plotColumns

    def columnDataIsPlotable(self, name):
        """Check data in the column named 'name' to make sure we can plot it
        Cannot plot anything but ints and floats.
        """
        return isinstance(self.df[name].iloc[0], Number)

    def isValidColumn(self, name):
        """Check if 'name' is in the DataFrame."""
        return (name in self.df.columns.tolist())

    def isIndexable(self, name):
        """ Determine whether or not the given column can be used as an index. """
        retVal = False
        if self.isValidColumn(name):
            series = self.df[name]
            """ Are all values unique? """
            if not series.duplicated().any():
                """ Are the values ordered? """
                #TODO sort it?
                retVal = True
        return retVal

    def setAltIndexColumn(self, value):
        warn('DataAnalyser:setAltIndexColumn: Not implemented.')
        return
        debug("setAltIndexColumn: got new index name:'"+str(value)+"'")
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: setAltIndexColumn: no data loaded")
        """ Check for special option, index """
        if str(value) == str('index') :
            self.altIndexCol = 'index'
            return
        """ Otherwise, check value. """
        if not self.isValidColumn(value):
            raise Exception('Invalid column name:'+str(value))

        self.altIndexCol = value

    def validateView(self, viewList):
        """TODO Move view validation code here"""
        pass

    def setView(self, viewList=None, windowStart = None, windowSize=None, windowType=None) :
        """ Set the analyser view

        Parameters:
        viewList : list of views, each view is a tuple of x,y data columns/names
        """
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: no data loaded")
        if windowStart is not None and windowSize is not None \
                and windowStart + windowSize > self.df.index.size :
            raise Exception("ERROR: window out of range")
        debug("Got new view list:"+str(viewList))
        goodLabels = self.getLabels()
        if viewList is not None :
            """ Okay, this is real data """
            newViewList = list()
            if isinstance(viewList, list) or isinstance(viewList,tuple) :
                for view in viewList:
                    debug("new view:"+str(view))
                    debug("new view type:"+str(type(view)))
                    if isinstance(view, list) or isinstance(view,tuple) :
                        """ viewList must be a list of lists """
                        validView = True
                        for item in view :
                            """ Validate each name in new view """
                            if item not in goodLabels :
                                warnwarn("View contains invalid labels for this data set, ignoring")
                                validView = False
                            else :
                                debug("item okay:"+str(item))
                        """ Okay, new view is validated, set currentview if passed."""
                        if validView :
                            newViewList.append(view)
                        else :
                            error("Was passed invalid view, ignoring it:"+str(view))
                    elif view is None :
                        """View maybe none if input not sanitized.  Fine, just
                        ignore it."""
                        warnwarn("View is 'None', ignoring.")
                    else :
                        raise TypeError("view must be a list or tuple, also.")
            else :
                raise TypeError("viewList must be a list or tuple.")
            """At this point, the new views have been validated."""
            """Accept the current view list, unless it's empty!"""
            if len(newViewList) < 1 :
                """New views failed validation"""
                debug("setView: All requested views failed validation.")
                if len(self.currentView) >= 1 :
                    """Old views exist, so, just keep that"""
                    debug("setView: Keeping old views:"+str(self.currentView))
                else :
                    raise TypeError("New view did not pass validation:"+str(viewList))
            else :
                """Okay, we can accept the new views that have passed validation"""
                self.currentView = newViewList
                debug("setView: Views updated: "+str(self.currentView))
        if windowStart is not None :
            self.windowStart = windowStart
        if windowSize is not None :
            self.windowSize = windowSize
        if windowType is not None :
            if not windowType == 'index' :
                warnwarn("Cannot set index type: " +str(windowType))
                warnwarn("The only supported index type is: index.")
            #self.windowType = windowType

    def getIndexLimits(self) :
        #TODO use alternative index
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: no data loaded")
        return {    'max' : self.df.index.max(),
                    'min' : self.df.index.min() }

    def getView(self) :
        """ Return the set of labels/parameters in current view
        """
        return self.currentView

    def getViewLimits(self) :
        """ Returns: min/max values for currently shown data, all view pairs
        """
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: getWindowLimits: no data loaded")
        viewLimitDictList = list()
        for view in self.currentView :
            x,y = view
            viewDict = {'xmin' : self.df[x].min(),
                        'ymin' : self.df[y].min(),
                        'xmax' : self.df[x].max(),
                        'ymax' : self.df[y].max() }
            viewLimitDictList.append(viewDict)
        return viewLimitDictList

    def setWindow(self, start, size) :
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: setWindow: no data loaded")
        self.windowStart = start
        self.windowSize = size

    def getWindow(self) :
        """ Returns: window start and window size
        """
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: getView: no data loaded")
        return ( self.windowStart , self.windowSize )

    def getStartEnd(self) :
        start,size = self.getWindow()
        end = start+size
        debug("start,end:%s,%s" % (str(start),str(end)))
        return (start, end)
    
    def getViewData(self) :
        """ Returns df of data in the current view
        """
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: getViewData: no data loaded")
        if self.windowType == 'index' :
            start = self.windowStart
            end = self.windowStart + self.windowSize
            mySlice = slice(start,end)
            dfList = list()
            debug("currentView:"+ str(self.currentView))
            """Make a list of DF Views to sent back to caller"""
            for viewpair in self.currentView :
                """Create DF views to return to caller"""
                df = self.df[ list(viewpair) ]      # make view
                df = df[mySlice]                    # get slice of the data requested
                dfList.append(df)
            """Now, pass all views through transforms"""
            newDFlist = self.doTransforms(dfList)
            """Now, pass all views through fitter"""
            #newDFlist = self.doFits(dfList)
            return newDFlist
        else:
            raise Exception('DataAnalyser window type ' + self.windowType + ' not implemented')

    def doTransforms(self, dfList):
        """Run all active transforms on the given DataFrame"""
        newDFlist = dfList
        #debug("doTransforms: available transforms: %s" , self.__transforms.keys())
        for transName in self.__transforms.keys():
            transConfig = self.__transforms[transName]
            if not transConfig['Enabled']:
                continue
            debug("Running transformation type: %s", transName)
            transFunc = transConfig['func']
            debug("Transformation Config: %s", transConfig)
            newDFlist = transFunc(newDFlist, **transConfig)
            #debug("Got transformation back: %s", newDFlist[0].head())
        return newDFlist

    def get2DData(self) :
        if not self.isLoaded :
            raise _DataNotLoaded("ERROR: DataAnalyser: get2DData: no data loaded")
        raise Exception('Not implemented yet.')

    def chop(self, dirpath=pathlib.PurePath(os.path.curdir), fmt='csv'
            ,hdfKey='chop', prefix='chop', **kwargs) :
        """ Cut out the current view and make a new file with those data points
        """
        if not self.isLoaded :
            raise _DataNotLoaded("Data not loaded.")
        xMin, xMax = (self.windowStart, self.windowStart + self.windowSize)
        filename = "{0}_{1:d}-{2:d}.{3}".format(prefix,xMin,xMax,fmt)
        chopFilePath = os.path.join( dirpath , filename )
        if fmt not in self.__writeTypes.keys() :
            raise TypeError('Unsupported data format: '+str(fmt))
        writeFunc = self.__writeTypes[fmt]
        info("DataAnalyser: chop: writing to file:"+chopFilePath)
        df = self.df
        altIndex = self.altIndexCol
        #TODO finish alternative indexes
        try :
            if altIndex == 'index' :
                writeFunc(df[xMin:xMax+1], chopFilePath, **kwargs)
            else :
                dfView = df[ (df[altIndex] >= xMin) & (df[altIndex] <= xMax) ]
                writeFunc( dfView ,chopFilePath, **kwargs)
        except Exception as e :
            error("ERROR: DataAnalyser: chop: failed writing to file:"+chopFilePath)
            print(e)

    def getStats(self, quantiles) :
        """ Show stats
        """
        #TODO make this just one result for all labels in all views
        dfList = self.getViewData()
        statList = list()
        for df in dfList :
            # Create a DF of statistics data by combining the describe and quantile features of pandas DF
            statDF = pd.concat( [df.describe(),df.quantile(quantiles)] )
            statList.append( statDF )
        # pull off the first stat DF in the list
        statDF = statList.pop(0)
        for df in statList :
            # mash them together into one DF for simpler output
            newdf = statDF.T.append( df.T )
            statDF = newdf.T
        return [statDF]                                 #TODO need to return list for caller, could be changed

    def getCDFall(self, num_bins=100) :
        """ Return data for CDF
        """
        debug("DA: getCDF: starting...")
        debug("DA: getCDF: bins = %d" % num_bins)
        viewDFlist = self.getViewData()
        viewList = self.getView()
        debug("DA: viewDFlist type:" + str(type(viewDFlist)))
        debug("DA: viewList type:" + str(type(viewList)))
        cdfInfoLst = list()
        for viewpair in viewList :
            # for each "current view" of column/label pairs, do CDF of the ordinate
            yLabel = viewpair[1]                                # ordinate
            debug("getCDF: got ordinate label: {}".format(yLabel))
            ser = set()
            for df in viewDFlist :
                if yLabel in df.columns :
                    ser = df[yLabel]                            # get Series data
                    break
                else :
                    continue
            #debug("DA: Series: " + str(ser))
            counts, bin_edges = np.histogram(ser, bins=num_bins)
            #debug("DA: counts: " + str(counts))
            cdf = np.cumsum(counts)
            debug("DA: cdf: " + str(cdf))
            #assert not (cdf[-1] == 0)
            cdfInfoT = (yLabel, cdf, counts, bin_edges)
            debug("DA: getCDF: got CDF data: format: {}".format(type(cdfInfoT)))
            cdfInfoLst.append(cdfInfoT)
        return cdfInfoLst
Code Example #16
import pathlib, datetime

print("{:30} : {}".format("pathlib.PurePath('.')",
                          type(pathlib.PurePath("."))))
print("{:30} : {}".format("pathlib.PurePosixPath('.')",
                          type(pathlib.PurePosixPath("."))))
print("{:30} : {}".format("pathlib.PureWindowsPath('.')",
                          type(pathlib.PureWindowsPath("."))))

print()

print("{:30} : {}".format("pathlib.Path('.')", type(pathlib.Path("."))))
#print("{:30} : {}".format("pathlib.PosixPath('.')"         , type(pathlib.PosixPath("."))))
print("{:30} : {}".format("pathlib.WindowsPath('.')",
                          type(pathlib.WindowsPath("."))))

_path = pathlib.Path(".")
print(_path.cwd())
print()

for _dir in _path.iterdir():
    print("{} {:30} {:10} {:20} {}".format(
        "D" if _dir.is_dir() else "F", _dir.name,
        _dir.stat().st_size,
        datetime.datetime.fromtimestamp(
            _dir.stat().st_mtime).strftime("%Y/%m/%d %H:%M:%S"),
        _path.joinpath(_path.absolute(), _dir)))
Code Example #17
File: day13.py  Project: brianjp93/aoc2018
"""day13.py
"""
import pathlib

cwd = pathlib.Path(__file__).parent.absolute()
data = pathlib.PurePath(cwd, 'data')
test = pathlib.PurePath(cwd, 'test')
test2 = pathlib.PurePath(cwd, 'test2')


class MineCart:
    def __init__(self, fname):
        self.map = []
        self.carts = {}
        self.dindex = '^>v<'
        self.directions = {
            '^': (0, -1),
            '>': (1, 0),
            'v': (0, 1),
            '<': (-1, 0)
        }
        self.process(fname)

    def get(self, x, y):
        return self.map[y][x]

    def set(self, x, y, z):
        self.map[y][x] = z

    def next_cart(self, cart, curve, turn_count):
        out = cart
Code Example #18
 def as_path(self):
     """ returns the relative path construction for the child so that local can make use of it """
     return pathlib.PurePath(*self.parts)
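
A minimal sketch of what as_path() produces, assuming self.parts holds the child's path components:

import pathlib

parts = ("sub", "dir", "file.txt")  # hypothetical value of self.parts
print(pathlib.PurePath(*parts))     # => sub/dir/file.txt (sub\dir\file.txt on Windows)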
Code Example #19
File: ilastik.py  Project: jburel/ilastik
 def isvalidpath_win(path):
     """Whether an element of PATH is "clean" on Windows."""
     patterns = "*/cplex/*", "*/guirobi/*", "/windows/system32/*"
     return any(map(pathlib.PurePath(path).match, patterns))
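
PurePath.match() compares glob patterns from the right, so "*/cplex/*" matches any path whose final component sits directly inside a cplex directory. Note that the truthy result flags paths matching the listed patterns, so callers presumably invert or filter on it. A quick demonstration with hypothetical paths:

import pathlib

print(pathlib.PurePath("C:/ibm/cplex/bin").match("*/cplex/*"))        # => True
print(pathlib.PurePath("C:/python39/python.exe").match("*/cplex/*"))  # => False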
Code Example #20
                                               ext)).resolve()), **kwargs)


#        f.export("res/CMajor.mp3", format="mp3", bitrate='32k')
#        f.export("res/CMajor.ogg", format="ogg")
#        f.export("res/CMajor.flac", format="flac")

if __name__ == "__main__":
    import pathlib
    import Sampler
    import BaseWaveMaker
    import MusicTheory.EqualTemperament
    import MusicTheory.Scale
    import MusicTheory.tempo

    basepath = pathlib.PurePath('../res/scales/wav/')
    create_dirs(basepath)

    wm = BaseWaveMaker.BaseWaveMaker()
    sampler = Sampler.Sampler()
    scale = MusicTheory.Scale.Scale()
    timebase = MusicTheory.tempo.TimeBase()
    timebase.BPM = 120
    timebase.Metre = (4, 4)
    nv = MusicTheory.tempo.NoteValue(timebase)
    for key in ['C']:
        #    for key in ['C','C+','D','D+','E','F','F+','G','G+','A','A+','B']:
        print(key, 'major scale')
        scale.Major(key=key)
        waves = []
        for f0 in scale.Frequencies:
Code Example #21
def initialize_gui():
    """Initializes the GUI 
    ---
    If user has not initialized a profile, the GUI will ask for the following data:
    - Username
    - Password
    - Plan
    - Protocol

    sudo protonvpn-gui
    - Will start the GUI without invoking cli()
    """
    check_root()
    change_file_owner(os.path.join(CONFIG_DIR, "protonvpn-gui.log"))
    gui_logger.debug(
        "\n______________________________________\n\n\tINITIALIZING NEW GUI WINDOW\n______________________________________\n"
    )

    interface = Gtk.Builder()

    posixPath = pathlib.PurePath(
        pathlib.Path(__file__).parent.absolute().joinpath(
            "resources/main.glade"))
    glade_path = ''

    for path in posixPath.parts:
        if path == '/':
            glade_path = glade_path + path
        else:
            glade_path = glade_path + path + "/"

    interface.add_from_file(glade_path[:-1])

    interface.connect_signals(Handler(interface))

    if len(get_gui_processes()) > 1:
        gui_logger.debug(
            "[!] Two processes were found. Displaying MessageDialog to inform user."
        )
        messagedialog_window = interface.get_object("MessageDialog")
        messagedialog_label = interface.get_object("message_dialog_label")
        messagedialog_spinner = interface.get_object("message_dialog_spinner")

        messagedialog_label.set_markup(
            "Another GUI process was found, attempting to end it...")
        messagedialog_spinner.show()
        messagedialog_window.show()

        time.sleep(1)
        # thread = Thread(target=kill_duplicate_gui_process, args=[interface, messagedialog_label, messagedialog_spinner])
        # thread.daemon = True
        # thread.start()

        response = kill_duplicate_gui_process()

        if not response['success']:
            messagedialog_label.set_markup(response['message'])
            messagedialog_spinner.hide()
            time.sleep(3)
            sys.exit(1)

        messagedialog_label.set_markup(response['message'])
        messagedialog_spinner.hide()

    if not os.path.isfile(CONFIG_FILE):
        gui_logger.debug(">>> Loading LoginWindow")
        window = interface.get_object("LoginWindow")
        dashboard = interface.get_object("DashboardWindow")
        dashboard.connect("destroy", Gtk.main_quit)
        window.show()
    else:
        window = interface.get_object("DashboardWindow")
        gui_logger.debug(">>> Loading DashboardWindow")
        window.connect("destroy", Gtk.main_quit)

        messagedialog_window = interface.get_object("MessageDialog")
        messagedialog_label = interface.get_object("message_dialog_label")
        interface.get_object("message_dialog_sub_label").hide()
        messagedialog_spinner = interface.get_object("message_dialog_spinner")

        messagedialog_label.set_markup("Loading...")
        messagedialog_spinner.show()
        messagedialog_window.show()

        objects = {
            "interface": interface,
            "messagedialog_window": messagedialog_window,
            "messagedialog_label": messagedialog_label,
            "messagedialog_spinner": messagedialog_spinner,
        }

        thread = Thread(target=load_content_on_start, args=[objects])
        thread.daemon = True
        thread.start()

    window.show()

    # Gdk.threads_init()
    # Gdk.threads_enter()
    GObject.threads_init()
    Gtk.main()
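
The parts loop above only rebuilds the string form of posixPath; on POSIX systems the same value is available directly (a simplification, assuming resources/main.glade sits next to the module):

import pathlib

glade_path = str(pathlib.Path(__file__).parent.absolute() / "resources" / "main.glade")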
Code Example #22
def main():
    parser = argparse.ArgumentParser(
        description="PyTorch Object Detection Inference")
    parser.add_argument(
        "--config-file",
        default=
        "/private/home/fmassa/github/detectron.pytorch_v2/configs/e2e_faster_rcnn_R_50_C4_1x_caffe2.yaml",
        metavar="FILE",
        help="path to config file",
    )
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument(
        "--ckpt",
        help=
        "The path to the checkpoint for test, default is the latest checkpoint.",
        default=None,
    )
    parser.add_argument(
        "opts",
        help="Modify config options using the command-line",
        default=None,
        nargs=argparse.REMAINDER,
    )

    args = parser.parse_args()

    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()

    save_dir = cfg.OUTPUT_DIR  # ""
    filename = f'test_log_{pathlib.PurePath(args.ckpt) if args.ckpt is not None else "last"}.txt'
    test_result_filename = os.path.join(
        save_dir,
        pathlib.PurePath(filename).stem + '.pickle')
    logger = setup_logger("maskrcnn_benchmark",
                          save_dir,
                          get_rank(),
                          filename=filename)
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(cfg)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())

    model = build_detection_model(cfg)
    model.to(cfg.MODEL.DEVICE)

    # Initialize mixed-precision if necessary
    use_mixed_precision = cfg.DTYPE == 'float16'
    amp_handle = amp.init(enabled=use_mixed_precision, verbose=cfg.AMP_VERBOSE)

    output_dir = cfg.OUTPUT_DIR
    checkpointer = DetectronCheckpointer(cfg, model, save_dir=output_dir)
    ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else os.path.join(
        save_dir, args.ckpt)
    _ = checkpointer.load(ckpt, use_latest=args.ckpt is None)

    iou_types = ()
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm", )
    if cfg.MODEL.KEYPOINT_ON:
        iou_types = iou_types + ("keypoints", )
    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference",
                                         dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg,
                                        is_train=False,
                                        is_distributed=distributed)
    for output_folder, dataset_name, data_loader_val in zip(
            output_folders, dataset_names, data_loaders_val):
        inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=False if cfg.MODEL.RETINANET_ON else cfg.MODEL.RPN_ONLY,
            bbox_aug=cfg.TEST.BBOX_AUG.ENABLED,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
            test_result_filename=test_result_filename,
        )
        synchronize()
Code Example #23
File: cache.py  Project: jeromekelleher/stdpopsim-1
 def __attrs_post_init__(self):
     u = urllib.parse.urlparse(self.url)
     self._basename = pathlib.PurePath(u.path).name
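
The same basename extraction as a standalone expression (the URL is hypothetical):

import pathlib
import urllib.parse

u = urllib.parse.urlparse("https://example.com/downloads/genetic_map.tar.gz")
print(pathlib.PurePath(u.path).name)  # => genetic_map.tar.gz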
Code Example #24
File: path.py  Project: radon-h2020/xopera-opera
 def build(cls, yaml_node):
     return cls(pathlib.PurePath(yaml_node.value), yaml_node.loc)
Code Example #25
async def gdrive_upload(bot, update):
    dl_url, custom_file_name, _, _ = await extract_link(
        update.reply_to_message, "GLEECH")
    txt = update.text
    logger.info("command is : " + txt)
    if txt.find("rename") > -1 and len(txt[txt.find("rename") + 7:]) > 0:
        custom_file_name = txt[txt.find("rename") + 7:]
        custom_file_name = await sanitize_file_name(custom_file_name)
        custom_file_name = await sanitize_text(custom_file_name)
    logger.info(dl_url)
    logger.info(custom_file_name)
    reply_message = await bot.send_message(
        text=Translation.DOWNLOAD_START,
        chat_id=update.chat.id,
        reply_to_message_id=update.message_id)
    tmp_directory_for_each_user = f"{Config.DOWNLOAD_LOCATION}{update.message_id}"
    if not os.path.isdir(tmp_directory_for_each_user):
        os.makedirs(tmp_directory_for_each_user)
    if custom_file_name is None:
        if dl_url.find('workers.dev') > -1 or dl_url.find('uploadbot') > -1:
            custom_file_name = dl_url[dl_url.rindex("/") + 1:]
        elif dl_url.find('seedr') > -1:
            custom_file_name = dl_url[int(dl_url.rindex("/")) +
                                      1:int(dl_url.rindex("?"))]
        else:
            if dl_url.find("/") > -1 and dl_url.find("?") > -1:
                m_url = dl_url[:dl_url.rindex("?")]
                custom_file_name = m_url[int(m_url.rindex("/")) + 1:]
            else:
                custom_file_name = dl_url[dl_url.rindex("/") + 1:]
        custom_file_name = urllib.parse.unquote(custom_file_name)
    download_directory = tmp_directory_for_each_user + "/" + custom_file_name
    async with aiohttp.ClientSession() as session:
        c_time = time.time()
        try:
            await download_coroutine(bot, session, dl_url, download_directory,
                                     reply_message.chat.id,
                                     reply_message.message_id, c_time)
        except Exception:
            await bot.edit_message_text(text=Translation.SLOW_URL_DECED,
                                        chat_id=reply_message.chat.id,
                                        message_id=reply_message.message_id)
            return False
    if os.path.exists(download_directory):
        end_one = datetime.now()
        up_name = pathlib.PurePath(download_directory).name
        size = get_readable_file_size(get_path_size(download_directory))
        try:
            await bot.edit_message_text(
                text="¡¡¡Descarga completa!!!\n Subida en curso...",
                chat_id=reply_message.chat.id,
                message_id=reply_message.message_id)
        except Exception as e:
            logger.info(str(e))
            pass
        logger.info(f"Subiendo nombre: {up_name}")
        drive = gdriveTools.GoogleDriveHelper(up_name)
        gd_url, index_url = drive.upload(download_directory)
        button = []
        button.append([
            pyrogram.types.InlineKeyboardButton(text="☁️ Enlace de la nube",
                                                url=f"{gd_url}")
        ])
        if Config.INDEX_URL:
            logger.info(index_url)
            button.append([
                pyrogram.types.InlineKeyboardButton(text="ℹ️ Enlace de índice",
                                                    url=f"{index_url}")
            ])
        button_markup = pyrogram.types.InlineKeyboardMarkup(button)
        await bot.send_message(
            text=
            f"🤖: <b>{up_name}</b> se ha subido con éxito a su nube 🤒 \n📀 Tamaño: {size}",
            chat_id=update.chat.id,
            reply_to_message_id=update.message_id,
            reply_markup=button_markup)
        if Config.INDEX_URL:
            await generate_short_link(reply_message, index_url,
                                      custom_file_name)
        await reply_message.delete()
Code Example #26
    def __init__(self,
                 *,
                 name: Optional[str],
                 python_file: Optional[str],
                 comment: Optional[str],
                 writers: Set[str] = None):
        """
        ### Create the experiment

        :param name: name of the experiment
        :param python_file: `__file__` that invokes this. This is stored in
         the experiments list.
        :param comment: a short description of the experiment

        The experiments log keeps track of `python_file`, `name`, `comment` as
         well as the git commit.

        Experiment maintains the locations of checkpoints, logs, etc.
        """

        if python_file is None:
            python_file = self.__get_caller_file()

        self.lab = Lab(python_file)

        if name is None:
            file_path = pathlib.PurePath(python_file)
            name = file_path.stem

        if comment is None:
            comment = ''

        self.name = name
        self.experiment_path = self.lab.experiments / name

        self.check_repo_dirty = self.lab.check_repo_dirty

        self.configs_processor = None

        experiment_path = pathlib.Path(self.experiment_path)
        if not experiment_path.exists():
            experiment_path.mkdir(parents=True)

        self.run = Run.create(experiment_path=self.experiment_path,
                              python_file=python_file,
                              trial_time=time.localtime(),
                              comment=comment)

        repo = git.Repo(self.lab.path)

        self.run.commit = repo.head.commit.hexsha
        self.run.commit_message = repo.head.commit.message.strip()
        self.run.is_dirty = repo.is_dirty()
        self.run.diff = repo.git.diff()

        checkpoint_saver = self._create_checkpoint_saver()
        logger.internal().set_checkpoint_saver(checkpoint_saver)

        if writers is None:
            writers = {'sqlite', 'tensorboard'}

        if 'sqlite' in writers:
            logger.internal().add_writer(sqlite.Writer(self.run.sqlite_path))
        if 'tensorboard' in writers:
            logger.internal().add_writer(
                tensorboard.Writer(self.run.tensorboard_log_path))
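
The name fallback above leans on PurePath.stem. A minimal sketch of that step
in isolation (the script path is hypothetical):

import pathlib

# With no explicit name, the experiment is named after the invoking script:
# the file name without its directory or extension.
python_file = "/home/user/lab/mnist_train.py"  # hypothetical path
print(pathlib.PurePath(python_file).stem)  # => 'mnist_train'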
Code Example #27
File: main.py Project: CodePpoi/python-learn
# This is a sample Python script.

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.

import pathlib
import re
keyword = "apple"

# Get the path of the index file
current_path = pathlib.PurePath(__file__).parent
dbfile = current_path.joinpath("search.db")

# Search the index file for the keyword
with open(dbfile, encoding='utf-8') as f:
    for line in f.readlines():
        if re.search(keyword, line):
            print(line.rstrip())


def print_hi(name):
    # Use a breakpoint in the code line below to debug your script.
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
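
One hedged note on the snippet above: PurePath only does path arithmetic, so
it cannot verify that search.db exists. A concrete Path could guard the
open() call; a sketch under that assumption:

import pathlib

# Same lookup with a concrete Path, which adds filesystem access
# (exists, open) on top of the pure path computation.
dbfile = pathlib.Path(__file__).parent / "search.db"
if dbfile.exists():
    with dbfile.open(encoding="utf-8") as f:
        for line in f:
            ...  # search as above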
Code Example #28
    def __init__(self, path):
        self.path = path
        super().__init__(pathlib.PurePath(path).name, [])
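
For reference, the PurePath(path).name call used here reduces any path to its
final component, so the node label is just the file or folder name. A quick
sketch:

import pathlib

pathlib.PurePath("/data/projects/report.txt").name  # => 'report.txt'
pathlib.PurePath("report.txt").name                 # => 'report.txt'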
Code Example #29
def Data_Validation_Main():
    init()
    root_dir = "C:\\Seronet_Data_Validation"
    passing_msg = (
        "File is a valid Zipfile. No errors were found in submission. " +
        "Files are good to proceed to Data Validation")
    #############################################################################################
    summary_path = root_dir + file_sep + "Summary_of_all_Submissions.xlsx"
    summary_file = []
    if os.path.exists(summary_path):
        xls = pd.ExcelFile(summary_path, engine='openpyxl')
        for iterZ in xls.sheet_names:
            if len(summary_file) == 0:
                summary_file = pd.read_excel(summary_path,
                                             sheet_name=iterZ,
                                             engine='openpyxl')
            else:
                new_file = pd.read_excel(summary_path,
                                         sheet_name=iterZ,
                                         engine='openpyxl')
                summary_file = pd.concat([summary_file, new_file])
        summary_file.reset_index(inplace=True)
        summary_file.drop(["index"], inplace=True, axis=1)
    else:
        summary_file = pd.DataFrame(columns=[
            "Submission_Status", "Date_Of_Last_Status", "Folder_Location",
            "CBC_Num", "Date_Timestamp", "Submission_Name",
            "Validation_Status", "JIRA_Ticket", "Ticket_Status"
        ])
#############################################################################################
    if "Submission_Status" not in summary_file.columns:
        print(
            "Excel file exists but is empty; this will cause errors during validation"
        )
        print("Delete the file and re-run the program. Terminating Data Validation")
        clear_empty_folders(root_dir)
        input("\n\nPress Enter to close window...")
        return {}

    Support_Files = get_subfolder(root_dir, "Support_Files")
    assay_folders = [i for i in Support_Files if os.path.isdir(i)]
    assay_data, assay_target = populate_assay_data(assay_folders)

    create_sub_folders(root_dir, "00_Uploaded_Submissions")
    create_sub_folders(root_dir, "01_Failed_File_Validation")
    create_sub_folders(root_dir, "02_Data_Validation_No_Errors")
    create_sub_folders(root_dir, "03_Data_Validation_Column_Errors")
    create_sub_folders(root_dir, "04_Data_Validation_Data_Errors")
    create_sub_folders(root_dir, "05_Data_Validation_Major_Errors")
    create_sub_folders(root_dir, "06_Data_Validation_Minor_Errors")

    summary_file = check_for_typo(summary_file)
    summary_file = move_minor_errors(summary_file, root_dir)
    summary_file = move_major_errors(summary_file, root_dir)
    summary_file = move_updated(summary_file, root_dir)
    summary_file = move_folder_to_uploaded(summary_file, root_dir)
    CBC_Folders = get_subfolder(root_dir, "Files_To_Validate")

    if len(CBC_Folders) == 0:
        print(
            "\nThe Files_To_Validate folder is empty; no submissions were downloaded to process\n"
        )
#############################################################################################
    rename_CBC_folders(root_dir, CBC_Folders)
    CBC_Folders = get_subfolder(root_dir, "Files_To_Validate")

    sort_order = [int(i[-2:]) for i in CBC_Folders]
    sort_list = sorted(range(len(sort_order)), key=lambda k: sort_order[k])
    CBC_Folders = [CBC_Folders[i] for i in sort_list]

    all_res = []
    for iterT in CBC_Folders:
        date_folders = os.listdir(iterT)
        cbc_name = pathlib.PurePath(iterT).name
        res = [cbc_name, 0, 0]
        if len(date_folders) == 0:
            print("There are no submitted files for " + cbc_name)
            clear_dir(iterT)
            all_res.append(res)
            continue

        for iterD in date_folders:
            Date_path = iterT + file_sep + iterD
            Submissions_Names = os.listdir(Date_path)
            file_count = 0
            for iterS in Submissions_Names:
                file_str = (Date_path + file_sep + iterS)
                if os.path.isfile(file_str):
                    os.remove(file_str)
                    file_str = file_str.replace(
                        (root_dir + file_sep + "Files_To_Validate"), "")
                    print(
                        colored(
                            "\n##    File Validation has NOT been run for " +
                            file_str + "    ##", 'yellow'))
                    continue
                current_sub_object = File_Submission_Object.Submission_Object(
                    iterS[15:])
                curr_dict = populate_dict(validation_date, cbc_name, iterD,
                                          current_sub_object)

                file_check = summary_file.query(
                    "CBC_Num == @cbc_name and Date_Timestamp == @iterD and " +
                    " Submission_Name ==@current_sub_object.File_Name")
                if len(file_check) > 0:
                    curr_status = file_check['Submission_Status'].tolist()[0]
                    if curr_status in ["Updated"]:
                        curr_dict = file_check.to_dict('records')
                        curr_dict = curr_dict[0]
                        curr_dict["Submission_Status"] = "Pending Review"
                    else:
                        if curr_status in ["Downloaded"]:
                            pass
                        elif curr_status in ["Pending Review"]:
                            print(
                                "Submission Previously Updated and Reprocssed"
                                + " - Pending Manual Review of changes")
                        elif curr_status not in "Unknown":
                            print("Submission Status (" + curr_status +
                                  ") is Unknown." + " Possible mistyped")
                            print(
                                "Defaulting Status to Unknowm and skipping File"
                            )

                        shutil.rmtree(Date_path + file_sep + iterS)
                        continue

                original_path = Date_path + file_sep + iterS
                print("\n## Starting the Data Validation Process for " +
                      current_sub_object.File_Name + " ##")
                file_count = file_count + 1
                list_of_folders = os.listdir(Date_path + file_sep + iterS)
                result_message = get_result_message(list_of_folders, Date_path,
                                                    original_path, iterS)
                if (result_message == ''):
                    continue
                if result_message != passing_msg:
                    print("Submitted File FAILED the File-Validation Process")
                    print("With Error Message: " + result_message + "\n")
                    error_str = "Submission Failed File Validation"
                    move_file_and_update(original_path, root_dir,
                                         current_sub_object, curr_dict,
                                         "01_Failed_File_Validation",
                                         error_str)
                    curr_dict["Validation_Status"] = result_message
                    # DataFrame.append was removed in pandas 2.0; concat is
                    # the supported replacement.
                    summary_file = pd.concat(
                        [summary_file, pd.DataFrame([curr_dict])],
                        ignore_index=True)
                    summary_file.drop_duplicates(
                        ["CBC_Num", "Date_Timestamp", "Submission_Name"],
                        keep='last',
                        inplace=True)
                    continue
#############################################################################################################################
                list_of_files = []
                # The original checked "UnZipped_Files" but listed
                # "Unzipped_Files"; one consistent name must match the
                # folder on disk.
                unzipped_dir = "Unzipped_Files"
                if unzipped_dir in list_of_folders:
                    list_of_files = os.listdir(Date_path + file_sep + iterS +
                                               file_sep + unzipped_dir)
                if len(list_of_files) == 0:
                    print(
                        "There are no files found within this submission to process"
                    )
                    continue
                Subpath = Date_path + file_sep + iterS
                current_sub_object = populate_object(current_sub_object,
                                                     Subpath, list_of_files,
                                                     Support_Files)
                current_sub_object.update_object(assay_data, "assay.csv")
                current_sub_object.update_object(assay_target,
                                                 "assay_target.csv")
                #############################################################################################################################
                col_err_count = len(current_sub_object.Column_error_count)
                if col_err_count > 0:
                    print(
                        colored(
                            "There are (" + str(col_err_count) +
                            ") Column Names in the submission that are wrong/missing",
                            'red'))
                    print(
                        colored(
                            "Not able to process this submission, please correct and resubmit \n",
                            'red'))
                    current_sub_object.write_col_errors((Subpath + file_sep))
                    error_str = "Submission has Column Errors, Data Validation NOT Preformed"
                    move_file_and_update(original_path, root_dir,
                                         current_sub_object, curr_dict,
                                         "03_Data_Validation_Column_Errors",
                                         error_str)
                    summary_file = pd.concat(
                        [summary_file, pd.DataFrame([curr_dict])],
                        ignore_index=True)
                    continue
                valid_cbc_ids = str(current_sub_object.CBC_ID)
                for file_name in current_sub_object.Data_Object_Table:
                    if file_name not in [
                            "submission.csv", "shipping_manifest.csv",
                            "assay.csv", "assay_target.csv"
                    ]:
                        if "Data_Table" in current_sub_object.Data_Object_Table[
                                file_name]:
                            try:
                                data_table = current_sub_object.Data_Object_Table[
                                    file_name]['Data_Table']
                                data_table, drop_list = current_sub_object.correct_var_types(
                                    file_name)
                                current_sub_object = Validation_Rules(
                                    re, datetime, current_sub_object,
                                    data_table, file_name, valid_cbc_ids,
                                    drop_list)
                            except Exception as e:
                                display_error_line(e)
                        else:
                            print(file_name +
                                  " was not included in the submission")


#############################################################################################################################
                check_ID_Cross_Sheet(current_sub_object, re)
                compare_tests(current_sub_object)
                try:
                    create_sub_folders(
                        Date_path,
                        iterS + file_sep + "Data_Validation_Results", True)
                    Data_Validation_Path = Date_path + file_sep + iterS + file_sep + "Data_Validation_Results"
                    current_sub_object.write_error_file(Data_Validation_Path +
                                                        file_sep)
                    if len(current_sub_object.Error_list) == 0:
                        error_str = "No Errors were Found during Data Validation"
                        move_file_and_update(original_path, root_dir,
                                             current_sub_object, curr_dict,
                                             "02_Data_Validation_No_Errors",
                                             error_str)
                    else:
                        error_str = ("Data Validation found " +
                                     str(len(current_sub_object.Error_list)) +
                                     " errors in the submitted files")
                        move_file_and_update(original_path, root_dir,
                                             current_sub_object, curr_dict,
                                             "04_Data_Validation_Data_Errors",
                                             error_str)
                except Exception as err:
                    print("An Error Occured when trying to write output file")
                    display_error_line(err)
                print("Validation for this File is complete")
                summary_file = pd.concat(
                    [summary_file, pd.DataFrame([curr_dict])],
                    ignore_index=True)
                summary_file.drop_duplicates(
                    ["CBC_Num", "Date_Timestamp", "Submission_Name"],
                    keep='last',
                    inplace=True)

            if file_count > 0:
                res[1] = res[1] + 1
                res[2] = res[2] + file_count
            if len(os.listdir(Date_path)) == 0:
                shutil.rmtree(Date_path)
        print(
            colored(
                "\nEnd of Current CBC Folder (" + cbc_name +
                "), moving to next CBC Folder", 'blue'))
        all_res.append(res)
        clear_dir(iterT)
    print("\nALl folders have been checked")
    print("Closing Validation Program")

    summary_file.sort_values(
        by=['CBC_Num', 'Date_Of_Last_Status', 'Date_Timestamp'], inplace=True)
    writer = pd.ExcelWriter(summary_path, engine='xlsxwriter')
    writer = write_excel_sheets(writer, summary_file,
                                "01_Failed_File_Validation",
                                "Failed_File_Validation")
    writer = write_excel_sheets(writer, summary_file,
                                "03_Data_Validation_Column_Errors",
                                "Column_Errors_Found")
    writer = write_excel_sheets(writer, summary_file,
                                "04_Data_Validation_Data_Errors",
                                "Failed_Data_Validation")
    writer = write_excel_sheets(writer, summary_file,
                                "06_Data_Validation_Minor_Errors",
                                "Pending_Feedback")
    writer = write_excel_sheets(writer, summary_file,
                                "05_Data_Validation_Major_Errors",
                                "Major_Errors_Found")
    writer = write_excel_sheets(writer, summary_file,
                                "02_Data_Validation_No_Errors",
                                "Passed_Data_Validation")
    writer = write_excel_sheets(writer, summary_file,
                                "00_Uploaded_Submissions",
                                "Uploaded_Submissions")

    writer.close()  # ExcelWriter.save() was removed in pandas 2.0; close() writes the file

    print("\n#### Validation Summary ####\n")
    for cbc_name, n_dates, n_subs in all_res:
        print("CBC :: " + cbc_name + " had " + str(n_dates) +
              " date folders checked and processed " + str(n_subs) +
              " unique submissions")

    clear_empty_folders(root_dir)
    input("\n\nPress Enter to close window...")
Code Example #30
File: features.py Project: tswsxk/CangJie
# coding: utf-8
# 2019/12/30 @ tongshiwei

from longling import path_append, loading
import pathlib
from .constants import CHAR, STROKE, RADICAL, GLYPH, PRON
from .glyph import character_glyph
from .stroke import token2stroke

__all__ = ["load_dict", "CDict", "token2radical", "char_features"]

DEFAULT_DICT = path_append(pathlib.PurePath(__file__).parents[2],
                           "meta_data",
                           "cdict.csv",
                           to_str=True)
DEFAULT_CDICT = [None]


def load_dict(dict_path):
    cdict = {}
    for line in loading(dict_path):
        cdict[line[CHAR]] = {}
        cdict[line[CHAR]][RADICAL] = line[RADICAL]
    return cdict
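
A hypothetical usage of load_dict, assuming cdict.csv carries CHAR and
RADICAL columns as the loop above implies:

cdict = load_dict(DEFAULT_DICT)
print(cdict["好"][RADICAL])  # => the radical recorded for '好'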


class CDict(object):
    def __init__(self, dict_obj=None, allow_missing=True):
        self._dict = dict_obj
        self._allow_missing = allow_missing