Example #1
    def create(self):
        """
        Create a new environment
        """
        output = Argv(
            self.conda,
            "create",
            "--yes",
            "--mkdir",
            "--prefix",
            self.path,
            "python={}".format(self.python),
        ).get_output(stderr=DEVNULL)
        match = re.search(
            r"\W*(.*activate) ({})".format(re.escape(str(self.path))), output
        )
        self.source = self.pip.source = (
            tuple(match.group(1).split()) + (match.group(2),)
            if match
            else ("activate", self.path)
        )
        conda_env = Path(self.conda).parent.parent / 'etc' / 'profile.d' / 'conda.sh'
        if conda_env.is_file() and not is_windows_platform():
            self.source = self.pip.source = CommandSequence(('source', conda_env.as_posix()), self.source)

        # install cuda toolkit
        try:
            cuda_version = float(int(self.session.config['agent.cuda_version'])) / 10.0
            if cuda_version > 0:
                self._install('cudatoolkit={:.1f}'.format(cuda_version))
        except Exception:
            pass
        return self
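A minimal usage sketch (not part of the class above; names assumed): the activation prefix that create() stores in self.source can be prepended to a command and run through a shell so that the command executes inside the new environment.
import subprocess

def run_in_env(source_prefix, command):
    # source_prefix is e.g. ("source", ".../etc/profile.d/conda.sh", "activate", "/path/to/env")
    # Chain activation and the target command with "&&" in a single bash invocation.
    full_cmd = " ".join(list(source_prefix) + ["&&"] + list(command))
    return subprocess.call(full_cmd, shell=True, executable="/bin/bash")

# e.g. run_in_env(env.source, ["python", "--version"])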
Example #2
    def saveMarketData(self, p_file_path="", p_save_type="CSV"):
        # Save the market data to CSV
        my_file = Path(p_file_path)
        if my_file.is_file():
            # File exists: append rows without repeating the header
            with open(p_file_path, 'a') as myfile:
                self.market_data.to_csv(myfile,
                                        header=False,
                                        date_format='%Y-%m-%d %H:%M:%S')
        else:
            # Create directory paths
            os.makedirs(os.path.dirname(p_file_path), exist_ok=True)
            # No file yet, so create a new one
            with open(p_file_path, 'w') as myfile:
                self.market_data.to_csv(myfile,
                                        header=False,
                                        date_format='%Y-%m-%d %H:%M:%S')
        return
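The same append-or-create pattern can be written as a standalone helper. A sketch with assumed names, relying only on pandas' own mode and header arguments (unlike the method above, it writes a header row the first time the file is created):
import pandas as pd
from pathlib import Path

def append_frame_to_csv(frame, file_path):
    # Create parent directories on first use, then append; write the header
    # only when the file does not exist yet.
    target = Path(file_path)
    target.parent.mkdir(parents=True, exist_ok=True)
    frame.to_csv(file_path, mode='a', header=not target.is_file(),
                 date_format='%Y-%m-%d %H:%M:%S')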
Example #3
    def initialize_logging(self):
        logging_config = self._config.get("logging", None)
        if not logging_config:
            return False

        # handle incomplete file handlers
        deleted = []
        handlers = logging_config.get("handlers", {})
        for name, handler in list(handlers.items()):
            cls = handler.get("class", None)
            is_file = cls and "FileHandler" in cls
            if cls is None or (is_file and "filename" not in handler):
                deleted.append(name)
                del handlers[name]
            elif is_file:
                file = Path(handler.get("filename"))
                if not file.is_file():
                    file.parent.mkdir(parents=True, exist_ok=True)
                    file.touch()

        # remove dependency in deleted handlers
        root_logger = logging_config.get("root", None)
        loggers = list(logging_config.get(
            "loggers", {}).values()) + ([root_logger] if root_logger else [])
        for logger in loggers:
            handlers = logger.get("handlers", None)
            if not handlers:
                continue
            logger["handlers"] = [h for h in handlers if h not in deleted]

        extra = None
        if self._app:
            extra = {"app": self._app}
        initialize_log(logging_config, extra=extra)
        return True
Example #4
def import_datasources(path: str, sync: str, recursive: bool) -> None:
    """Import datasources from YAML"""
    from superset.utils import dict_import_export

    sync_array = sync.split(",")
    path_object = Path(path)
    files = []
    if path_object.is_file():
        files.append(path_object)
    elif path_object.exists() and not recursive:
        files.extend(path_object.glob("*.yaml"))
        files.extend(path_object.glob("*.yml"))
    elif path_object.exists() and recursive:
        files.extend(path_object.rglob("*.yaml"))
        files.extend(path_object.rglob("*.yml"))
    for file_ in files:
        logger.info("Importing datasources from file %s", file_)
        try:
            with file_.open() as data_stream:
                dict_import_export.import_from_dict(
                    yaml.safe_load(data_stream), sync=sync_array)
        except Exception as ex:  # pylint: disable=broad-except
            logger.error("Error when importing datasources from file %s",
                         file_)
            logger.error(ex)
Example #5
def get_IB_data(ID):
    # Returns two namedtuples, with IB training and testing data
    #   trn.X is training data
    #   trn.y is training class, with numbers from 0 to 1
    #   trn.Y is training class, but coded as a 2-dim vector with one entry set to 1
    # similarly for tst
    nb_classes = 2
    data_file = Path('datasets/IB_data_' + str(ID) + '.npz')
    if data_file.is_file():
        data = np.load('datasets/IB_data_' + str(ID) + '.npz')
    else:
        create_IB_data(ID)
        data = np.load('datasets/IB_data_' + str(ID) + '.npz')

    (X_train, y_train), (X_test, y_test) = (data['X_train'],
                                            data['y_train']), (data['X_test'],
                                                               data['y_test'])

    Y_train = keras.utils.np_utils.to_categorical(y_train,
                                                  nb_classes).astype('float32')
    Y_test = keras.utils.np_utils.to_categorical(y_test,
                                                 nb_classes).astype('float32')

    Dataset = namedtuple('Dataset', ['X', 'Y', 'y', 'nb_classes'])
    trn = Dataset(X_train, Y_train, y_train, nb_classes)
    tst = Dataset(X_test, Y_test, y_test, nb_classes)
    del X_train, X_test, Y_train, Y_test, y_train, y_test
    return trn, tst
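Hypothetical usage (the dataset ID 0 is assumed): the returned namedtuples expose the raw features, the one-hot labels, and the integer labels side by side.
trn, tst = get_IB_data(0)
print(trn.X.shape, trn.Y.shape, tst.X.shape)   # features and 2-column one-hot labels
assert trn.Y.shape[1] == trn.nb_classes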
Example #6
def create_dbsqlite():
    path = os.path.join(ROOT_PATH, "db.sqlite")
    dbsqlite = Path(path)

    if not dbsqlite.is_file():
        with open(path, "wb"):
            os.utime(path, None)
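A shorter equivalent is possible with pathlib alone; this sketch assumes the same ROOT_PATH constant.
from pathlib import Path

def create_dbsqlite_pathlib():
    db_path = Path(ROOT_PATH) / "db.sqlite"
    if not db_path.is_file():
        db_path.touch()  # creates an empty file, like the open()/utime pair above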
Example #7
    def report_offline_session(cls, task, folder):
        filename = Path(folder) / cls.__offline_filename
        if not filename.is_file():
            return False
        with open(filename.as_posix(), 'rt') as f:
            i = 0
            while True:
                try:
                    line = f.readline()
                    if not line:
                        break
                    list_requests = json.loads(line)
                    for r in list_requests:
                        r.pop('task', None)
                    i += 1
                except StopIteration:
                    break
                except Exception as ex:
                    warning('Failed reporting log, line {} [{}]'.format(i, ex))
                batch_requests = events.AddBatchRequest(
                    requests=[events.TaskLogEvent(task=task.id, **r) for r in list_requests])
                if batch_requests.requests:
                    res = task.session.send(batch_requests)
                    if res and not res.ok():
                        warning("failed logging task to backend ({:d} lines, {})".format(
                            len(batch_requests.requests), str(res.meta)))
        return True
Example #8
    def _get_conda_sh(self):
        # type: () -> Path
        base_conda_env = Path(
            self.conda).parent.parent / 'etc' / 'profile.d' / 'conda.sh'
        if base_conda_env.is_file():
            return base_conda_env
        for path in os.environ.get('PATH', '').split(
                select_for_platform(windows=';', linux=':')):
            conda = find_executable("conda", path=path)
            if not conda:
                continue
            conda_env = Path(
                conda).parent.parent / 'etc' / 'profile.d' / 'conda.sh'
            if conda_env.is_file():
                return conda_env
        return base_conda_env
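The PATH scan can also be written with the standard library only; a sketch assuming a POSIX-style conda layout, since os.pathsep already resolves to ";" on Windows and ":" elsewhere and shutil.which replaces find_executable.
import os
from pathlib import Path
from shutil import which

def find_conda_sh():
    for path_dir in os.environ.get("PATH", "").split(os.pathsep):
        conda = which("conda", path=path_dir)
        if not conda:
            continue
        candidate = Path(conda).parent.parent / "etc" / "profile.d" / "conda.sh"
        if candidate.is_file():
            return candidate
    return None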
Example #9
def search(request):
    updatedb(request)
    if request.method == 'POST':
        form = ResultSearch(request.POST)
        if form.is_valid():
            # Path.objects.filter(idsha256__contains=)
            sha256idquery = form.cleaned_data['id']
            # queryid=Path.objects.get(idsha256=sha256id)
            try:
                pathdata = Path.objects.get(idsha256=sha256idquery)  # Ok
                print(pathdata.pathto)
                print(pathdata.idsha256)
                checkfile = Paths(pathdata.pathto + '/reports/report.json')
                if checkfile.is_file():
                    context = dashboardfunc(pathdata.pathto +
                                            '/reports/report.json')
                    return render(request, 'analisys.html', context)
                else:
                    messages.add_message(request, messages.INFO,
                                         'ID not found')
                    return redirect(request.META['HTTP_REFERER'])
            except ObjectDoesNotExist:
                messages.add_message(request, messages.INFO, 'ID not found')
                return redirect(request.META['HTTP_REFERER'])

    else:
        return render(request, 'search.html')
Example #10
def resolve_local(document_loader, uri):
    pathpart, frag = urllib.parse.urldefrag(uri)
    pathobj = Path(pathpart).resolve()

    if pathobj.is_file():
        if frag:
            return "{}#{}".format(pathobj.as_uri(), frag)
        else:
            return pathobj.as_uri()

    sharepaths = [
        os.environ.get(
            "XDG_DATA_HOME",
            os.path.join(os.path.expanduser('~'), ".local", "share"))
    ]
    sharepaths.extend(
        os.environ.get("XDG_DATA_DIRS",
                       "/usr/local/share/:/usr/share/").split(":"))
    shares = [os.path.join(s, "commonwl", uri) for s in sharepaths]

    _logger.debug("Search path is %s", shares)

    for path in shares:
        if os.path.exists(path):
            return Path(uri).as_uri()
        if os.path.exists("{}.cwl".format(path)):
            return Path("{}.cwl".format(path)).as_uri()
    return None
Example #11
def list(request):
    if request.method == 'GET':
        dirtofile = request.GET.get('dir', '')
        if dirtofile != '':
            try:
                pathquery = dirtofile
                pathdata = Path.objects.get(pathto=pathquery)  # Ok
                print(pathdata.pathto)
                print(pathdata.idsha256)
                pathfile = pathdata.pathto + 'reports/report.json'
                checkfile = Paths(pathfile)
                if checkfile.is_file():
                    context = dashboardfunc(pathfile)
                    return render(request, 'analisys.html', context)
                else:
                    #filenotready = "File has not finished analysis"
                    #query = Path.objects.all()
                    #return render(request, 'list.html', {'filenotready': filenotready, 'query': query})
                    messages.add_message(request, messages.INFO,
                                         'File has not finished analysis')
                    return redirect(request.META['HTTP_REFERER'])
            except ObjectDoesNotExist:
                #filenotready = "File has not finished analysis"
                #query = Path.objects.all()
                #return render(request, 'list.html', {'filenotready': filenotready, 'query': query})
                messages.add_message(request, messages.INFO,
                                     'File has not finished analysis')
                return redirect(request.META['HTTP_REFERER'])
        else:
            updatedb(request)
            query = Path.objects.all()
            for i in query:
                print(i.namefile)
            return render(request, 'list.html', {'query': query})
Example #12
def updatedb(request):
    listoffiles = glob.glob("/home/fernando/.cuckoo/storage/analyses/*")
    for i in listoffiles:
        if "latest" not in i:
            checkfile = Paths(i + "/reports/report.json")
            if checkfile.is_file():
                with open(i + "/reports/report.json") as data_file:
                    try:
                        pathdata = Path.objects.get(pathto=i)
                        data = json.load(data_file)
                        filename = data['target']['file']['name']
                        file_sha = data['target']['file']['sha256']
                        dirpath = i + "/"
                        pathdata.namefile = filename
                        pathdata.idsha256 = file_sha
                        pathdata.pathto = dirpath
                        pathdata.save()
                    except ObjectDoesNotExist:
                        data = json.load(data_file)
                        filename = data['target']['file']['name']
                        file_sha = data['target']['file']['sha256']
                        dirpath = i + "/"
                        newinfo = Path(namefile=filename,
                                       idsha256=file_sha,
                                       pathto=dirpath)
                        newinfo.save()
    return "1"
Example #13
def get_IB_data(ID):

    # Returns two namedtuples, with IB training and testing data
    #   trn.X is training data
    #   trn.y is training class, with numbers from 0 to 1
    #   trn.Y is training class, but coded as a 2-dim vector with one entry set to 1
    # similarly for tst

    nb_classes = 2
    data_file = Path('datasets/IB_data_'+str(ID)+'.npz')
    if data_file.is_file():
        data = np.load('datasets/IB_data_'+str(ID)+'.npz')
    else:
        create_IB_data(ID)
        data = np.load('datasets/IB_data_'+str(ID)+'.npz')

    X_train, y_train = torch.FloatTensor(data['X_train']), torch.LongTensor(data['y_train'].squeeze())
    X_test, y_test = torch.FloatTensor(data['X_test']), torch.LongTensor(data['y_test'].squeeze())
    # y_train (0.8*4096, 1)  y_test (0.2*4096,1)
    Y_train = one_hot_embedding(y_train, nb_classes)
    Y_test  = one_hot_embedding(y_test, nb_classes)


    Dataset = namedtuple('Dataset',['X','Y','y','nb_classes'])
    trn = Dataset(X_train, Y_train, y_train, nb_classes)
    tst = Dataset(X_test , Y_test, y_test, nb_classes)
    del X_train, X_test, Y_train, Y_test, y_train, y_test
    return trn, tst
Example #14
    def handle(cls, name, args):
        # type: (str, Dict[str, Any]) -> None
        """Perform the actual test.

        Relies on .cfnlintrc file to be located beside the Runway config file.

        """
        cfnlintrc = Path('./.cfnlintrc')

        if not cfnlintrc.is_file():
            LOGGER.error('File must exist to use this test: %s', cfnlintrc)
            sys.exit(1)

        # prevent duplicate log messages by not passing to the root logger
        logging.getLogger('cfnlint').propagate = False
        try:
            with argv(*['cfn-lint'] + args.get('cli_args', [])):
                runpy.run_module('cfnlint', run_name='__main__')
        except SystemExit as err:  # this call will always result in SystemExit
            if err.code != 0:  # ignore zero exit codes but re-raise for non-zero
                if not (yaml.safe_load(cfnlintrc.read_text())
                        or {}).get('templates'):
                    LOGGER.warning('cfnlintrc is missing a "templates" '
                                   'section which is required by cfn-lint')
                raise
Example #15
def resolve_local(document_loader, uri):
    # type: (Loader, Text) -> Optional[Text]
    pathpart, frag = urllib.parse.urldefrag(uri)

    try:
        pathobj = Path(pathpart).resolve()
    except (WindowsError, OSError):
        _logger.debug("local resolver could not resolve %s", uri)
        return None

    if pathobj.is_file():
        if frag:
            return "{}#{}".format(pathobj.as_uri(), frag)
        return pathobj.as_uri()

    sharepaths = [
        os.environ.get(
            "XDG_DATA_HOME",
            os.path.join(os.path.expanduser('~'), ".local", "share"))
    ]
    sharepaths.extend(
        os.environ.get("XDG_DATA_DIRS",
                       "/usr/local/share/:/usr/share/").split(":"))
    shares = [os.path.join(s, "commonwl", uri) for s in sharepaths]

    _logger.debug("Search path is %s", shares)

    for path in shares:
        if os.path.exists(path):
            return Path(uri).as_uri()
        if os.path.exists("{}.cwl".format(path)):
            return Path("{}.cwl".format(path)).as_uri()
    return None
Example #16
def _check_and_get_file_obj(fpath):
    p = Path(fpath)
    if not p.parent.exists():
        p.parent.mkdir(parents=True)
    if p.is_file():
        return p.open('ab')
    return p.open('wb')
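Usage sketch (path assumed): the helper returns an open binary handle, appending when the file already exists, so the caller is responsible for closing it.
fobj = _check_and_get_file_obj("out/data.bin")
try:
    fobj.write(b"chunk")
finally:
    fobj.close()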
Example #17
def make_gdb_cmd(prog_full_path, crashed_pid):
    """Construct a command that uses the POSIX debugger (gdb) to turn a minidump file into a stack trace.

    Args:
        prog_full_path (Path): Full path to the program
        crashed_pid (int): PID of the program

    Returns:
        list: gdb command list
    """
    assert os.name == "posix"
    # On Mac and Linux, look for a core file.
    core_name = None
    if platform.system() == "Darwin":
        # Core files will be generated if you do:
        #   mkdir -p /cores/
        #   ulimit -c 2147483648 (or call resource.setrlimit from a preexec_fn hook)
        core_name = "/cores/core." + str(crashed_pid)
    elif platform.system() == "Linux":
        is_pid_used = False
        core_uses_pid_path = Path("/proc/sys/kernel/core_uses_pid")
        if core_uses_pid_path.is_file():
            with io.open(str(core_uses_pid_path),
                         "r",
                         encoding="utf-8",
                         errors="replace") as f:
                is_pid_used = bool(int(
                    f.read()[0]))  # Setting [0] turns the input to a str.
        core_name = "core." + str(crashed_pid) if is_pid_used else "core"
        core_name_path = Path.cwd() / core_name
        if not core_name_path.is_file():
            core_name_path = Path.home() / core_name  # try the home dir

    if core_name and core_name_path.is_file():
        dbggr_cmd_path = Path(__file__).parent / "gdb_cmds.txt"
        assert dbggr_cmd_path.is_file()  # pylint: disable=no-member

        # Run gdb and move the core file. Tip: gdb gives more info for:
        # (debug with intact build dir > debug > opt with frame pointers > opt)
        return [
            "gdb", "-n", "-batch", "-x",
            str(dbggr_cmd_path),
            str(prog_full_path),
            str(core_name)
        ]
    return []
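A hedged follow-up sketch (the program path and PID are made up): run the generated command and capture the backtrace text; an empty list means no core file was found.
import subprocess
from pathlib import Path

cmd = make_gdb_cmd(Path("/usr/local/bin/myprog"), 12345)
if cmd:
    trace = subprocess.run(cmd, capture_output=True, text=True).stdout
    print(trace)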
Example #18
def get_file(path):
    result = Path('web') / path
    if result.is_file():
        return str(result)
    if result.is_dir() and (result / 'index.html').is_file():
        return str(result / 'index.html')
    # File was not found.
    return None
Example #19
def make_cdb_cmd(prog_full_path, crashed_pid):
    """Construct a command that uses the Windows debugger (cdb.exe) to turn a minidump file into a stack trace.

    Args:
        prog_full_path (Path): Full path to the program
        crashed_pid (int): PID of the program

    Returns:
        list: cdb command list
    """
    assert platform.system() == "Windows"
    # Look for a minidump.
    dump_name = Path.home() / "AppData" / "Local" / "CrashDumps" / (
        "%s.%s.dmp" % (prog_full_path.name, crashed_pid))

    if platform.uname()[2] == "10":  # Windows 10
        win64_debugging_folder = Path(os.getenv(
            "PROGRAMFILES(X86)")) / "Windows Kits" / "10" / "Debuggers" / "x64"
    else:
        win64_debugging_folder = Path(
            os.getenv("PROGRAMW6432")) / "Debugging Tools for Windows (x64)"

    # 64-bit cdb.exe seems to also be able to analyse 32-bit binary dumps.
    cdb_path = win64_debugging_folder / "cdb.exe"
    if not cdb_path.is_file():  # pylint: disable=no-member
        print()
        print(
            "WARNING: cdb.exe is not found - all crashes will be interesting.")
        print()
        return []

    if is_win_dumping_to_default():
        loops = 0
        max_loops = 300
        while True:
            if dump_name.is_file():
                dbggr_cmd_path = Path(__file__).parent / "cdb_cmds.txt"
                assert dbggr_cmd_path.is_file()  # pylint: disable=no-member

                cdb_cmd_list = []
                cdb_cmd_list.append("$<" + str(dbggr_cmd_path))

                # See bug 902706 about -g.
                return [
                    cdb_path, "-g", "-c", ";".join(cdb_cmd_list), "-z",
                    str(dump_name)
                ]

            time.sleep(0.200)
            loops += 1
            if loops > max_loops:
                # Windows may take some time to generate the dump.
                print(
                    "make_cdb_cmd waited a long time, but %s never appeared!" %
                    str(dump_name))
                return []
    else:
        return []
Example #20
    def _make_file_info(target: pathlib.Path,
                        arcname: Optional[str] = None) -> Dict[str, Any]:
        f = {}  # type: Dict[str, Any]
        f['origin'] = str(target)
        if arcname is not None:
            f['filename'] = arcname
        else:
            f['filename'] = str(target)
        if os.name == 'nt':
            fstat = os.stat(str(target), follow_symlinks=False)
            if target.is_symlink():
                f['emptystream'] = False
                f['attributes'] = fstat.st_file_attributes & FILE_ATTRIBUTE_WINDOWS_MASK  # type: ignore  # noqa
            elif target.is_dir():
                f['emptystream'] = True
                f['attributes'] = fstat.st_file_attributes & FILE_ATTRIBUTE_WINDOWS_MASK  # type: ignore  # noqa
            elif target.is_file():
                f['emptystream'] = False
                f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE  # type: ignore  # noqa
                f['uncompressed'] = fstat.st_size
        else:
            fstat = target.stat()
            if target.is_symlink():
                f['emptystream'] = False
                f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE  # type: ignore  # noqa
                f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (
                    stat.S_IFLNK << 16)
                f['attributes'] |= (stat.S_IMODE(fstat.st_mode) << 16)
            elif target.is_dir():
                f['emptystream'] = True
                f['attributes'] = stat.FILE_ATTRIBUTE_DIRECTORY  # type: ignore  # noqa
                f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (
                    stat.S_IFDIR << 16)
                f['attributes'] |= (stat.S_IMODE(fstat.st_mode) << 16)
            elif target.is_file():
                f['emptystream'] = False
                f['uncompressed'] = fstat.st_size
                f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE  # type: ignore  # noqa
                f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (
                    stat.S_IMODE(fstat.st_mode) << 16)

        f['creationtime'] = target.stat().st_ctime
        f['lastwritetime'] = target.stat().st_mtime
        f['lastaccesstime'] = target.stat().st_atime
        return f
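An illustrative call (paths assumed; in its original class this would be invoked as a static-style method): on POSIX the returned dict packs the Unix mode bits into the high 16 bits of "attributes", while on Windows it mirrors the native attribute flags.
import pathlib

info = _make_file_info(pathlib.Path("data/report.txt"), arcname="report.txt")
print(info["filename"], info.get("uncompressed"))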
Example #21
    def _get_script_info(cls, filepath, check_uncommitted=False, log=None):
        jupyter_filepath = cls._get_jupyter_notebook_filename()
        if jupyter_filepath:
            script_path = Path(os.path.normpath(jupyter_filepath)).absolute()
        else:
            script_path = Path(os.path.normpath(filepath)).absolute()
            if not script_path.is_file():
                raise ScriptInfoError(
                    "Script file [{}] could not be found".format(filepath))

        script_dir = script_path.parent

        def _log(msg, *args, **kwargs):
            if not log:
                return
            log.warning("Failed auto-detecting task repository: {}".format(
                msg.format(*args, **kwargs)))

        plugin = next((p for p in cls.plugins if p.exists(script_dir)), None)
        repo_info = DetectionResult()
        if not plugin:
            _log("expected one of: {}", ", ".join(
                (p.name for p in cls.plugins)))
        else:
            try:
                repo_info = plugin.get_info(str(script_dir),
                                            include_diff=check_uncommitted)
            except Exception as ex:
                _log("no info for {} ({})", script_dir, ex)
            else:
                if repo_info.is_empty():
                    _log("no info for {}", script_dir)

        repo_root = repo_info.root or script_dir
        working_dir = cls._get_working_dir(repo_root)
        entry_point = cls._get_entry_point(repo_root, script_path)

        script_info = dict(
            repository=furl(repo_info.url).remove(username=True,
                                                  password=True).tostr(),
            branch=repo_info.branch,
            version_num=repo_info.commit,
            entry_point=entry_point,
            working_dir=working_dir,
            diff=repo_info.diff,
        )

        messages = []
        if repo_info.modified:
            messages.append(
                "======> WARNING! UNCOMMITTED CHANGES IN REPOSITORY {} <======"
                .format(script_info.get("repository", "")))

        if not any(script_info.values()):
            script_info = None

        return ScriptInfoResult(script=script_info, warning_messages=messages)
Example #22
    def checkTLE(self, index):
        """Check if a TLE file exists for the given satellite, else call getnewtle().

        Args:
            index (str): NORAD ID of satellite
        """
        tle_file = Path("./TLE/" + index + '.txt')
        if not tle_file.is_file():
            self.getnewtle(index)
Example #23
def test_preprocess_saves_result_in_the_right_folder(path_to_config,
                                                     make_tmp_folder):
    yass.set_config(path_to_config, make_tmp_folder)
    standarized_path, standarized_params, _ = preprocess.run()

    expected = Path(make_tmp_folder, 'preprocess', 'standarized.bin')

    assert str(expected) == standarized_path
    assert expected.is_file()
Example #24
def is_resource(package, name):
    """True if name is a resource inside package.

    Directories are *not* resources.
    """
    package = _get_package(package)
    _normalize_path(name)
    try:
        package_contents = set(contents(package))
    except OSError as error:
        if error.errno not in (errno.ENOENT, errno.ENOTDIR):
            # We won't hit this in the Python 2 tests, so it'll appear
            # uncovered.  We could mock os.listdir() to return a non-ENOENT or
            # ENOTDIR, but then we'd have to depend on another external
            # library since Python 2 doesn't have unittest.mock.  It's not
            # worth it.
            raise  # pragma: nocover
        return False
    if name not in package_contents:
        return False
    # Just because the given file_name lives as an entry in the package's
    # contents doesn't necessarily mean it's a resource.  Directories are not
    # resources, so let's try to find out if it's a directory or not.
    path = Path(package.__file__).parent / name
    if path.is_file():
        return True
    if path.is_dir():
        return False
    # If it's not a file and it's not a directory, what is it?  Well, this
    # means the file doesn't exist on the file system, so it probably lives
    # inside a zip file.  We have to crack open the zip, look at its table of
    # contents, and make sure that this entry doesn't have sub-entries.
    archive_path = package.__loader__.archive  # type: ignore
    package_directory = Path(package.__file__).parent
    with ZipFile(archive_path) as zf:
        toc = zf.namelist()
    relpath = package_directory.relative_to(archive_path)
    candidate_path = relpath / name
    for entry in toc:  # pragma: nobranch
        try:
            relative_to_candidate = Path(entry).relative_to(candidate_path)
        except ValueError:
            # The two paths aren't relative to each other so we can ignore it.
            continue
        # Since directories aren't explicitly listed in the zip file, we must
        # infer their 'directory-ness' by looking at the number of path
        # components in the path relative to the package resource we're
        # looking up.  If there are zero additional parts, it's a file, i.e. a
        # resource.  If there are more than zero it's a directory, i.e. not a
        # resource.  It has to be one of these two cases.
        return len(relative_to_candidate.parts) == 0
    # I think it's impossible to get here.  It would mean that we are looking
    # for a resource in a zip file, there's an entry matching it in the return
    # value of contents(), but we never actually found it in the zip's table of
    # contents.
    raise AssertionError('Impossible situation')
Example #25
def generate_new_csv_overall():
    my_file = Path("./stats/instaPyStats.csv")
    if my_file.is_file():
        return 'The overall File already exists'
    else:
        filename = "./stats/instaPyStats.csv"
        with open(filename, mode='a') as insta_file:
            stat_writer = csv.writer(insta_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            stat_writer.writerow(['DATE', 'USER_NAME', 'FOLLOWERS', 'FOLLOWING'])
        return "New overall history file created"
Example #26
def recursive_search(path):
    # print(path)
    ign_flag = 0
    ignore_file = Path(os.path.join(path, ".ignore"))
    if ignore_file.is_file():
        ign_flag = 1
        text_file.write("# ignored - " + path + "\n")
    # local set

    skip_node_flag = 0
    skip_node_file = Path(os.path.join(path, ".ignore_node"))
    if skip_node_file.is_file():
        skip_node_flag = 1
        text_file.write("# ignored directory and all subdirs - " + path + "\n")
        return None
    local_source_files = set()
    for file in os.listdir(path):
        if (file == ".") or (file == ".."):
            continue
        full_name = os.path.join(path, file)
        if os.path.isdir(full_name) and not os.path.islink(full_name):
            recursive_search(full_name)
        if ign_flag == 0:
            if re.search("\\.(c|cpp|cxx)$", file) is not None:
                local_source_files.add(full_name)
            elif re.search("\\.(h|hpp)$", file) is not None:
                include_dirs.add(path)
    if len(local_source_files) != 0:
        path_g = path[skip_chars:]
        path_g = path_g.replace("/", "_")
        path_g = path_g.replace("\\", "_")
        group_name = "GROUP_" + path_g.upper()
        source_files.add(group_name)
        text_file.write("\nset(" + group_name + "\n")
        for fname in sorted(local_source_files):
            fname = fname[skip_chars:].replace("\\", "/")
            text_file.write("\t\t" + fname + "\n")
        text_file.write(")" + "\n")
Example #27
def import_config():
    """import the configuration for the dimension reduction in this file """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    running_dir = os.getcwd()

    module_file = Path(os.path.join(module_dir, "algorithms.json"))
    running_file = Path(os.path.join(running_dir, "algorithms.json"))

    def rfile(f):
        with f.open() as param:
            config_ = json.load(param)
        return config_

    if running_file.is_file():
        return rfile(running_file)
    elif module_file.is_file():
        return rfile(module_file)
    else:
        raise IOError("No such file 'algorithms.json'")
Example #28
def generate_new_csv(usr_name):
    my_file = Path("./stats/" + usr_name + ".csv")
    if my_file.is_file():
        return 'The history file for "' + usr_name + '" already exists'
    else:
        filename = './stats/' + usr_name + '.csv'
        with open(filename, mode='a') as insta_file:
            stat_writer = csv.writer(insta_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            stat_writer.writerow(['DATE', 'FOLLOWERS', 'FOLLOWING'])
        return "New history file created for user: " + usr_name
Example #29
    def update_model_desc(self, new_model_desc_file=None):
        """ Change the task's model_desc """
        execution = self._get_task_property('execution')
        p = Path(new_model_desc_file)
        if not p.is_file():
            raise IOError('model_desc file %s cannot be found' % new_model_desc_file)
        new_model_desc = p.read_text()
        model_desc_key = list(execution.model_desc.keys())[0] if execution.model_desc else 'design'
        execution.model_desc[model_desc_key] = new_model_desc

        res = self._edit(execution=execution)
        return res.response
Example #30
def getFilesFromFolderByFiletype(folder_selected, filetype):
    files_from_folder = []

    for root, dirs, files in os.walk(folder_selected):
        for file in files:
            if file.endswith(filetype):
                input_file = os.path.join(root, file)
                output_file = Path(getOutputFileFromInputFile(input_file))

                if not output_file.is_file():
                    files_from_folder.append(input_file)
    return files_from_folder
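Hypothetical usage (the folder and extension are made up): collect every input file under a folder that does not yet have a converted output file next to it.
pending = getFilesFromFolderByFiletype("/data/audio", ".wav")
for input_file in pending:
    print("needs processing:", input_file)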
Example #31
def parse_file(fname):
    aminoacids = []
    the_file = Path(fname)
    if the_file.is_file():
        with gzip.open(fname, 'rb') as fin:
            for line in fin:
                if query_ca.match(line):
                    atname, resname, resid, xyz = atom_from_string(line)
                    aminoacids.append(AlphaCarbon(resname, xyz))
                elif line.startswith('TER'):
                    aminoacids.append(None)
    return aminoacids
Example #32
def cmd_run(path):
    """
    Runs an application.
    """
    os.chdir(path)
    package = Path("./package.json")
    if not package.is_file():
        raise Exception("Invalid package: no package.json file")

    package = json.load(package.open())

    if "engines" not in package or package["engines"] == {}:
        raise Exception("Invalid package: no engines specified")

    r = requests.get("%s/index.json" % Particle.REPO)
    r.raise_for_status()
    remote_particles = r.json()["particles"]

    variables = {}
    for name, range_ in package["engines"].items():
        p = Particle.get_local(name, range_)
        if not p:
            # if auto_fetch:
            if name in remote_particles:
                v = semver.max_satisfying(remote_particles[name], range_, False)
                if v:
                    print("Downloading %s %s..." % (name, v))
                    p = Particle.fetch(name, v)
                else:
                    print("Cannot satisfy %s (%s), aborting." % (name, range_))
                    sys.exit(1)
            else:
                print("No particle named %s exists, aborting." % name)
                sys.exit(1)
        variables["$" + name.upper().replace("-", "_")] = str(p.main)

    pattern = re.compile('|'.join(map(re.escape, variables.keys())))

    if "lepton" not in package:
        raise Exception("Invalid package: no lepton key in particle.json")
    elif "run" not in package["lepton"]:
        raise Exception("Invalid package: no lepton.run key in particle.json")

    args = package["lepton"]["run"]
    args = pattern.sub(lambda x: variables[x.group()], args)
    args = shlex.split(args)
    print("Resulting command line: %r" % args)
    print("Current dir: %s" % os.getcwd())
    os.execvp(args[0], args)
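For reference, a package.json shaped to satisfy the checks above, written out as a Python dict; the engine name and run command are invented, and the "$MY_RUNTIME" placeholder is substituted with the resolved particle's main path before execvp.
sample_package = {
    "engines": {"my-runtime": "^1.2.0"},
    "lepton": {"run": "$MY_RUNTIME main.py --port 8080"},
}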
Example #33
def import_dashboards(path, recursive=False):
    """Import dashboards from JSON"""
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.json'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.json'))
    for f in files:
        logging.info('Importing dashboard from file %s', f)
        try:
            with f.open() as data_stream:
                dashboard_import_export.import_dashboards(
                    db.session, data_stream)
        except Exception as e:
            logging.error('Error when importing dashboard from file %s', f)
            logging.error(e)
Example #34
def get_file(path):
    filepath = Path('site') / path
    if filepath.is_file():
        return filepath

    for format in PAGE_FORMATS:
        index_file = filepath / ('index' + format)
        if filepath.is_dir() and index_file.is_file():
            return index_file

    if filepath.suffix == '.css':
        style_file = filepath.parent / (filepath.stem + '.styl')
        if style_file.exists():
            return style_file

    if filepath.suffix == '.js':
        coffee_file = filepath.parent / (filepath.stem + '.coffee')
        if coffee_file.exists():
            return coffee_file

    return None
Example #35
def resolve_local(document_loader, uri):
    pathpart, frag = urllib.parse.urldefrag(uri)
    pathobj = Path(pathpart).resolve()

    if pathobj.is_file():
        if frag:
            return "{}#{}".format(pathobj.as_uri(), frag)
        return pathobj.as_uri()

    sharepaths = [os.environ.get("XDG_DATA_HOME", os.path.join(
        os.path.expanduser('~'), ".local", "share"))]
    sharepaths.extend(os.environ.get(
        "XDG_DATA_DIRS", "/usr/local/share/:/usr/share/").split(":"))
    shares = [os.path.join(s, "commonwl", uri) for s in sharepaths]

    _logger.debug("Search path is %s", shares)

    for path in shares:
        if os.path.exists(path):
            return Path(uri).as_uri()
        if os.path.exists("{}.cwl".format(path)):
            return Path("{}.cwl".format(path)).as_uri()
    return None
Example #36
def Deepzoom(image_or_directory_path, create_static_cache=False, **kwargs):
    """
    Returns a Deepzoom interface corresponding to the given image. Can accept
    either a filepath (and will read tiles on the fly) or a directory path (that
    contains an image with the same name as the directory) that contains/will
    contain a static DeepZoom image directory.
    :param image_or_directory_path: String or Pathlib object
    :param create_static_cache: If True, creates a static DeepZoom image
     directory structure *around* the given image (or in the given
     directory). This is done lazily, saving each tile as it's requested.
    :param kwargs: Same as DeepzoomInterface
    :return: DeepZoom
    """
    p = Path(image_or_directory_path).resolve()
    img = None
    if p.is_file():
        img = _ImageFactory(p)
    elif p.is_dir():
        fList = list(p.glob('%s.*'%p.name))
        if len(fList)==0:
            raise IOError('Invalid Deepzoom directory (%s). '
                          'Must contain an image named (%s) to be valid.'
                          ''%(p,'%s.<EXT>'%p.name))
        for f in fList:
            try: img = _ImageFactory(f)
            except IOError: pass

    if img is None:
        raise IOError('Invalid Deepzoom target (%s). '
                      'Not a supported image format.'%(p))

    if create_static_cache:
        # do something to DeepZoomGenerator so that it saves on get_tile()
        dzGen = _CachedInterface(img,**kwargs)
    else:
        dzGen = _DeepzoomInterface(img,**kwargs)
    return dzGen
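Hypothetical usage (file names and the tile-access call are assumed to follow the DeepzoomInterface API): open a single image for on-the-fly tiling, or wrap a prepared directory in a lazily filled static cache.
dz = Deepzoom("slides/sample.tif")                    # read tiles on demand
cached = Deepzoom("slides/sample", create_static_cache=True)
tile = dz.get_tile(12, (0, 0))                        # assumed interface method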
Example #37
def import_datasources(path, sync, recursive=False):
    """Import datasources from YAML"""
    sync_array = sync.split(',')
    p = Path(path)
    files = []
    if p.is_file():
        files.append(p)
    elif p.exists() and not recursive:
        files.extend(p.glob('*.yaml'))
        files.extend(p.glob('*.yml'))
    elif p.exists() and recursive:
        files.extend(p.rglob('*.yaml'))
        files.extend(p.rglob('*.yml'))
    for f in files:
        logging.info('Importing datasources from file %s', f)
        try:
            with f.open() as data_stream:
                dict_import_export_util.import_from_dict(
                    db.session,
                    yaml.safe_load(data_stream),
                    sync=sync_array)
        except Exception as e:
            logging.error('Error when importing datasources from file %s', f)
            logging.error(e)
Example #38
def test_has_files(tmp_crumb):
    assert not op.exists(tmp_crumb.path)

    assert not tmp_crumb.has_files()

    values_dict = {'session_id': ['session_{:02}'.format(i) for i in range( 2)],
                   'subject_id': ['subj_{:03}'.format(i)    for i in range( 3)],
                   'modality':   ['anat'],
                   'image':      ['mprage1.nii', 'mprage2.nii', 'mprage3.nii'],
                   }

    paths = mktree(tmp_crumb, list(ParameterGrid(values_dict)))

    assert op.exists(tmp_crumb.split()[0])

    assert not tmp_crumb.has_files()

    pa = Path(str(paths[0]))
    pa.rmdir()
    pa.touch()

    assert pa.exists()
    assert pa.is_file()
    assert tmp_crumb.has_files()
Example #39
def cmd_run(path):
    """
    Runs an application.
    """
    os.chdir(path)
    package = Path("./package.json")
    if not package.is_file():
        raise Exception("Invalid package: no package.json file")

    package = json.load(package.open())

    if "engines" not in package or package["engines"] == {}:
        raise Exception("Invalid package: no engines specified")

    variables = {}
    for name, version in package["engines"].items():
        p = Particle.get_local(name, version)
        if not p:
            # if auto_fetch:
            print("Downloading %s..." % name)
            p = Particle.fetch(name, version)
        variables["$" + name.upper().replace("-", "_")] = str(p.main)

    pattern = re.compile('|'.join(map(re.escape, variables.keys())))

    if "lepton" not in package:
        raise Exception("Invalid package: no lepton key in particle.json")
    elif "run" not in package["lepton"]:
        raise Exception("Invalid package: no lepton.run key in particle.json")

    args = package["lepton"]["run"]
    args = pattern.sub(lambda x: variables[x.group()], args)
    args = shlex.split(args)
    print("Resulting command line: %r" % args)
    print("Current dir: %s" % os.getcwd())
    os.execvp(args[0], args)