Code example #1
    def search_current(self, value):
        """
            Search in the current folder

            :param value: The value used to search for the file

            :type value: string

            :return: The absolute path of the .pyg file
            :rtype:  Path
        """
        # Set the file name
        file_name = value

        # Append the extension if the value doesn't already end with it
        if not value.endswith(PygnataProvider.extension):
            file_name = file_name + PygnataProvider.extension

        # Check if the file exists in the current directory
        current_folder = path.getcwd()
        local_pyg_files = os.listdir(current_folder)
        if file_name in local_pyg_files:
            return os.path.abspath(current_folder / path(file_name))
        else:
            # Search in the .pygnata folder
            return self.search_local(value)
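In the snippet above, path appears to be the path.py library's path class (inferred from the path.getcwd() and path(file_name) calls; the source itself does not say so). A minimal standard-library sketch of the same lookup, under that assumption, with a hypothetical function name and a hard-coded ".pyg" extension:

import os

def search_current_stdlib(value, extension=".pyg"):
    # Append the extension if the value doesn't already end with it
    file_name = value if value.endswith(extension) else value + extension
    # Return the absolute path if the file sits in the current directory
    current_folder = os.getcwd()
    if file_name in os.listdir(current_folder):
        return os.path.abspath(os.path.join(current_folder, file_name))
    return None  # the original falls back to search_local() here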
Code example #2
File: utils.py Project: jeromedockes/NiMARE
# Assumed imports for this snippet (op is taken to be os.path, as is
# conventional in NiMARE)
import os
import os.path as op

import requests


def _download_zipped_file(url, filename=None):
    """
    Download from a URL to a file.
    """
    if filename is None:
        data_dir = op.abspath(os.getcwd())
        filename = op.join(data_dir, url.split('/')[-1])
    # NOTE the stream=True parameter
    req = requests.get(url, stream=True)
    with open(filename, 'wb') as f_obj:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f_obj.write(chunk)
    return filename
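A possible call to the helper above; the URL and target filename are placeholders, not values from the NiMARE project:

# Hypothetical usage of _download_zipped_file (placeholder URL and path)
zip_path = _download_zipped_file("https://example.com/archive.zip",
                                 filename="/tmp/archive.zip")
print(zip_path)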
Code example #3
File: tbb-install.py Project: von/scripts
    def unpack_dmg(self, bundle_path):
        """Unpack DMG

        :param bundle_path: path to the bundle to unpack
        :return: path to the unpacked bundle
        """
        cwd = path.getcwd()
        mount_info = sh.hdiutil("attach",
                                "-noverify",  # Avoid output
                                "-mountroot", cwd,
                                "-nobrowse",  # Make invisible to Finder
                                bundle_path)
        dev, hint, mount_point = [s.strip() for s in mount_info.split("\t")]
        atexit.register(sh.hdiutil, "detach", mount_point, "-force")
        unpacked_bundle = path(mount_point) / \
            path(self.config["unpacked_bundle"])
        if not unpacked_bundle.exists():
            raise RuntimeError("Could not find unpacked bundle \"{}\"".
                               format(unpacked_bundle))
        return unpacked_bundle
Code example #4
File: pavement.py Project: Irrelon/bespinclient
def jsdocs(options):
    """Generate API documentation using the jsdoc-toolkit."""
    _fetchfile("jsdoc-toolkit", options.dest_dir, options.download_location,
               options.download_url)

    outputdir = options.builddir / "docs" / "api"
    if outputdir.exists():
        outputdir.rmtree()
    outputdir.makedirs()
    sourcedir = (path.getcwd() / "frameworks" / "bespin").abspath()

    command = ("java -jar jsrun.jar app/run.js -a "
               "--directory=%s "
               "--encoding=utf-8 "
               "--recurse=10 "
               "--securemodules "
               "--template=%s/templates/jsdoc "
               "--verbose "
               "%s") % (outputdir.abspath(), options.dest_dir.abspath(),
                        sourcedir)

    sh(command, cwd=options.dest_dir)
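The options.builddir / "docs" / "api" and path.getcwd() expressions rely on path.py-style path objects that overload "/" for joining. For comparison only (the Bespin build itself uses path.py, not pathlib), the sourcedir line could be written with the standard library as:

from pathlib import Path

# Join and resolve the source directory relative to the current directory
sourcedir = (Path.cwd() / "frameworks" / "bespin").resolve()
print(sourcedir)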
Code example #5
File: travis-failure.py Project: hjanetzek/core
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# travis-failure.py - build script
# Written in 2015 by MNMLSTC
# To the extent possible under law, the author(s) have dedicated all copyright
# and related and neighboring rights to this software to the public domain
# worldwide. This software is distributed without any warranty. You should have
# received a copy of the CC0 Public Domain Dedication along with this software.
# If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.

from __future__ import print_function
from os import getcwd
from os.path import join
from os import walk


def find(name, path):
    return [join(root, name) for root, _, files in walk(path) if name in files]


if __name__ == '__main__':
    for path in find('LastTest.log', getcwd()):
        with open(path) as log:
            for line in log:
                print(line, end='')
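The find() helper above walks the tree with os.walk; the same lookup could also be written with the standard glob module's recursive matching (shown only as an alternative, not how this script does it):

import glob
import os

# Recursively match every LastTest.log under the current working directory
for log_path in glob.glob(os.path.join(os.getcwd(), "**", "LastTest.log"),
                          recursive=True):
    with open(log_path) as log:
        for line in log:
            print(line, end="")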
Code example #6
dryice=dryice.tool:main
""")

options(
    version=Bunch(number="0.9a3", name="Edison", api="4"),
    virtualenv=Bunch(paver_command_line="initial"),
    server=Bunch(
        # set to true to allow connections from other machines
        address="",
        port="8080",
        try_build=False,
        dburl=None,
        async=False,
        config_file=path("devconfig.py"),
        directory=path("../bespinserver/").abspath(),
        clientdir=path.getcwd()),
    server_pavement=lambda: options.server.directory / "pavement.py",
    builddir=path("tmp"),
    install_tiki=Bunch(git=False, force=False),
    jsdocs=Bunch(
        download_url=
        "http://jsdoc-toolkit.googlecode.com/files/jsdoc_toolkit-2.3.0.zip",
        download_location=path("external") / "jsdoc_toolkit-2.3.0.zip",
        dest_dir=path("external") / "jsdoc-toolkit"),
    fetch_compiler=Bunch(
        dest_dir=path("external") / "compiler",
        download_url=
        "http://closure-compiler.googlecode.com/files/compiler-latest.zip",
        download_location=path("external") / "compiler-latest.zip"))

TIKI_TEMPLATE = u"""
Code example #7
def generateModelPath():
    # osp is assumed to be os.path; the working directory comes from os.getcwd()
    newDir = osp.join(os.getcwd(),
                      datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    return newDir
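generateModelPath() only builds the path string; a caller still has to create the directory. A possible usage sketch (the os.makedirs call is an assumption, not part of the original snippet):

import os

model_dir = generateModelPath()
os.makedirs(model_dir, exist_ok=True)  # create the timestamped directory
print("saving model to", model_dir)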
Code example #8
File: pavement.py Project: Irrelon/bespinclient
     name="Edison",
     api="4"
 ),
 virtualenv=Bunch(
     paver_command_line="initial"
 ),
 server=Bunch(
     # set to true to allow connections from other machines
     address="",
     port="8080",
     try_build=False,
     dburl=None,
     async=False,
     config_file=path("devconfig.py"),
     directory=path("../bespinserver/").abspath(),
     clientdir=path.getcwd()
 ),
 server_pavement=lambda: options.server.directory / "pavement.py",
 builddir=path("tmp"),
 install_tiki=Bunch(
     git=False,
     force=False
 ),
 jsdocs=Bunch(
     download_url="http://jsdoc-toolkit.googlecode.com/files/jsdoc_toolkit-2.3.0.zip",
     download_location=path("external") / "jsdoc_toolkit-2.3.0.zip",
     dest_dir=path("external") / "jsdoc-toolkit"
 ),
 fetch_compiler=Bunch(
     dest_dir=path("external") / "compiler",
     download_url="http://closure-compiler.googlecode.com/files/compiler-latest.zip",
Code example #9
File: anno_part.py Project: Haseon/haseon-thesis2020
    def __init__(self, workpath=None, part_csv_path=None, init_csv_path=None, 
            reftxt_path=None, history_path=None, stride_path=None):
        # Set up paths, defaulting to the current working directory
        if workpath is None:
            from os import getcwd
            workpath = getcwd() + "/"
        if part_csv_path is None:
            part_csv_path = workpath + "part.csv"
        if init_csv_path is None:
            init_csv_path = workpath + "init.csv"
        if reftxt_path is None:
            reftxt_path = workpath + "reftxt.txt"
        if history_path is None:
            history_path = workpath + "history.p"
        if stride_path is None:
            stride_path = workpath + "stride.p"
        # read part.csv if there is one, otherwise read init.csv
        if isfile(part_csv_path):
            src_csv_path = part_csv_path
        elif isfile(init_csv_path):
            src_csv_path = init_csv_path
        else:
            raise FileNotFoundError(f"file '{init_csv_path}' does not exist.")
        self.save_path = part_csv_path
        self.history_path = history_path

        with open(src_csv_path, "r", newline='') as src_csv:
            self.srcL = list(csv.DictReader(src_csv))

        self.fieldnames = list(self.srcL[0].keys())

        # Load reftxt (use the reftxt_path resolved above)
        self.analyzed_tokens = []
        self.unanalyzed_tokens = []
        with open(reftxt_path, "r") as rf:
            for line in rf:
                tline = line.split('\t')
                if len(tline) >= 3:
                    self.analyzed_tokens.append(tline[2][:-1])
                    self.unanalyzed_tokens.append(tline[1])
                else:
                    self.analyzed_tokens.append(tline[1][:-1])
                    self.unanalyzed_tokens.append('')
        #load history
        if isfile(history_path):
            with open(history_path, 'rb') as hf:
                self.history = pickle.load(hf)
        else:
            self.history = []
        self.historyix = self.historylen = len(self.history)
        # Initialize working state
        self.annotation = {}  # {line_num: case}
        self.partlist = ["JKS"  # nominative/subjective
                        ,"JKC"  # complement case
                        ,"JKG"  # adnominal (genitive) case
                        ,"JKO"  # accusative/objective
                        ,"JKB"  # adverbial case
                        ,"JKV"  # vocative
                        ,"JKQ"  # quotative
                        ,"JX"   # auxiliary particle
                        ,"JC"   # conjunctive
                        ,"NJ"   # cannot take a particle
                        ]
        # {case: number_of_cases}
        self.tag_stats = {part: 0 for part in self.partlist}
        self.non_tag_stats = {part: 0 for part in self.partlist}
        self.number_of_unparticled = 0
        if src_csv_path == part_csv_path:
            for d in self.srcL:
                if d["tagged"] != '' and int(d["tagged"]):
                    self.annotation.update({int(d["line_num"]): d["case"]})
                    self.tag_stats[d["case"]] += 1
                elif int(d["particled"]):
                    self.non_tag_stats[d["case"]] += 1
                else:
                    self.number_of_unparticled += 1
        else:
            for d in self.srcL:
                if int(d["particled"]):
                    self.non_tag_stats[d["case"]] += 1
                else:
                    self.number_of_unparticled += 1
        self.workix = 0
        self.tokenlen = len(self.unanalyzed_tokens)
        self.srcLlen = len(self.srcL)
        if isfile(stride_path):
            with open(stride_path, "rb") as sf:
                self.stride = pickle.load(sf)
        else:
            self.stride = 10
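A hypothetical way to construct the annotator above (the class name PartAnnotator is a placeholder, since the snippet does not show it, and the work directory must already contain part.csv or init.csv plus reftxt.txt):

# Placeholder class name; the real class in anno_part.py may differ.
annotator = PartAnnotator(workpath="./annotation_data/")
print(annotator.tag_stats)              # tagged counts per particle tag
print(annotator.number_of_unparticled)  # tokens with no particle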
Code example #10
    each_video_dir = os.path.join(UCFSports_Images_Dir, testlist)
    video_frame_num = d.nframes(testlist)
    labels = list(d._gttubes[testlist].keys())[0]
    resolution = d._resolution[testlist]
    for nframe in range(video_frame_num):
        num = num + 1
        imgdir = os.path.join(
            each_video_dir, "{:0>6}".format(nframe + 1) + ".jpg")
        # imgdir, testlist, nframe, labels, resolution, num
        temp = {}
        temp['name'] = 'train'
        temp['dir'] = imgdir[-15:-4]
        temp['bbox'] = d._gttubes[testlist][labels][0][nframe,1:].tolist()
        temp['difficult'] = 0
        temp['labels'] = labels
        result["{:0>6}".format(num)] = [temp]

        # result["{:0>6}".format(num)] =  [{'name': 'train', 'truncated': 0, 'pose': 'Unspecified', 'bbox': [138, 199, 206, 300], 'difficult': 0}]
det_file = os.path.join(os.getcwd(), 'result_UCFS', 'gt', 'detections.pkl')
with open(det_file, 'wb') as f:
    pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)



filename = os.path.join(os.getcwd(), 'result_UCFS', 'Main.txt')
with open(filename, 'wt') as f:
    for i in range(len(result)):
        f.write('{:s} \n'.format("{:0>6}".format(i+1)))

print('need to save result pkl')
print('1123')
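A small sanity check that the ground-truth pickle written above loads back correctly (paths reused from the snippet; this check is not part of the original code):

import os
import pickle

det_file = os.path.join(os.getcwd(), 'result_UCFS', 'gt', 'detections.pkl')
with open(det_file, 'rb') as f:
    loaded = pickle.load(f)
print(len(loaded), 'ground-truth entries loaded')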