Example #1
def test_parse_args(tmpdir, windows_safe, verbose, dry_run, structure, source_dir):
	args_list = []
	output_structure = sep.join(["{artist}", "{album}"])

	if verbose:
		args_list.append("-v")

	if dry_run:
		args_list.append("-n")

	if structure:
		output_structure = sep.join(["{artist}", "{album} ({date})"])
		args_list.extend(["-s", sep.join(["r", "l (d)"])])

	if not windows_safe:
		args_list.append("-u")

	args_list.extend([source_dir, str(tmpdir)])

	if source_dir != TEST_AUDIO:
		with pytest.raises(SystemExit):
			parse_args(argv=args_list)
	else:
		args = parse_args(argv=args_list)

		assert args.verbose == verbose
		assert args.dry_run == dry_run
		assert args.windows_safe == windows_safe
		assert args.structure == output_structure
		assert args.src_paths == [TEST_AUDIO]
		assert args.dest_path == str(tmpdir)
Example #2
def parse_nwchem(file_path, ecce=None, kind='scf'):
    """
    Will parse an NWChem output file. Optionally it will attempt to parse
    an ECCE (extensible computational chemistry environment) output containing
    the C matrix to be used in visualization of molecular orbitals. The kind
    parameter chooses the 'scf' or 'dft' C matrix in the ecce output.

    Args:
        file_path (str): file path to the output file
        ecce (str): name of the ecce output in the same directory

    Returns:
        parsed (Editor): contains many attributes similar to the
                         exatomic Universe
    """
    uni = Output(file_path)
    dirtree = sep.join(file_path.split(sep)[:-1])
    if ecce is not None:
        fp = sep.join([dirtree, ecce])
        if path.isfile(fp):
            momat = Ecce(fp, kind=kind)
            uni.momatrix = momat.momatrix
        else:
            print('Is {} in the same directory as {}?'.format(ecce, file_path))
    return uni
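The directory handling above (split the path on sep, drop the last element, join it back) is a pattern that recurs throughout these examples. Below is a minimal stand-alone sketch of the same sibling-file lookup written with os.path; the name find_sibling is illustrative and not part of exatomic:

from os import path

def find_sibling(output_path, sibling_name):
    """Return the path of sibling_name in the same directory as output_path, or None."""
    candidate = path.join(path.dirname(output_path), sibling_name)
    return candidate if path.isfile(candidate) else None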
Example #3
def enable_units(path=None):
    """Enables units support in a particular instance of GPkit.

    Posynomials created after calling this are incompatible with those created
    before.

    If gpkit is imported multiple times, this needs to be run each time."""
    # pylint: disable=invalid-name,global-statement
    global DimensionalityError, UNIT_REGISTRY, ureg, units
    try:
        import pint
        if path:
            # let user load their own unit definitions
            UNIT_REGISTRY = pint.UnitRegistry(path)
        if UNIT_REGISTRY is None:
            UNIT_REGISTRY = pint.UnitRegistry()  # use pint default
            path = os_sep.join([os_path_dirname(__file__), "pint"])
            UNIT_REGISTRY.load_definitions(os_sep.join([path, "usd_cpi.txt"]))
            # next line patches https://github.com/hgrecco/pint/issues/366
            UNIT_REGISTRY.define("nautical_mile = 1852 m = nmi")

        ureg = UNIT_REGISTRY
        DimensionalityError = pint.DimensionalityError
        units = GPkitUnits()
    except ImportError:
        print("Optional Python units library (Pint) not installed;"
              " unit support disabled.")
        disable_units()
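For context, a minimal sketch of the underlying pint API that this function wraps (it assumes pint is installed; the gpkit-specific globals and unit-definition files are omitted):

import pint

ureg = pint.UnitRegistry()            # default unit definitions
speed = 3 * ureg.m / ureg.s           # a quantity carrying units
print(speed.to(ureg.km / ureg.hour))  # 10.8 kilometer / hour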
Example #4
File: output.py Project: tjduigna/exatomic
def parse_nwchem(file_path, ecce=None, kind='scf'):
    """
    Will parse an NWChem output file. Optionally it will attempt to parse
    an ECCE (extensible computational chemistry environment) output containing
    the C matrix to be used in visualization of molecular orbitals. The kind
    parameter chooses the 'scf' or 'dft' C matrix in the ecce output.

    Args:
        file_path (str): file path to the output file
        ecce (str): name of the ecce output in the same directory

    Returns:
        parsed (Editor): contains many attributes similar to the
                         exatomic Universe
    """
    uni = Output(file_path)
    dirtree = sep.join(file_path.split(sep)[:-1])
    if ecce is not None:
        fp = sep.join([dirtree, ecce])
        if path.isfile(fp):
            momat = Ecce(fp, kind=kind)
            uni.momatrix = momat.momatrix
        else:
            print('Is {} in the same directory as {}?'.format(ecce, file_path))
    return uni
Example #5
def enable_units(path=None):
    """Enables units support in a particular instance of GPkit.

    Posynomials created after calling this are incompatible with those created
    before.

    If gpkit is imported multiple times, this needs to be run each time."""
    # pylint: disable=invalid-name,global-statement
    global DimensionalityError, UNIT_REGISTRY, ureg, units
    try:
        import pint
        if path:
            # let user load their own unit definitions
            UNIT_REGISTRY = pint.UnitRegistry(path)
        if UNIT_REGISTRY is None:
            UNIT_REGISTRY = pint.UnitRegistry()  # use pint default
            path = os_sep.join([os_path_dirname(__file__), "pint"])
            UNIT_REGISTRY.load_definitions(os_sep.join([path, "usd_cpi.txt"]))
            # next line patches https://github.com/hgrecco/pint/issues/366
            UNIT_REGISTRY.define("nautical_mile = 1852 m = nmi")

        ureg = UNIT_REGISTRY
        DimensionalityError = pint.DimensionalityError
        units = GPkitUnits()
    except ImportError:
        print("Optional Python units library (Pint) not installed;"
              " unit support disabled.")
        disable_units()
Example #6
 def test_find_path_dupes_fuzzy(self):
     max_test_length = 20
     safechars = string.ascii_letters + string.digits + "~ -_."
     for match_until in range(3, max_test_length):
         common_prefix = ''.join(
             random.choices(safechars, k=match_until - 1))
         # we don't care that the nodes have different lengths
         # control node
         node_1 = common_prefix + '1' + ''.join(
             random.choices(safechars, k=max_test_length))
         # test node matches until len(common_prefix) + 1
         node_a = common_prefix + 'a' + ''.join(
             random.choices(safechars, k=max_test_length))
         test_path = Path(sep.join([node_1, node_a]))
         test_path_reverse = Path(sep.join([node_a, node_1]))
         for num in range(1, max_test_length):
             # all checks up to the unique character should raise
             if num < match_until:
                 with pytest.raises(DebugTraceReport):
                     find_path_dupes(full_path=test_path,
                                     number_of_nodes=1,
                                     min_match_prefix=num)
                 with pytest.raises(DebugTraceReport):
                     find_path_dupes(full_path=test_path_reverse,
                                     number_of_nodes=1,
                                     min_match_prefix=num)
             # anything longer should just return
             else:
                 assert node_a == find_path_dupes(full_path=test_path,
                                                  number_of_nodes=1,
                                                  min_match_prefix=num)
                 assert node_1 == find_path_dupes(
                     full_path=test_path_reverse,
                     number_of_nodes=1,
                     min_match_prefix=num)
Example #7
def run(args):

    opts = parse_args(args)

    initf = sep.join([opts.transform_dir, '__init__.py'])
    transform = opts.transform
    transformf = sep.join([opts.transform_dir, transform if transform.endswith('.py') else '%s.py' % transform ])

    if not path.exists(initf):
        print 'Directory %s does not appear to be a python package directory... quitting!' % repr(opts.transform_dir)
        exit(-1)
    if not path.exists(transformf):
        print "Transform %s doesn't exists... quitting" % repr(transformf)
        exit(-1)

    print 'deleting transform %s...' % repr(transformf)
    unlink(transformf)

    print 'updating %s' % initf
    init = file(initf).read()

    with file(initf, mode='wb') as w:
        w.write(
            sub(
                r'\s*%s,?' % repr(transform),
                '',
                init
            )
        )

    print 'done!'
Example #8
 def setup_method(self):
     source_path = Path(sep.join(['one', 'two', 'three', 'four', 'five']))
     self.test_set = [{
         'path': source_path,
         'nodes': 4,
         'expect': sep.join(['two', 'three', 'four', 'five'])
     }, {
         'path': source_path,
         'nodes': 3,
         'expect': sep.join(['three', 'four', 'five'])
     }, {
         'path': source_path,
         'nodes': 2,
         'expect': sep.join(['four', 'five'])
     }]
     dupes_path = Path(sep.join(['one', 'two', 'three', 'three', 'four']))
     self.test_dupes = [{
         'path': dupes_path,
         'nodes': 4
     }, {
         'path': dupes_path,
         'nodes': 3
     }, {
         'path': dupes_path,
         'nodes': 2
     }]
Example #9
 def __init__(self, year=None):
     self.base_url = 'http://www.presidency.ucsb.edu'
     self.headers = {
         'accept-encoding': 'gzip, deflate, br',
         'accept-language': 'en-US,en;q=0.9,ja;q=0.8',
         'cache-control': 'max-age=0',
         'dnt': '1',
         'upgrade-insecure-requests': '1',
         'user-agent': 'PyElection'
     }
     try:
         res = requests.get(url=sep.join([self.base_url, 'elections.php']), headers=self.headers)
         tables = BeautifulSoup(res.content, features='lxml').find_all('table')
         self.years = list(filter(None, map(lambda x: x.text, tables[10].find_all('a'))))
         self.year = str(year)
     except:
         raise ConnectionException('Failed to connect')
     if self.year not in self.years:
         raise ArgumentException('Provided year is not found: %s' % str(self.years))
     try:
         res = requests.get(url=sep.join([self.base_url, 'showelection.php']), headers=self.headers, params={
             'year': self.year
         })
         self.__tables = BeautifulSoup(res.content, features='lxml').find_all('table')
     except:
         raise
Example #10
File: computencaa.py Project: wusui/toybox
def compute_it(rundle):
    """
    Compute winning scores for every entry in a group.  Save this information
    in results.json.  Also save scores.txt for possible manual double checking.

    rundle -- group name
    """
    ofname = sep.join(['data', 'reality.json'])
    if not isfile(ofname):
        with open(ofname, 'w') as f:
            json.dump(get_reality(), f)
    with open(sep.join(["data", "reality.json"]), 'r') as f:
        nlist = json.load(f)
    with open(sep.join(["data", rundle, "picks.json"]), 'r') as f:
        pinfo = json.load(f)
    stash = []
    names = []
    ostring = ''
    for apick in pinfo:
        my_picks = {'name': apick[0], 'picks': apick[1]}
        print "Handling %s" % apick[0]
        ostring += "%s === %s\n" % (apick[0], score_so_far(nlist, my_picks))
        stash.append(gen_future(nlist, my_picks))
        names.append(my_picks['name'])
    data1 = gen_possible(nlist, stash, names)
    with open(sep.join(["data", rundle, "results.json"]), 'w') as f:
        json.dump(data1, f)
    with open(sep.join(["data", rundle, "scores.txt"]), 'w') as f:
        f.write(ostring)
Example #11
 def _handle_image(self, data):
     savedir = getcwd()
     if self.params['save_dir'] != "":
         savedir = self.params['save_dir']
     if self.params['file_name'] != "":
         imgname = self.params['file_name']  # use the user-supplied file name
     else:
         nxt = 0
         try:
             lgfls = [fl.split(sep)[-1] for fl in glob(sep.join([savedir, "*png"]))]
             numbers = ["".join([c for c in fl if c.isdigit()]) for fl in lgfls]
             last = sorted([int(num) for num in numbers if num])[-1]
             nxt = last + 1
             imgname = "{:06d}.png".format(nxt)
         except:
             imgname = "{:06d}.png".format(nxt)
     if path.isfile(sep.join([savedir, imgname])):
         print("Automatic file name generation failed. Use uni._widget.params['file_name']")
         return
     with open(sep.join([savedir, imgname]), "wb") as f:
         f.write(b64decode(data.replace("data:image/png;base64,", "")))
     # TODO : this likely won"t work on windows but SHOULD automatically
     #        crop the image to minimize whitespace of the final image.
     try:
         crop = " ".join(["convert -trim", imgname, imgname])
         subprocess.call(crop, cwd=savedir, shell=True)
     except:
         pass
Example #12
File: filepackage.py Project: marma/starch
 def _add_directory(self, dir, path, exclude='^\\..*|^_.*'):
     ep = compile(exclude)
     for f in listdir(dir):
         if not ep.match(f):
             self.add(sep.join([dir, f]),
                      path=sep.join([path, f]),
                      exclude=exclude)
Example #13
def common(paths):
    """
  Returns the longest common sub-path of the paths in the *paths* list. If *paths* is
  empty, contains mixed absolute/relative paths or the paths have no common
  path, a :class:`ValueError` is raised.

  If there is only one element in *paths*, its parent directory is returned.
  """

    if not paths:
        raise ValueError('paths is empty')

    parts = []
    has_abs = None
    for path in paths:
        if not isabs(path):
            if has_abs is None:
                has_abs = False
            elif has_abs:
                raise ValueError(
                    'paths contains relative and absolute pathnames')
        else:
            if has_abs is None:
                has_abs = True
            elif not has_abs:
                raise ValueError(
                    'paths contains relative and absolute pathnames')

        path = norm(path)
        parts.append(path.split(sep))

    if len(parts) == 1:
        path = dirname(sep.join(parts[0]))
        if not has_abs:
            path = rel(path)
        return path

    common = parts[0]
    for elements in parts[1:]:
        if len(elements) < len(common):
            common = common[:len(elements)]
        for index, elem in enumerate(elements):
            if index >= len(common):
                break
            if elem != common[index]:
                common = common[:index]
                break
            if not common:
                break

    if not common:
        raise ValueError("no common path")

    common = sep.join(common)
    if not has_abs:
        common = rel(common)
    return common
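For the multi-path case, the standard library's os.path.commonpath behaves the same way and likewise raises ValueError when absolute and relative paths are mixed; a quick check on a POSIX system:

import os.path

paths = ['/usr/local/lib', '/usr/local/share', '/usr/local/include']
print(os.path.commonpath(paths))  # /usr/local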
Example #14
def main(argv, file):
    configfile = open('config.yaml', 'r')
    with configfile:
        config = yaml.safe_load(configfile)  # safe_load avoids the Loader argument newer PyYAML requires
    if config.get('output', False):
        suggestions = open(config['output'], 'r')
    with suggestions:
        topics = json.load(suggestions)
    script_path = path.abspath(__file__)
    script_dir = path.split(script_path)[0]
    if not path.exists(sep.join([script_dir, 'topics'])):
        makedirs(sep.join([script_dir, 'topics']))
    for topic in topics:
        if topic.get('posts', False):
            posts = len(topic['posts'])
        else:
            posts = 0
        if posts > 0:
            threadfile = open(
                sep.join([
                    script_dir,
                    'topics',
                    str(topic['id']) + '.md']), 'w+')
            with threadfile:
                for post in topic['posts']:
                    post_url = config['post']['url'] + '?' +\
                        urlencode({
                            config['post']['attribute']: post['id']
                        })
                    if config['post'].get('hash', False):
                        post_url += '#' +\
                            str(config['post']['hash']).format(id=post['id'])
                    author_url = config['user']['url'] + '?' +\
                        urlencode({
                            config['user']['attribute']: post['author']['id'],
                            'mode': config['user']['mode'],
                        })
                    time = datetime.fromtimestamp(post['time']) + \
                        timedelta(hours=9)
                    time = time.strftime('%a %b %d, %Y %I:%M %p')
                    post_link = '[' + post['title'] + '](' + post_url + ')'
                    author_link =\
                        '[' + post['author']['name'] + '](' + author_url + ')'

                    post_lines = post['post'].split(linesep)
                    new_post_lines = []
                    for line in post_lines:
                        line = '> '+line
                        new_post_lines.append(line)
                    threadfile.write(
                        time + ' ' + post_link + ' by ' + author_link + ':' +
                        linesep
                    )
                    threadfile.write(
                        str(linesep).join(new_post_lines) +
                        2*linesep
                    )
Example #15
 def __init__(self,
              catchment,
              catchment_area_m2,
              start,
              end,
              time_delta_simu,
              time_delta_save,
              warm_up_days,
              in_format,
              root,
              gauged_area_m2=None):
     # general information
     self.catchment = catchment
     self.area = catchment_area_m2
     # directory information
     self.root_f = root
     self.in_f = sep.join([self.root_f, 'in', self.catchment, sep])
     self.out_f = sep.join([self.root_f, 'out', self.catchment, sep])
     if not path.exists(self.out_f):
         makedirs(self.out_f)
     # temporal information
     self.start = start
     self.end = end
     self.delta_simu = time_delta_simu
     self.delta_save = time_delta_save
     self.timeframe = TimeFrame(self.start, self.end, self.delta_simu,
                                self.delta_save)
     self.timeseries = self.timeframe.get_series_simu()
     self.timeseries_report = self.timeframe.get_series_save()
     self.warm_up = warm_up_days
     # physical information
     extra_ext = '.nc' if in_format == 'netcdf' else ''
     self.rain = get_dict_rain_series_simu(
         ''.join([self.in_f, self.catchment,
                  '.rain' + extra_ext]), in_format, self.timeseries[1],
         self.timeseries[-1], self.delta_simu)
     self.peva = get_dict_peva_series_simu(
         ''.join([self.in_f, self.catchment,
                  '.peva' + extra_ext]), in_format, self.timeseries[1],
         self.timeseries[-1], self.delta_simu)
     if gauged_area_m2:
         self.flow = get_dict_discharge_series(
             ''.join([self.in_f, self.catchment, '.flow']),
             self.timeframe.get_series_save()[1],
             self.timeframe.get_series_save()[-1], catchment_area_m2,
             gauged_area_m2)
     else:
         self.flow = None
     # optional extra information for setting up initial levels in reservoirs
     self.extra = None
     # parameters
     self.parameters = Parameters()
     # model outputs
     self.outputs = None
     self.discharge = None
     self.gw_contribution = None
Example #16
 def setUp(self):
     self.current_dir = tempfile.mkdtemp()
     self.source_dir = abspath(dirname(__file__))
     self.test_new_plugin_dir = sep.join([self.current_dir, 'vim-tests'])
     self.plugin_dir = sep.join([self.test_new_plugin_dir, "plugin"])
     self.tests_dir = sep.join([self.plugin_dir, 'tests'])
     self.doc_dir = sep.join([self.test_new_plugin_dir, "doc"])
     with mock.patch('scaffold_vim_plugin.create_plugin_scaffold.getcwd', return_value=self.current_dir):
         with mock.patch('__builtin__.raw_input', side_effect=['vim-tests', 'JarrodCTaylor', 'n']):
             create_scaffold()
Example #17
 def setUp(self):
     self.current_dir = tempfile.mkdtemp()
     self.source_dir = abspath(dirname(__file__))
     self.test_new_plugin_dir = sep.join([self.current_dir, "vim-tests"])
     self.plugin_dir = sep.join([self.test_new_plugin_dir, "plugin"])
     self.tests_dir = sep.join([self.plugin_dir, "tests"])
     self.doc_dir = sep.join([self.test_new_plugin_dir, "doc"])
     with mock.patch("vim_plugin_starter_kit.create_plugin_scaffold.getcwd", return_value=self.current_dir):
         with mock.patch("__builtin__.raw_input", side_effect=["vim-tests", "JarrodCTaylor", "n"]):
             create_scaffold()
Example #18
 def getContentPath(self, content):
     """
     given a content object, return the object's unique filesystem
     absolute path prefix.
     """        
     # means we want the full fs path
     rlen = len(self.getCMFMountPoint().getPhysicalPath())
     return sep.join ( (self.mount_point,
                        sep.join(content.getPhysicalPath()[rlen:][:-1]) )
                       )
Example #19
def build_scaffold_based_on_template(paths, plugin_type):
    if plugin_type:
        makedirs(sep.join([paths['new_plugin_path'], 'ftplugin', plugin_type]))
    else:
        makedirs(sep.join([paths['new_plugin_path'], 'plugin']))
    shutil.copytree(paths['template_tests_dir'], paths['new_plugin_tests_dir'])
    shutil.copytree(paths['template_doc_dir'], paths['new_plugin_doc_dir'])
    shutil.copyfile(paths['template_py_file'], paths['plugin_py'])
    shutil.copyfile(paths['template_vim_file'], paths['plugin_vim'])
    shutil.copyfile(paths['template_readme_file'], paths['plugin_readme_file'])
Example #20
def build_scaffold_based_on_template(paths, plugin_type):
    if plugin_type:
        makedirs(sep.join([paths['new_plugin_path'], 'ftplugin', plugin_type]))
    else:
        makedirs(sep.join([paths['new_plugin_path'], 'plugin']))
    shutil.copytree(paths['template_tests_dir'], paths['new_plugin_tests_dir'])
    shutil.copytree(paths['template_doc_dir'], paths['new_plugin_doc_dir'])
    shutil.copyfile(paths['template_py_file'], paths['plugin_py'])
    shutil.copyfile(paths['template_vim_file'], paths['plugin_vim'])
    shutil.copyfile(paths['template_readme_file'], paths['plugin_readme_file'])
Example #21
File: path.py Project: winksaville/craftr
def common(paths):
    """
  Returns the longest common sub-path of the paths in the *paths* list. If *paths* is
  empty, contains mixed absolute/relative paths or the paths have no common
  path, a :class:`ValueError` is raised.

  If there is only one element in *paths*, its parent directory is returned.
  """

    if not paths:
        raise ValueError("paths is empty")

    parts = []
    has_abs = None
    for path in paths:
        if not isabs(path):
            if has_abs is None:
                has_abs = False
            elif has_abs:
                raise ValueError("paths contains relative and absolute pathnames")
        else:
            if has_abs is None:
                has_abs = True
            elif not has_abs:
                raise ValueError("paths contains relative and absolute pathnames")

        path = norm(path)
        parts.append(path.split(sep))

    if len(parts) == 1:
        path = dirname(sep.join(parts[0]))
        if not has_abs:
            path = rel(path)
        return path

    common = parts[0]
    for elements in parts[1:]:
        if len(elements) < len(common):
            common = common[: len(elements)]
        for index, elem in enumerate(elements):
            if index >= len(common):
                break
            if elem != common[index]:
                common = common[:index]
                break
            if not common:
                break

    if not common:
        raise ValueError("no common path")

    common = sep.join(common)
    if not has_abs:
        common = rel(common)
    return common
Example #22
def detect_settings_dir():
    d = None
    if platform.startswith('linux'):
        d = _detect_settings_dir(sep.join([path.expanduser('~'), '.maltego']))
    elif platform == 'darwin':
        d = _detect_settings_dir(sep.join([path.expanduser('~'), 'Library', 'Application Support', 'maltego']))
    elif platform == 'win32':
        d = _detect_settings_dir(sep.join([environ['APPDATA'], '.maltego']))
    else:
        raise NotImplementedError('Unknown or unsupported OS: %s' % repr(platform))
    return d
Example #23
def split_guada_path(path):
    pieces = path.split(sep)
    if pieces[0] == apps_dir:
        if pieces[2] == trunk_dir:
            return (None, sep.join(pieces[3:]))
        elif pieces[2] == tags_dir:
            return (sep.join(pieces[0:3]), sep.join(pieces[3:]))
    elif pieces[0] == metapkgs_dir:
        return (sep.join(pieces[0:2]), sep.join(pieces[2:]))
    else:
        return None
Example #24
 def __init__(self):
     self._pattern_score = PatternSentimentAnalyzer()
     self.base_path = path.dirname(
         path.dirname(path.dirname(path.dirname(path.dirname(__file__)))))
     self.pattern = re.compile(r'\W')  # raw string avoids the invalid escape sequence warning
     # Reading the neg_features from resource and store as set in memory.
     self.not_set = ResourceManager.read_file(
         sep.join([self.base_path, "resources", "neg_features"]))
     # Reading the pos_features from resource and store as set in memory.
     self.pos_set = ResourceManager.read_file(
         sep.join([self.base_path, "resources", "pos_features"]))
Example #25
 def setUp(self):
     self.current_dir = tempfile.mkdtemp()
     self.source_dir = abspath(dirname(__file__))
     self.test_new_plugin_dir = sep.join([self.current_dir, 'vim-tests'])
     self.plugin_dir = sep.join([self.test_new_plugin_dir, "plugin"])
     self.tests_dir = sep.join([self.plugin_dir, 'tests'])
     self.doc_dir = sep.join([self.test_new_plugin_dir, "doc"])
     with mock.patch('vim_plugin_starter_kit.create_plugin_scaffold.getcwd',
                     return_value=self.current_dir):
         with mock.patch('__builtin__.raw_input',
                         side_effect=['vim-tests', 'JarrodCTaylor', 'n']):
             create_scaffold()
Example #26
File: rules.py Project: user01/coreferee
 def read_in_data_files(directory: str, rules_analyzer):
     for data_filename in (
             filename for filename in pkg_resources.resource_listdir(
                 __name__, sep.join(('lang', directory, 'data')))
             if filename.endswith('.dat')):
         full_data_filename = pkg_resources.resource_filename(
             __name__,
             sep.join(('lang', directory, 'data', data_filename)))
         with open(full_data_filename, "r", encoding="utf-8") as file:
             setattr(rules_analyzer, data_filename[:-4], [
                 v.strip() for v in file.read().splitlines()
                 if len(v.strip()) > 1 and not v.strip().startswith('#')
             ])
Example #27
 def test_linux(self):
     homepath = sep.join([randomstring(), randomstring()])
     user = randomstring()
     home = homepath.split(sep)
     home[-1] = user
     home = sep.join(home)
     with patch(__package__ + '.get_home.get_os',
                return_value=OS.CENTOS), patch.dict(
                    __package__ + '.get_home.environ',
                    HOME=homepath), patch(__package__ +
                                          '.get_home.get_user',
                                          return_value=user):
         self.assertEqual(subject(), join(sep, 'home', user))
Example #28
def run(args):

    opts = parser.parse_args(args)

    package_name = opts.package
    capitalized_package_name = package_name.capitalize()

    values = {
        'package' : package_name,
        'entity' : 'My%sEntity' % capitalized_package_name,
        'base_entity' : '%sEntity' % capitalized_package_name,
        'author' : getuser(),
        'year' : 2012,
        'project' : capitalized_package_name,
        'namespace' : package_name
    }

    base = sep.join([package_name, 'src', package_name])
    transforms = sep.join([base, 'transforms'])
    resources = sep.join([base, 'resources'])

    if not path.exists(package_name):
        print 'creating skeleton in %s' % package_name
        build_skeleton(
            package_name,
            [package_name, 'src'],
            [package_name, 'maltego'],
            base,
            transforms,
            [transforms, 'common'],
            resources,
            [resources, 'etc'],
            [resources, 'images']
        )
    else:
        print 'A directory with the name %s already exists... exiting' % package_name
        exit(-1)


    init = read_template('__init__', values)

    write_setup(package_name, values)

    write_root(base, init)

    write_resources(package_name, resources, init, values)

    write_common(transforms, init, values)

    print 'done!'
Example #29
def test_get_new_path(mock_normalize, tmpdir):
    mock_logger = Mock()
    tags = {"artist": "Track Artist", "album": "Track Album", "date": "1017"}
    valid_structure = sep.join(["{artist}", "{album} ({date})"])
    invalid_structure = sep.join(["{genre}", "{artist}", "{album}"])

    mock_normalize.return_value = tags

    assert join(str(tmpdir), "Track Artist",
                "Track Album (1017)") == get_new_path(mock_logger, str(tmpdir),
                                                      valid_structure, None,
                                                      True)
    assert None == get_new_path(mock_logger, str(tmpdir), invalid_structure,
                                None, True)
Example #30
def processfile(packed):
    """
    Read an image file and write a smaller version.

    Arguments:
        packed: A 3-tuple of (path, filename, output width)

    Returns:
        A 2-tuple (input file name, status).
        Status 0 indicates a succesful conversion,
        status 1 means that the input file was not a recognized image format,
        status 2 means a subprocess error.
    """
    path, name, newwidth = packed
    fname = sep.join([path, name])
    oname = sep.join([path, outdir, name.lower()])

    try:
        img = Image.open(fname)
        ld = {}
        for tag, value in img._getexif().items():
            decoded = TAGS.get(tag, tag)
            ld[decoded] = value
        want = set([
            'DateTime', 'DateTimeOriginal', 'CreateDate', 'DateTimeDigitized'
        ])
        available = sorted(list(want.intersection(set(ld.keys()))))
        fields = ld[available[0]].replace(' ', ':').split(':')
        dt = datetime(int(fields[0]), int(fields[1]), int(fields[2]),
                      int(fields[3]), int(fields[4]), int(fields[5]))
    except Exception:
        logging.warning('exception raised when reading the file time.')
        ed = {}
        cds = '{}:{}:{} {}:{}:{}'
        dt = datetime.today()
        ed['CreateDate'] = cds.format(dt.year, dt.month, dt.day, dt.hour,
                                      dt.minute, dt.second)
    args = [
        'convert', fname, '-strip', '-resize',
        str(newwidth), '-units', 'PixelsPerInch', '-density', '300',
        '-unsharp', '2x0.5+0.7+0', '-quality', '80', oname
    ]
    rp = subprocess.call(args)
    if rp != 0:
        return (name, 2)
    modtime = mktime(
        (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, 0, 0, -1))
    utime(oname, (modtime, modtime))
    return (fname, 0)
Example #31
File: pathtools.py Project: fongchun/ProDy
def sympath(path, beg=2, end=1, ellipsis='...'):
    """Returns a symbolic path for a long *path*, by replacing folder names
    in the middle with *ellipsis*.  *beg* and *end* specify how many folder
    (or file) names to include from the beginning and end of the path."""

    abs_items = abspath(path).split(pathsep)
    rel_items = relpath(path).split(pathsep)
    if len(abs_items) <= len(rel_items):
        items = abs_items
    else:
        items = rel_items
    if len(items) <= beg + end:
        return pathsep.join(items)
    else:
        return pathsep.join(items[:beg+1] + [ellipsis] + items[-end:])
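A self-contained sketch of the same idea written with pathlib (the helper name shorten is illustrative and not part of ProDy):

from pathlib import PurePosixPath

def shorten(path, beg=2, end=1, ellipsis='...'):
    parts = PurePosixPath(path).parts
    if len(parts) <= beg + end:
        return str(PurePosixPath(*parts))
    return str(PurePosixPath(*parts[:beg + 1], ellipsis, *parts[-end:]))

print(shorten('/home/user/projects/prody/tests/data.txt'))  # /home/user/.../data.txt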
Example #32
 def test_win(self):
     homedrive = randomstring()
     homepath = sep.join([randomstring(), randomstring()])
     user = randomstring()
     home = (homedrive + homepath).split(sep)
     home[-1] = user
     home = sep.join(home)
     with patch(__package__ + '.get_home.get_os',
                return_value=OS.WIN32), patch.dict(
                    __package__ + '.get_home.environ',
                    HOMEDRIVE=homedrive,
                    HOMEPATH=homepath), patch(__package__ +
                                              '.get_home.get_user',
                                              return_value=user):
         self.assertEqual(subject(), home)
Example #33
def sympath(path, beg=2, end=1, ellipsis='...'):
    """Return a symbolic path for a long *path*, by replacing folder names
    in the middle with *ellipsis*.  *beg* and *end* specify how many folder
    (or file) names to include from the beginning and end of the path."""

    abs_items = abspath(path).split(pathsep)
    rel_items = relpath(path).split(pathsep)
    if len(abs_items) <= len(rel_items):
        items = abs_items
    else:
        items = rel_items
    if len(items) <= beg + end:
        return pathsep.join(items)
    else:
        return pathsep.join(items[:beg + 1] + [ellipsis] + items[-end:])
Example #34
File: gen_tables.py Project: wusui/toybox
def gen_display(rundle):
    """
    Write the html file to display data for each potentially winning entrant.
    First read the template.txt file to get a sample html file.  Next start
    replacing fixed blocks of text with text that is appropriate for the
    table to display.  For each entrant, calculate the percentage of times that
    entry should win and collect a set of 'events to root for next round'.
    After that, write the file to the appropriate subdirectory depending on
    how deep into the tournament we are.

    rundle -- group name
    """
    with open('template.txt', 'r') as f:
        htmld = f.read()
    drundle = ' '.join(rundle.split('_'))
    htmld = htmld.replace('XRUNDLEX', drundle)
    saved_data = get_page_info(rundle)
    denom = get_denom(saved_data)
    for plr in saved_data:
        plr['pct'] = get_pct(plr['score'], denom) 
    with open(sep.join(['data', 'reality.json']), 'r') as f:
        happened = json.load(f)
    wpattern = happened[-1]
    game_head = game_headers(wpattern)
    htmld = htmld.replace('XMATCHUPHEADERSX', game_head)
    otable = ''
    for plr in saved_data:
        sdisp = []
        otable += "<tr>"
        for windx in range(0, len(wpattern), 2):
            indx = windx // 2  # integer division so indx can be used as a list index
            opp = plr['score'] - plr['winv'][indx]
            factor = 0
            if opp < plr['winv'][indx]:
                factor = 1
            degree = max(opp, plr['winv'][indx])
            if opp == plr['winv'][indx]:
                school = '*'
            else:
                school = wpattern[windx+factor]
            sdisp.append((school, bgcolor(plr['score'], degree)))
        otable = gen_tbl_line(otable, plr, sdisp)
        otable += "</tr>"
    htmld = htmld.replace('XTABLEDATAX', otable)
    bround = {16: 'sweet16', 8: 'elite8', 4: 'Final4'}
    pround = bround[len(wpattern)]
    with open(sep.join(['%s', '%s.html']) % (pround, rundle), 'w') as wf:
        wf.write(htmld)
Example #35
    def __init__(self, delay=1, browser="firefox"):
        """delay: Number of extra seconds to wait when a page is
           supposedly loaded. Try raising this in case of weird errors.

           browser: `firefox` or `chrome`. The ChromeDriver executable for your
           OS must be inside the bin directory for Chrome to work. Get it from:
           http://chromedriver.storage.googleapis.com/index.html
        """
        self.extra_delay = delay  # extra time to wait after each operation (s)
        self.temp_dir = mkdtemp()

        self.vdisplay = Xvfb()
        self.vdisplay.start()
        if browser == "firefox":
            profile = FirefoxProfile()
            # Open links in same window
            profile.set_preference("browser.link.open_newwindow", 1)
            # Download to temp dir, for files we can't open inline
            profile.set_preference("browser.download.dir", self.temp_dir)
            profile.set_preference("browser.download.folderList", 2)
            profile.set_preference("browser.download.manager.showWhenStarting",
                                   "False")
            profile.set_preference("browser.helperApps.neverAsk.saveToDisk",
                                   "application/msword, application/vnd.ms-word, application/rtf, application/octet-stream")

            # Add extension for overriding Content-Disposition headers, etc
            extensions_dir = os_sep.join(['bin', 'firefox-plugins-enabled'])
            for filename in listdir(extensions_dir):
                fullfilename = os_sep.join([extensions_dir, filename])
                profile.add_extension(extension=fullfilename)

            driver = Firefox(profile)
        elif browser == "chrome":
            # Add extension for overriding Content-Disposition headers
            options = ChromeOptions()
            options.add_extension('bin/undisposition.crx')
            driver = Chrome(executable_path='bin/chromedriver',
                            chrome_options=options)
        else:
            raise Exception("Not a valid browser name")

        self.selenium_driver = EventFiringWebDriver(driver, CustomListener())
        """selenium_driver is a EventFiringWebDriver, so that it can
           trigger javascript event
        """
        self.browser_version = " ".join([
            self.selenium_driver.capabilities['browserName'],
            self.selenium_driver.capabilities['version']])  # 'Firefox 33.0'
Example #36
def parse_molcas(file_path, momatrix=None, overlap=None, occvec=None, density=None, **kwargs):
    """
    Will parse a Molcas output file. Optionally it will attempt
    to parse additional information obtained from the same directory
    from specified Orb files or the AO overlap matrix and density matrix.
    If density keyword is specified, the momatrix keyword is ignored.

    Args:
        file_path (str): Path to output file
        momatrix (str): file name of the C matrix of interest
        overlap (str): file name of the overlap matrix
        density (str): file name of the density matrix

    Returns:
        parsed (Editor): contains many attributes similar to the
            exatomic universe
    """
    uni1 = Output(file_path, **kwargs)
    dirtree = sep.join(file_path.split(sep)[:-1])
    if density is not None:
        fp = sep.join([dirtree, density])
        if os.path.isfile(fp):
            dens = DensityMatrix(_parse_ovl(fp))
            uni1.density = dens
        else:
            print('Is {} in the same directory as {}?'.format(density, file_path))
    if momatrix is not None and density is None:
        fp = sep.join([dirtree, momatrix])
        if os.path.isfile(fp):
            orbs = Orb(fp)
            orbs.parse_momatrix()
            if occvec is not None:
                dens = DensityMatrix.from_momatrix(orbs.momatrix, occvec)
            else:
                dens = DensityMatrix.from_momatrix(orbs.momatrix, orbs.occupation_vector)
            uni1.momatrix = orbs.momatrix
            uni1.occupation_vector = orbs.occupation_vector
            uni1.density = dens
        else:
            print('Is {} in the same directory as {}?'.format(momatrix, file_path))
    if overlap is not None:
        fp = sep.join([dirtree, overlap])
        if os.path.isfile(fp):
            ovl = _parse_ovl(fp)
            uni1.overlap = ovl
        else:
            print('Is {} in the same directory as {}?'.format(overlap, file_path))
    return uni1
Example #37
def delete_file(filename):
    filename = code_map[filename]
    try:
        remove(sep.join([STORAGE, filename]))
    except:
        print("unable to delete %s" % filename.encode().decode('latin-1'))
    return redirect(url_for('home'))
Example #38
def download_file(filename):
    filename = code_map[filename]
    response = make_response(
        send_file(sep.join([STORAGE, filename]), as_attachment=True))
    response.headers["Content-Disposition"] = "attachment; filename={}".format(
        filename.encode().decode('latin-1'))
    return response
Example #39
def compare_simulation_files(simulation_files, tol):
    REF_INDEX = 0
    OTHER_INDEX = 1
    at_least_one_diff = False
    for file_pair in simulation_files:
        # Read reference and simulation (other) files
        ref_data_frame = read_csv(file_pair[REF_INDEX])
        other_data_frame = read_csv(file_pair[OTHER_INDEX])

        # Check that reference column titles are a subset of the simulation titles
        ref_column_titles = get_headers(ref_data_frame)
        other_column_titles = get_headers(other_data_frame)

        check(ref_column_titles.issubset(other_column_titles),
              f"The following column(s) is missing in the reference {ref_column_titles.difference(other_column_titles)}")

        for col_name in ref_column_titles:
            try:
                pandas.testing.assert_series_equal(ref_data_frame[col_name], other_data_frame[col_name], \
                                               atol=tol.absolute(col_name), rtol=tol.relative(col_name))
            except AssertionError:  # Catch and re-raise exception to print col_name & tolerances
                # Reduced path of files we're comparing
                file_path = sep.join(file_pair[REF_INDEX].absolute().parts[-4:])

                print_comparison_report(ref_data_frame, other_data_frame, file_path, col_name)
                at_least_one_diff = True

    return not at_least_one_diff
Example #40
File: nmap.py Project: mshelton/sploitego
def savereport(report):
    if not path.exists(config['nmap/reportdir']):
        makedirs(config['nmap/reportdir'])
    f = ufile(strftime(sep.join([config['nmap/reportdir'], config['nmap/namefmt']])))
    f.write(report.output)
    f.close()
    return f.name
Example #41
def format_data_line(compartment,bdiv_metric,category,category_results,\
  residual_results,total_results,n_permutations,function_call,path,sep="\t"):
    """Format a csv using results"""
    #Headings are:
    if category_results["p"] < 0.001:
        category_results["sig"] = '***'
    elif category_results["p"] < 0.01:
        category_results["sig"] = '**'
    elif category_results["p"] < 0.05:
        category_results["sig"] = "*"
    elif category_results["p"] < 0.1:
        category_results["sig"] = "."
    else:
        category_results["sig"] = " "

    #adjust R2
    df = category_results["degrees_of_freedom"]
    total_df = total_results["degrees_of_freedom"]
    r2 = category_results["R2"]
    adjusted_r2 = adjust_r2(r2, df, total_df)
    rounded_adjusted_r2 = round(adjusted_r2, 2)

    data_list = [compartment,category,bdiv_metric,n_permutations,function_call,\
      category_results["degrees_of_freedom"],category_results["sum_of_squares"],category_results["F"],category_results["R2"],\
      adjusted_r2,rounded_adjusted_r2,category_results["p"],category_results["sig"],residual_results["degrees_of_freedom"],\
      residual_results["sum_of_squares"],residual_results["R2"],\
      total_results["degrees_of_freedom"],path]
    return sep.join(map(str, data_list)) + "\n"
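Note that sep in this example is the keyword argument defaulting to a tab character, not os.sep as in most of the other examples; with illustrative values, the final line is produced like this:

row = ['gut', 'unweighted_unifrac', 0.42, 0.001, '***']
print('\t'.join(map(str, row)))  # one tab-separated record, ready to append to the report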
Example #42
 def tearDown(self):
     dirs = ['nitromalonamide_nmr']
     for dir in dirs:
         path = sep.join([dir, '*'])
         for i in glob(path):
             remove(i)
         rmdir(dir)
Example #43
    def __stop_go_app(self, path_to_file):
        EXTENTION = '.go'

        project_forlders = self.view.window().folders()

        while len(path_to_file)>2: # 2 = (directory + file)
            path_to_file.pop()
            directory = path_separator.join(path_to_file)
            project_forlders = [x for x in project_forlders if x != directory]

            fields_list = [path_to_file[-1]]
            for x in os.listdir(directory):
                if not isdir(x) and not x.endswith(EXTENTION):
                    continue

                fields_list.append(x[:-len(EXTENTION)])

            def kill(process_name):
                try:
                    self.__kill_process(process_name, True)
                    return True
                except:
                    return False

            for name_without_extention in fields_list:

                if kill(name_without_extention) or \
                   kill('{0}.test'.format(name_without_extention)):
                    break

            if len(project_forlders) == 0:
                 break
Example #44
def requirements(filename, module):
    '''
    '''
    head, tail = split(realpath(filename))
    
    while tail and tail != module:
        head, tail = split(head)
    
    content = open(filename).read()
    requirements, seen = set(), set()
    
    for match in modules(content):
        parts = match.group().split('.')
        
        for index in range(1, len(parts)):
            reqname = sep.join(parts[:index+1]) + '.js'
            reqpath = join(head, reqname)
            
            if reqpath in seen:
                continue
            
            seen.add(reqpath)
            
            if not exists(reqpath):
                continue
            
            if realpath(filename) == realpath(reqpath):
                continue
                
            requirements.add(reqname)

    return list(requirements)
Example #45
    def __str__(self):
        s = []
        if self.args:
            if isinstance(self.args, tuple):
                if len(self.args) > 0:
                    s.append('LDAPMaximumRetriesError: ' + str(self.args[0]))
                if len(self.args) > 1:
                    s.append('Exception history:')
                    prev_exc = ''
                    for i, exc in enumerate(
                            self.args[1]
                    ):  # args[1] contains exception history
                        # if str(exc[1]) != prev_exc:
                        #     s.append((str(i).rjust(5) + ' ' + str(exc[0]) + ': ' + str(exc[1]) + ' - ' + str(exc[2])))
                        #     prev_exc = str(exc[1])
                        if str(exc) != prev_exc:
                            s.append((str(i).rjust(5) + ' ' + str(type(exc)) +
                                      ': ' + str(exc)))
                            prev_exc = str(exc)
                if len(self.args) > 2:
                    s.append('Maximum number of retries reached: ' +
                             str(self.args[2]))
        else:
            s = [LDAPExceptionError.__str__(self)]

        return sep.join(s)
Example #46
 def select_parent_folder(self):
     """
          Set the current folder to its parent folder.
     """
     self.current_index = 0
     if len(self.current_folder) > len(self.home_folder):
         self.current_folder = sep.join(self.current_folder.split(sep)[0:-1])
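Splitting on sep and re-joining everything except the last node is equivalent to os.path.dirname for paths without a trailing separator, for example:

from os.path import dirname

print(dirname('/home/user/music/album'))  # /home/user/music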
Example #47
 def tearDown(self):
     dirs = ['h2o2', 'methyloxirane']
     for dir in dirs:
         path = sep.join([dir, '*'])
         for i in glob(path):
             remove(i)
         rmdir(dir)
Example #48
 def test_from_file(self, fname):
     fpath = sep.join(["assets", fname])
     with open(fpath) as fl:
         circle = Circle.from_file(fpath)
         origin, radius = fl.readlines()
         assert circle.origin == Point.from_string(origin)
         assert circle.radius == float(radius.strip())
Example #49
def calculate_outdir(indir):
    indir_separated = re.split(os_sep, path.abspath(indir))
    for idx in reversed(range(len(indir_separated))):
        if indir_separated[idx] == 'wav':
            indir_separated[idx] = 'flac'
            break
    return os_sep.join(indir_separated)
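Illustrative behaviour with POSIX separators: the deepest 'wav' directory is swapped for 'flac'. The same idea can be written with pathlib:

from pathlib import PurePosixPath

p = PurePosixPath('/music/wav/artist/album')
parts = list(p.parts)
for i in reversed(range(len(parts))):
    if parts[i] == 'wav':
        parts[i] = 'flac'
        break
print(PurePosixPath(*parts))  # /music/flac/artist/album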
Example #50
def treepaths(paths):
    assert isinstance(paths, list) or isinstance(paths, tuple)
    tree = {}
    for path in paths:
        str_path = str(path)
        subtree = tree
        path_segments = str_path.split(os_sep)
        for path_segment_i, _path_segment in enumerate(path_segments):
            if path_segment_i < len(path_segments) - 1:
                dir_path = os_sep.join(path_segments[:path_segment_i + 1])
                try:
                    subtree = subtree[dir_path]
                    # check if dir_path is set to a file
                    if not isinstance(subtree, dict):
                        raise ValueError(
                                  'subtree[' + dir_path + \
                                  '] already set to a file'
                              )
                except KeyError:
                    subtree[dir_path] = {}
                    subtree = subtree[dir_path]
            elif not str_path in subtree:
                subtree[str_path] = path
            else:
                raise ValueError(
                          'subtree[' + str_path + '] already set to' + \
                          subtree[str_path]
                      )

    # dict.values() is not indexable in Python 3, so materialise the lone value first
    while len(tree) == 1 and \
          isinstance(list(tree.values())[0], dict) and \
          len(list(tree.values())[0]) == 1:
        tree = list(tree.values())[0]

    return tree
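An illustrative call with POSIX separators, assuming the treepaths function above is in scope; note how the final while loop collapses single-child levels at the top of the tree:

tree = treepaths(['a/b/c.txt', 'a/b/d.txt'])
# {'a/b': {'a/b/c.txt': 'a/b/c.txt', 'a/b/d.txt': 'a/b/d.txt'}}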
Example #51
    def save(self):
        output_file_name = ".".join(
            [self.product_code, self._data_file_suffix])
        output_file = open(
            path_sep.join([self._meta_dir_path, output_file_name]), "w")

        yaml.dump(self._contents, stream=output_file, default_flow_style=False)
Example #52
def getSubjectList(dbroot, subject, obj=False):
	data = []
	if not obj:
		if subject["subject"] == "*":
			for i in listdir(dbroot):
				if isdir(AddSep(dbroot) + i):
					data.append(i)
			if len(data) < 1:
				data = [ "" ]
			return data
		if "*" in subject["mods"]:
			subject["mods"].remove("*")
			path = generateSubjectPath(dbroot, subject)
			for i in listdir(path):
				if isdir(AddSep(path) + i):
					data.append(i)
			return data
		else:
			return False
	else:
		if not obj["object"] == "Subject":
			return False
		if "*" in obj["mods"]:
			obj["mods"].remove("*")
			path = AddSep(dbroot) + subject["subject"] + sep.join(obj["mods"])
			for i in listdir(path):
				if isdir(AddSep(path) + i):
					data.append(i)
			return data
		else:
			return False
Example #53
def customize_tests(paths, plugin_name, plugin_under):
    plugin_camel = "".join([word.title() for word in plugin_name.split("-")])
    tests_file_path = sep.join([paths['new_plugin_tests_dir'], 'template_tests.py'])
    tests_contents = get_file_contents(tests_file_path)
    tests_contents[1] = "import {} as sut\n".format(plugin_under)
    tests_contents[5] = "class {}Tests(unittest.TestCase):\n".format(plugin_camel)
    tests_contents[8] = "        result = sut.{}_example()\n".format(plugin_under)
    write_to_file(tests_contents, tests_file_path)
Example #54
def resourceFromURI(uri, resourceClass):
    # TODO clean up
    (dummyscheme, dummyhost, path, dummyquery, dummyfragment) = urlsplit(uri)
    segments = path.split("/")
    assert segments[0] == "", "URL path didn't begin with '/': %s" % (path,)
    segments = map(unquote, segments[1:])
    path = repository + sep + sep.join(segments)
    return resourceClass(path)
Example #55
File: gen_tables.py Project: wusui/toybox
def get_page_info(rundle):
    # Read the results.json file for the appropriate group.
    #
    # rundle -- group name
    # returns:  data collected for this group by the computencaa.py script
    with open(sep.join(['data', rundle, 'results.json']), 'r') as f:
        saved_data = json.load(f)
    return saved_data
Example #56
File: amap.py Project: mshelton/sploitego
 def getversion(self):
     for p in environ['PATH'].split(pathsep):
         program = sep.join([p, 'amap'])
         if path.exists(program):
             self.program = program
             self.version = self.run([])
             return True
     return False
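The loop above is essentially an executable lookup over PATH; the standard library's shutil.which performs the same search:

import shutil

program = shutil.which('amap')
print(program)  # full path to the binary, or None if it is not on PATH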
Example #57
def urlPathFromPath(path):
    """
    URL-quote a file system path
    """
    return sep.join(
                       map(
                           quote, 
                           path.split(sep)))
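Illustrative behaviour on a POSIX system, assuming quote comes from urllib.parse and sep is os.sep, as the call pattern suggests:

from os import sep
from urllib.parse import quote

print(sep.join(map(quote, '/srv/files/my report.txt'.split(sep))))  # /srv/files/my%20report.txt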
Example #58
File: check_data.py Project: wusui/toybox
def read_text(group_name, file_txt):
    # Called from check_data to read a text file.
    #
    # group_name -- group we are checking.
    # file_txt -- name of the file we are reading
    in_file = sep.join(['data', group_name, file_txt])
    with open(in_file, 'r') as f:
        data = f.read()
    return data.strip().split('\n')