Beispiel #1
0
def only_changed(
    safelist: List[str],
    flags: int = glob.BRACE | glob.DOTGLOB | glob.GLOBSTAR | glob.NEGATE
) -> bool:
    """
    Tell whether the current build is a Pull Request whose entire change set
    falls inside `safelist`.

    Intended for `skipif`-style gates: when every modified file matches a
    safelist pattern, the calling tests may be skipped.
    """
    ci_type, changed = _change_detector.get_changed_files()
    # Anything that is not a PR (trains included) must run the full suite.
    if ci_type != CI_PULL_REQUEST:
        return False
    assert changed is not None
    safe_matches = glob.globfilter(changed, safelist, flags=flags)
    unsafe = set(changed) - set(safe_matches)
    # Collection-time call site: record the caller's file and line number so
    # the log message can be traced back to the specific test.
    frame = inspect.stack()[1]
    where = '{}:{}'.format(trailing_path(frame.filename, 2),
                           frame.lineno)
    if not unsafe:
        logging.info('%s: All changed files in safelist', where)
        return True
    logging.info('%s: Changed files not in safelist: %s', where,
                 tuple(unsafe))
    return False
    def run(self):
        """Collect syntax-definition resources and show them in a quick panel."""
        resources = sublime.find_resources('*')
        matched = glob.globfilter(resources,
                                  '**/*.@(sublime-syntax|tmLanguage)',
                                  flags=GLOB_FLAGS | glob.I)
        # Strip the leading 'Packages/' prefix and the file extension so the
        # panel shows bare syntax names.
        self.syntaxes = [
            os.path.splitext(name)[0].replace('Packages/', '', 1)
            for name in matched
        ]
        if self.syntaxes:
            self.window.show_quick_panel(self.syntaxes, self.check_selection)
Beispiel #3
0
    def filter_excludes(arr: FrozenSet[Path],
                        excludes: List[str]) -> FrozenSet[Path]:
        """
        Drop from ``arr`` every path that matches at least one excludes
        pattern.

        An empty ``excludes`` list leaves ``arr`` unchanged.
        """
        if not excludes:
            return arr

        patterns = TargetManager.preprocess_path_patterns(excludes)
        excluded = frozenset(
            wcglob.globfilter(arr, patterns,
                              flags=wcglob.GLOBSTAR | wcglob.DOTGLOB))
        return arr - excluded
Beispiel #4
0
    def filter_includes(arr: FrozenSet[Path],
                        includes: Sequence[str]) -> FrozenSet[Path]:
        """
        Keep only the elements of ``arr`` that match some includes pattern.

        An empty ``includes`` sequence leaves ``arr`` unchanged.
        """
        if not includes:
            return arr

        patterns = TargetManager.preprocess_path_patterns(includes)
        matched = wcglob.globfilter(arr, patterns,
                                    flags=wcglob.GLOBSTAR | wcglob.DOTGLOB)
        return frozenset(matched)
Beispiel #5
0
    def glob(self, pattern: str) -> List["DPPath"]:
        """Search path using the glob pattern.

        Parameters
        ----------
        pattern : str
            glob pattern

        Returns
        -------
        List[DPPath]
            list of paths
        """
        # Restrict the candidate keys to those under the current path first,
        # which is faster than matching every key.
        subpaths = [ii for ii in self._keys if ii.startswith(self.name)]
        # The comprehension already builds a list; the original wrapped it in
        # a redundant list() call.
        return [
            type(self)("%s#%s" % (self.root_path, pp))
            for pp in globfilter(subpaths, self._connect_path(pattern))
        ]
def _list_agave(uri, globstr, agave):
    """
    List contents of agave URI.

    Args:
        uri: parsed URI to list.
        agave: dict that contains:
            agave_wrapper: Agave wrapper object.

    Returns:
        On success: a list of filenames (basenames only).
        On failure: False.

    """
    # A '**' pattern needs unlimited recursion; otherwise one listing level
    # per path separator in the glob is enough.
    if '**' in globstr:
        depth = -1
    else:
        depth = globstr.count('/')+1

    file_list = agave['agave_wrapper'].files_list(
        uri['authority'], uri['chopped_path'], depth=depth
    )
    if file_list is False:
        Log.an().error(
            'cannot get file list for uri: %s', uri['chopped_uri']
        )
        return False

    # Strip the chopped-path prefix from each entry, then keep only names
    # matching the glob pattern.
    prefix_len = len(uri['chopped_path'])+1
    names = [str(f['path']+'/'+f['name'])[prefix_len:] for f in file_list]
    return glob.globfilter(
        names, globstr, flags=glob.EXTGLOB|glob.GLOBSTAR
    )
Beispiel #7
0
def only_changed(
    safelist: List[str],
    flags: int = glob.BRACE | glob.DOTGLOB | glob.GLOBSTAR | glob.NEGATE
) -> bool:
    """
    Report whether every file changed by the current Pull Request (or train
    build) falls within a safelist of glob patterns.

    Usable as a `skipif` condition to skip tests when only "safe" files were
    touched.
    """
    # The default safelist relies on `**`, so force GLOBSTAR on even when the
    # caller supplied custom flags.
    flags |= glob.GLOBSTAR
    ci_type, changed = _change_detector.get_changed_files()
    if ci_type == CI_PULL_REQUEST:
        pass  # honour the caller-supplied safelist
    elif ci_type == CI_TRAIN:
        # Trains must run whenever the build or installation of DC/OS could
        # be affected in any way; e2e tests are skipped only for changes
        # covered by the default safelist.
        safelist = E2E_SAFE_DEFAULT
    else:
        # Any other CI context: run everything.
        return False
    assert changed is not None
    matched = glob.globfilter(changed, safelist, flags=flags)
    unsafe = set(changed) - set(matched)
    # Called during collection when used in `skipif`, so the log line carries
    # the caller's file and line number for traceability.
    frame = inspect.stack()[1]
    where = '{}:{}'.format(trailing_path(frame.filename, 2),
                           frame.lineno)
    if unsafe:
        logging.info('%s: Changed files not in safelist: %s', where,
                     tuple(unsafe))
        return False
    logging.info('%s: All changed files in safelist', where)
    return True
Beispiel #8
0
    def _get_map_uri_list(self):
        """
        Get the contents of the map URI (agave URI).

        Builds one dict per matched file with a 'chopped_uri' (the containing
        folder URI) and a 'filename' (base name only).

        Args:
            self: class instance.

        Returns:
            Array of base file names in the map URI. Returns False on
            exception.

        """
        combined_file_list = []
        for uri in self._parsed_map_uris:
            # make sure map URI is compatible scheme (agave)
            if uri['scheme'] != 'agave':
                msg = 'invalid map uri scheme for this step: {}'.format(
                    uri['scheme']
                )
                Log.an().error(msg)
                return self._fatal(msg)

            # get file list from URI, filtered by the step's map glob
            file_list = DataManager.list(
                parsed_uri=uri,
                globstr=self._step['map']['glob'],
                agave=self._agave
            )
            if file_list is False:
                msg = 'cannot get contents of map uri: {}'\
                    .format(uri['chopped_uri'])
                Log.an().error(msg)
                return self._fatal(msg)

            if self._step['map']['inclusive']:
                # "inclusive" maps also consider the map URI itself a
                # candidate: include it when its own name matches the glob.
                if glob.globfilter(
                    [uri['name']],
                    self._step['map']['glob'],
                    flags=glob.EXTGLOB|glob.GLOBSTAR
                ):
                    combined_file_list.append({
                        'chopped_uri': '{}://{}{}'.format(
                            uri['scheme'],
                            uri['authority'],
                            uri['folder']
                        ),
                        'filename': uri['name']
                    })

            for f in file_list:
                if '/' in f:
                    # reparse uri to correctly represent recursive elements
                    new_uri = URIParser.parse('{}/{}'.format(uri['chopped_uri'], f))
                    combined_file_list.append({
                        'chopped_uri': '{}://{}{}'.format(
                            new_uri['scheme'],
                            new_uri['authority'],
                            new_uri['folder']
                        ),
                        'filename': new_uri['name']
                    })
                else:
                    # plain file directly under the map URI
                    combined_file_list.append({
                        'chopped_uri': uri['chopped_uri'],
                        'filename': f
                    })

        return combined_file_list
        print('Flags?')
        print(case)
        print(flags, cls.flags)
        flags = cls.flags ^ flags
        pat = case[0] if isinstance(case[0], list) else [case[0]]
        if split and cls.skip_split:
            return
        if split:
            new_pat = []
            for x in pat:
                new_pat.extend(list(glob.globsplit(x, flags=flags)))
            pat = new_pat
        print("PATTERN: ", case[0])
        print("FILES: ", files)
        print("FLAGS: ", bin(flags))
        result = sorted(glob.globfilter(files, pat, flags=flags))
        source = sorted(cls.norm_files(case[1], flags))
        print("TEST: ", result, '<==>', source, '\n')
        cls.assert_equal(result, source)

    @pytest.mark.parametrize("case", cases)
    def test_glob_filter(self, case):
        """Run one parametrized wildcard-parsing case through `_filter`."""
        # Start from a cold pattern-compile cache so cases stay isolated.
        _wcparse._compile.cache_clear()
        self._filter(case)

    @pytest.mark.parametrize("case", cases)
    def test_glob_split_filter(self, case):
        """Test wildcard parsing by first splitting on `|`."""
Beispiel #10
0
def to_system_data(f: h5py.File,
                   folder: str,
                   type_map: list = None,
                   labels: bool = True):
    """Load a HDF5 file.

    Parameters
    ----------
    f : h5py.File
        HDF5 file object
    folder : str
        path in the HDF5 file; an empty string reads from the file root
    type_map : list
        atom-type names, used when the file has no 'type_map.raw' dataset
    labels : bool
        labels (not consulted in this function body — kept for interface
        compatibility)
    """
    g = f[folder] if folder else f

    data = {}
    data['atom_types'] = g['type.raw'][:]
    ntypes = np.max(data['atom_types']) + 1
    natoms = data['atom_types'].size
    # count of atoms per type
    data['atom_numbs'] = []
    for ii in range(ntypes):
        data['atom_numbs'].append(np.count_nonzero(data['atom_types'] == ii))
    data['atom_names'] = []
    # if find type_map.raw, use it
    if 'type_map.raw' in g.keys():
        my_type_map = list(np.char.decode(g['type_map.raw'][:]))
    # else try to use arg type_map
    elif type_map is not None:
        my_type_map = type_map
    # in the last case, make artificial atom names
    else:
        my_type_map = []
        for ii in range(ntypes):
            my_type_map.append('Type_%d' % ii)
    assert (len(my_type_map) >= len(data['atom_numbs']))
    for ii in range(len(data['atom_numbs'])):
        data['atom_names'].append(my_type_map[ii])

    data['orig'] = np.zeros([3])
    if 'nopbc' in g.keys():
        data['nopbc'] = True
    # data frames live in groups named 'set.*'
    sets = globfilter(g.keys(), 'set.*')

    data_types = {
        'cells': {
            'fn': 'box',
            'labeled': False,
            'shape': (3, 3),
            # a periodic system must carry a cell
            'required': 'nopbc' not in data
        },
        'coords': {
            'fn': 'coord',
            'labeled': False,
            'shape': (natoms, 3),
            'required': True
        },
        'energies': {
            'fn': 'energy',
            'labeled': True,
            'shape': tuple(),
            'required': False
        },
        'forces': {
            'fn': 'force',
            'labeled': True,
            'shape': (natoms, 3),
            'required': False
        },
        'virials': {
            'fn': 'virial',
            'labeled': True,
            'shape': (3, 3),
            'required': False
        },
    }

    for dt, prop in data_types.items():
        all_data = []

        for ii in sets:
            # renamed from `set`, which shadowed the builtin
            cur_set = g[ii]
            fn = '%s.npy' % prop['fn']
            if fn in cur_set.keys():
                dd = cur_set[fn][:]
                nframes = dd.shape[0]
                all_data.append(np.reshape(dd, (nframes, *prop['shape'])))
            elif prop['required']:
                raise RuntimeError("%s/%s/%s not found" % (folder, ii, fn))

        # concatenate frames from all sets along the frame axis
        if len(all_data) > 0:
            data[dt] = np.concatenate(all_data, axis=0)
    return data
Beispiel #11
0
        flags = cls.flags ^ flags
        pat = case[0] if isinstance(case[0], list) else [case[0]]
        if split and cls.skip_split:
            return
        if split:
            new_pat = []
            for x in pat:
                new_pat.extend(list(glob.globsplit(x, flags=flags)))
            pat = new_pat
        print("PATTERN: ", case[0])
        print("FILES: ", files)
        print("FLAGS: ", bin(flags))
        result = sorted(
            glob.globfilter(
                files,
                pat,
                flags=flags
            )
        )
        source = sorted(cls.norm_files(case[1], flags))
        print("TEST: ", result, '<==>', source, '\n')
        cls.assert_equal(result, source)

    @pytest.mark.parametrize("case", cases)
    def test_glob_filter(self, case):
        """Exercise `_filter` on a single parametrized wildcard case."""
        # Reset the compiled-pattern cache so each case runs from scratch.
        _wcparse._compile.cache_clear()
        self._filter(case)
# Join matching provenance triples from pairs1/pairs2 into '-'-delimited
# strings, deduplicate them, then filter with a glob search term.
result = []
joinResult = []
for p1 in pairs1:
    for p2 in pairs2:
        # agent compare: the triples match when agent/entity/activity ids
        # line up across the two pair lists
        if p1[0]['id'] == p2[2]['id'] and p1[1]['id'] == p2[0]['id'] and p1[2][
                'id'] == p2[1]['id']:
            result = [
                p1[0]['properties']['name'],
                p1[0]['properties']['affiliation'],  # Agent
                p1[2]['properties']['name'],
                p1[2]['properties']['date'],
                p1[1]['properties']['name'],  # Entity
                p1[1]['properties']['type'],
                p1[1]['properties']['price'],
                p1[1]['properties']['device']
            ]  # Activity

            delimiter = '-'
            joinResult.append(delimiter.join(result))

joinResult = list(set(joinResult))  # remove duplicates from the list

searchTerm = ['*Emily*']
# Renamed from `re`, which shadowed the stdlib `re` module.
search_results = glob.globfilter(joinResult, searchTerm)
print(search_results)

# A search term such as '*Emily*2018' can also be used for searching.
# In JS, allow users to append '*' when searching.
# This function is invoked from Python.
# Cypher to export as JSON: CALL apoc.export.json.query("<query>","filename.json",{})
Beispiel #13
0
    raise SystemExit(
        "This file is intended to be executed as an executable program. You cannot use it as a module."
        f"To run this script, run the ./{__file__} command"
    )

CONFIG_KEY = 'labelPRBasedOnFilePath'

# All files tracked by git, relative to the repository root.
current_files = subprocess.check_output(["git", "ls-files"]).decode().splitlines()
git_root = Path(subprocess.check_output(['git', 'rev-parse', '--show-toplevel']).decode().strip())
cyborg_config_path = git_root / ".github" / "boring-cyborg.yml"
cyborg_config = yaml.safe_load(cyborg_config_path.read_text())
if CONFIG_KEY not in cyborg_config:
    raise SystemExit(f"Missing section {CONFIG_KEY}")

# Report every labelling pattern that matches no tracked file: such a pattern
# is dead configuration and usually indicates a typo or a moved directory.
errors = []
for label, patterns in cyborg_config[CONFIG_KEY].items():
    for pattern in patterns:
        if glob.globfilter(current_files, pattern, flags=glob.G | glob.E):
            continue
        yaml_path = f'{CONFIG_KEY}.{label}'
        errors.append(
            f"Unused pattern [{colored(pattern, 'cyan')}] in [{colored(yaml_path, 'cyan')}] section."
        )

if errors:
    print(f"Found {colored(str(len(errors)), 'red')} problems:")
    print("\n".join(errors))
    sys.exit(1)
else:
    # Fixed grammar of the success message ("No found problems" -> "No problems found").
    print("No problems found. Have a good day!")