Example #1
    def search(self, what, cat="all"):
        """ Performs search """
        query = "".join(
            (
                self.url,
                "/index.php?page=torrents&search=",
                what,
                "&category=",
                self.supported_categories.get(cat, "0"),
                "&active=1",
            )
        )

        get_table = re_compile(r'(?s)<table\sclass="lista".*>(.*)</table>')
        data = get_table.search(retrieve_url(query)).group(0)
        # extract first ten pages of next results
        next_pages = re_compile('(?m)<option value="(.*)">[0-9]+</option>')
        next_pages = ["".join((self.url, page)) for page in next_pages.findall(data)[:10]]

        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)
        parser.close()

        for page in next_pages:
            parser.feed(get_table.search(retrieve_url(page)).group(0))
            parser.close()
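A quick, self-contained check of the two regexes used above; the HTML fragment is made up for illustration:

from re import compile as re_compile

html = ('<table class="lista"><tr><td>row</td></tr></table>'
        '<option value="/page2">2</option>')

get_table = re_compile(r'(?s)<table\sclass="lista".*>(.*)</table>')
next_pages = re_compile(r'(?m)<option value="(.*)">[0-9]+</option>')

print(get_table.search(html).group(0))   # the whole <table>...</table> block
print(next_pages.findall(html)[:10])     # ['/page2']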
Example #2
    def search(self, what, cat='all'):
        """ Performs search """
        #prepare query
        cat = self.supported_categories[cat.lower()]
        query = "".join((self.url, "/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&uid=0&sort=S"))

        data = retrieve_url(query)

        add_res_list = re_compile("/files.*page=[0-9]+")
        torrent_list = re_compile("start torrent list -->(.*)<!-- end torrent", DOTALL)
        data = torrent_list.search(data).group(0)
        list_results = add_res_list.findall(data)

        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)

        del data

        if list_results:
            for search_query in islice((add_res_list.search(result).group(0) for result in list_results[1].split(" | ")), 0, 5):
                response = retrieve_url(self.url + search_query)
                parser.feed(torrent_list.search(response).group(0))
                parser.close()

        return
Example #3
    def search(self, what, cat='all'):
        """ Performs search """
        connection = https("www.demonoid.pw")

        #prepare query
        cat = self.supported_categories[cat.lower()]
        query = "".join(("/files/?category=", cat, "&subcategory=All&quality=All&seeded=2&external=2&query=", what, "&to=1&uid=0&sort=S"))

        connection.request("GET", query)
        response = connection.getresponse()
        if response.status != 200:
            return

        data = response.read().decode("utf-8")
        add_res_list = re_compile("/files.*page=[0-9]+")
        torrent_list = re_compile("start torrent list -->(.*)<!-- end torrent", DOTALL)
        data = torrent_list.search(data).group(0)
        list_results = add_res_list.findall(data)

        parser = self.MyHtmlParseWithBlackJack(self.url)
        parser.feed(data)

        del data

        if list_results:
            for search_query in islice((add_res_list.search(result).group(0) for result in list_results[1].split(" | ")), 0, 5):
                connection.request("GET", search_query)
                response = connection.getresponse()
                parser.feed(torrent_list.search(response.read().decode('utf-8')).group(0))
                parser.close()

        connection.close()
        return
Example #4
	def __WriteGHDLSection(self, binPath):
		if (self._host.Platform == "Windows"):
			ghdlPath = binPath / "ghdl.exe"
		else:
			ghdlPath = binPath / "ghdl"

		if not ghdlPath.exists():
			raise ConfigurationException("Executable '{0!s}' not found.".format(ghdlPath)) from FileNotFoundError(
				str(ghdlPath))

		# get version and backend
		output = check_output([str(ghdlPath), "-v"], universal_newlines=True)
		version = None
		backend = None
		versionRegExpStr = r"^GHDL (.+?) "
		versionRegExp = re_compile(versionRegExpStr)
		backendRegExpStr = r"(?i).*(mcode|gcc|llvm).* code generator"
		backendRegExp = re_compile(backendRegExpStr)
		for line in output.split('\n'):
			if version is None:
				match = versionRegExp.match(line)
				if match is not None:
					version = match.group(1)

			if backend is None:
				match = backendRegExp.match(line)
				if match is not None:
					backend = match.group(1).lower()

		if ((version is None) or (backend is None)):
			raise ConfigurationException("Version number or back-end name not found in '{0!s} -v' output.".format(ghdlPath))

		self._host.PoCConfig[self._section]['Version'] = version
		self._host.PoCConfig[self._section]['Backend'] = backend
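A minimal check of the version regex against a sample first line of 'ghdl -v' output (the sample line is illustrative):

from re import compile as re_compile

versionRegExp = re_compile(r"^GHDL (.+?) ")
print(versionRegExp.match("GHDL 0.36 (v0.36) [Dunoon edition]").group(1))  # 0.36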
Example #5
 def __init__(self, *args, **kwargs):
     """
 Parameters
 ----------
 ``*args`` and ``**kwargs``
     Parameters that shall be used for the substitution. Note that you can
     only provide either ``*args`` or ``**kwargs``, furthermore most of the
     methods like `get_sectionsf` require ``**kwargs`` to be provided."""
     if len(args) and len(kwargs):
         raise ValueError("Only positional or keyword args are allowed")
     self.params = args or kwargs
     patterns = {}
     all_sections = self.param_like_sections + self.text_sections
     for section in self.param_like_sections:
         patterns[section] = re_compile(
             r'(?s)(?<=%s\n%s\n)(.+?)(?=\n\n\S+|$)' % (
                 section, '-'*len(section)))
     all_sections_patt = '|'.join(
         '%s\n%s\n' % (s, '-'*len(s)) for s in all_sections)
     # examples and see also
     for section in self.text_sections:
         patterns[section] = re_compile(
             r'(?s)(?<=%s\n%s\n)(.+?)(?=%s|$)' % (
                 section, '-'*len(section), all_sections_patt))
     self.patterns = patterns
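A short, self-contained sketch of what these patterns extract, using a made-up numpydoc-style docstring:

from re import compile as re_compile

doc = ("Parameters\n"
       "----------\n"
       "x : int\n"
       "    Some value.\n"
       "\n"
       "Returns\n"
       "-------\n"
       "int\n")

section = 'Parameters'
patt = re_compile(r'(?s)(?<=%s\n%s\n)(.+?)(?=\n\n\S+|$)'
                  % (section, '-' * len(section)))
print(patt.search(doc).group(0))   # "x : int\n    Some value."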
Example #6
def parse_scorematrix(name, smpath):
    with open(smpath) as fh:
        ws = re_compile(r'\s+')
        comment = re_compile(r'^\s*#')
        S = fh.read().split('\n')
        T = [s.strip() for s in S if not comment.match(s)]
        U = [ws.sub(' ', t).split(' ') for t in T if len(t) > 0]
        V = [u[1:] for u in U[1:]]
        W = [[int(w) for w in v] for v in V]
        lettersX = ''.join(U[0]).upper()
        lettersY = ''.join([u[0] for u in U[1:]]).upper()
        if len(lettersX) >= 20:
            letters = pletters
            klass = ProteinScoreMatrix
        else:
            letters = dletters
            klass = DNAScoreMatrix
        if not set(letters).issubset(set(lettersX + lettersY)):
            msg = "scoring matrix '%s' is insufficiently descriptive" % smpath
            raise RuntimeError(msg)
        if lettersX != lettersY or lettersX[:len(letters)] != letters:
            cols = [lettersX.index(l) for l in letters]
            rows = [lettersY.index(l) for l in letters]
            return klass(name, [[W[i][j] for j in cols] for i in rows])
        else:
            return klass(name, W)
Example #7
	def setExclude(self, exclude):
		if exclude:
			self._exclude = (
				[re_compile(x) for x in exclude[0]],
				[re_compile(x) for x in exclude[1]],
				[re_compile(x) for x in exclude[2]],
				exclude[3]
			)
		else:
			self._exclude = ([], [], [], [])
Example #8
	def setInclude(self, include):
		if include:
			self._include = (
				[re_compile(x) for x in include[0]],
				[re_compile(x) for x in include[1]],
				[re_compile(x) for x in include[2]],
				include[3]
			)
		else:
			self._include = ([], [], [], [])
Example #9
 def __init__(self, configuration=None, name=None):
     SimpleService.__init__(self, configuration=configuration, name=name)
     self.regex = dict(disks=re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
                                        r'(?P<total_disks>[0-9]+)/'
                                        r'(?P<inuse_disks>[0-9]+)\]'),
                       status=re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+ '
                                         r'(?P<operation>[a-z]+) =[ ]{1,2}'
                                         r'(?P<operation_status>[0-9.]+).+finish='
                                         r'(?P<finish>([0-9.]+))min speed='
                                         r'(?P<speed>[0-9]+)'))
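The disks regex above can be exercised against a made-up /proc/mdstat-style line:

from re import compile as re_compile

disks = re_compile(r' (?P<array>[a-zA-Z_0-9]+) : active .+\['
                   r'(?P<total_disks>[0-9]+)/'
                   r'(?P<inuse_disks>[0-9]+)\]')

m = disks.search(' md0 : active raid1 sdb1[1] sda1[0] [2/2]')
print(m.group('array'), m.group('total_disks'), m.group('inuse_disks'))  # md0 2 2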
Example #10
    def getphylo(self, seqs, quiet=True):

        seqs = list(seqs)

        if self.__inputfile is None or not exists(self.__inputfile):
            fd, self.__inputfile = mkstemp()
            close(fd)

        with open(self.__inputfile, "w") as fh:
            SeqIO.write(seqs, fh, "fasta")

        newick_mangle = re_compile(r"[()]")

        id_descs = {}
        mangle = re_compile(r"[^a-zA-Z0-9]+", re_I)
        for r in seqs:
            newid = mangle.sub("_", "_".join((r.id, r.description))).rstrip("_")
            id_descs[newid] = (newick_mangle.sub("_", r.id).strip("_"), r.description)

        self.queuevar("_inputFile", self.__inputfile)
        self.runqueue()

        if not quiet:
            if self.stdout != "":
                print(self.stdout, file=stderr)
            if self.warnings != "":
                print(self.warnings, file=stderr)

        if self.stderr != "":
            raise RuntimeError(self.stderr)

        tree = self.getvar("tree", HyphyInterface.STRING)
        with closing(StringIO(self.getvar("ancestors", HyphyInterface.STRING))) as fh:
            fh.seek(0)
            ancestors = AlignIO.read(fh, "fasta")

        hyphymangling = re_compile(r"_[0-9]+$")

        for r in ancestors:
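            # note: rstrip() strips a set of characters, not a literal suffix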
            key = r.id.rstrip("_unknown_description_").rstrip("_")
            if key not in id_descs:
                key = hyphymangling.sub("", key)
                if key not in id_descs:
                    continue
            # if the key exists, replace
            oldid, olddesc = id_descs[key]
            tree = tree.replace(r.id, oldid)
            r.id = oldid
            r.description = olddesc

        if tree[-1] != ";":
            tree += ";"

        return tree, ancestors
Example #11
    def valid(record, is_dna=False):
        if is_dna:
            regexp = re_compile(r'[^ACGT]')
        else:
            regexp = re_compile(r'[^ACDEFGHIKLMNPQRSTVWY]')

        seq = regexp.sub('', str(record.seq))

        record.letter_annotations.clear()
        record.seq = Seq(seq, record.seq.alphabet)

        return record
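A tiny demonstration of the character-class filter (plain strings instead of SeqRecord objects):

from re import compile as re_compile

dna_filter = re_compile(r'[^ACGT]')
print(dna_filter.sub('', 'ACGTNNNACG-T'))   # ACGTACGT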
Example #12
    def pattern(self, format):
        processed_format = ''
        regex_chars = re_compile('([\\\\.^$*+?\\(\\){}\\[\\]|])')
        format = regex_chars.sub('\\\\\\1', format)
        whitespace_replacement = re_compile('\\s+')
        format = whitespace_replacement.sub('\\s+', format)
        while '%' in format:
            directive_index = format.index('%') + 1
            processed_format = '%s%s%s' % (processed_format, format[:directive_index - 1], self[format[directive_index]])
            format = format[directive_index + 1:]

        return '%s%s' % (processed_format, format)
Example #13
	def _infer_i(self):
		import os
		from glob import glob
		from re import compile as re_compile
		pattern = re_compile(r"%[0-9]*\.?[0-9]*[uifd]")
		match = pattern.split(self.path_format)
		glob_pattern = '*'.join(match)
		fpaths = glob(os.path.join(self.recording_dir, glob_pattern))

		i_pattern = re_compile('(?<={})[0-9]*(?={})'.format(*match))
		try:
			max_i = max([int(i_pattern.findall(i)[0]) for i in fpaths])
			i = max_i + 1
		except ValueError:
			i = 0
		return i
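A self-contained run of the same index-inference logic on made-up file names, without touching the file system:

from re import compile as re_compile

path_format = "rec_%03d.avi"
pattern = re_compile(r"%[0-9]*\.?[0-9]*[uifd]")
parts = pattern.split(path_format)                     # ['rec_', '.avi']

i_pattern = re_compile('(?<={})[0-9]*(?={})'.format(*parts))
names = ['rec_000.avi', 'rec_001.avi', 'rec_007.avi']
print(max(int(i_pattern.findall(n)[0]) for n in names) + 1)   # 8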
Example #14
def preprocess_seqrecords(seqrecords):
    remove_unknown = re_compile(r'[^ACGTUWSMKRYBDHVN]', re_I)
    strip_front = re_compile(r'^[N]+', re_I)
    strip_rear = re_compile(r'[N]+$', re_I)

    for record in seqrecords:
        seq = str(record.seq)
        seq = remove_unknown.sub('', seq)
        seq = strip_front.sub('', seq)
        seq = strip_rear.sub('', seq)

        record.seq = Seq(seq, generic_nucleotide)

    return
Example #15
def process_module(rootpath, modulepath):
    """
    build the contents of fname
    """
    mods_to_process = []
    pys_to_process = []
    hidden_py_re = re_compile(r'^__.*__\.py$')
    reg_py_re = re_compile(r'.*\.py$')
    for fname in listdir(modulepath):
        new_mod = join(modulepath, fname)
        if isfile(join(new_mod, '__init__.py')):
            mods_to_process.append(new_mod)
        elif reg_py_re.match(fname) and not hidden_py_re.match(fname):
            pys_to_process.append(new_mod)
    rel_mod_path = relpath(modulepath, rootpath)
    mod_nice_name = rel_mod_path.split(sep)
    new_nice_name = mod_nice_name[-1]
    mod_nice_name = []
    if '_' in new_nice_name and new_nice_name[0] != '_':
        mod_nice_name += new_nice_name.split('_')
    else:
        mod_nice_name.append(new_nice_name)
    mod_nice_name = [x[0].upper()+x[1:] for x in mod_nice_name]
    new_nice_name = ' '.join(mod_nice_name)
    if len(mods_to_process) or len(pys_to_process):
        new_file = open("%s.rst"%(rel_mod_path.replace(sep, '.')), "w")
        new_file.write("""
%s Module
**************************************************************

"""%(new_nice_name))

        if len(mods_to_process):
            new_file.write("""
Contents:

.. toctree::
   :maxdepth: 2

""")
            for new_mod in mods_to_process:
                new_file.write("   "+relpath(new_mod, rootpath).replace(sep, '.')+"\n")
        for new_mod in pys_to_process:
            new_file.write("""
.. automodule:: %s
   :members:
   :private-members:

"""%('.'.join(relpath(new_mod, rootpath).replace(sep, '.').split('.')[1:-1])))
        new_file.close()
Example #16
	def routeFinished(self, result, retval, extra_args):
		(iface, data, callback) = extra_args
		ipRegexp = r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
		ipPattern = re_compile(ipRegexp)

		for line in result.splitlines():
			print(line[0:7])
			if line[0:7] == "0.0.0.0":
				gateway = self.regExpMatch(ipPattern, line[16:31])
				if gateway:
					data['gateway'] = self.convertIP(gateway)

		self.ifaces[iface] = data
		self.loadNetworkConfig(iface,callback)
Example #17
def tox_vim_updater():
    """ Main function """
    old_date = tuple(int(number) for number in read_config())

    connection = https("tuxproject.de")
    connection.request("GET", "/projects/vim/")
    response = connection.getresponse()

    if response.status != 200:
        print("Failed to connect. Reason:", response.reason)
        return

    data = response.read().decode('utf-8')
    check_date = re_compile("[0-9]{4,}(-[0-9]{2,}){2,}")
    check_version = re_compile(r"[0-9]+\.[0-9]+\.[0-9]+")

    result_date = check_date.search(data)
    result_date = result_date.group(0)

    date = tuple(int(number) for number in result_date.split("-"))

    if not date > old_date:
        print("Vim is up-to-date")
        return

    result_version = check_version.search(data)
    version = result_version.group(0)

    print("New build is found:")
    print("Version:", version)
    print("Build date:", result_date)

    #update config
    with open(CONFIG_FILE, "w") as config:
        config.write(result_date)

    #64bit
    connection.request("GET", "/projects/vim/complete-x64.7z")
    response = connection.getresponse()

    if response.status != 200:
        print("Failed to connect. Reason:", response.reason)
        return

    with open("vim-x64.7z", "wb") as vim_file:
        vim_file.write(response.read())

    print("Succesfully downloaded vim-x64.7z")
Example #18
def calc_by_states(df):
    "calc parameters by state"
    plotdir = make_plotdir(plotdir='cms_state_service_plots/')
    agg_fns = ['count','median']
    p_group = calc_par_group(df, agg_fns, ['provider_type','nppes_provider_state'], ['pay_per_person','pay_per_service'], print_out=False)
#   print('index level provider_types\n', p_group.index.levels[0])

    bmap = get_basemap()  # read file once for all maps
#   minmax = (1.0, 2.7)
#   im = p_group.ix['Internal Medicine']['pay_per_service']['median']
#   print('%s\n' % provider, im)
#   make_state_map(bmap, im, plotdir, 'cost_per_service_internal_medicine', 'Internal Medicine, Median Cost Per Service')

#   im = p_group.ix['General Surgery']['pay_per_service']['median']
#   print('%s\n' % provider, im)
#   make_state_map(bmap, im, plotdir, 'cost_per_service_general_surgery', 'General Surgery, Median Cost Per Service')

#   im = p_group.ix['Physical Therapist']['pay_per_service']['median']
#   print('%s\n' % provider, im)
#   make_state_map(bmap, im, plotdir, 'cost_per_service_physical_therapist', 'Physical Therapist, Median Cost Per Service')

    patr = re_compile('[ (/)]+')
#   for provider in p_group.index.levels[0]:
#       im = p_group.ix[provider]['pay_per_service']['median']
#       make_state_map(bmap, im, plotdir, 'cost_per_service_%s' % '_'.join(patr.split(provider.lower())), '%s, Median Cost Per Service' % provider)

    plotdir = make_plotdir(plotdir='cms_state_person_plots/')
    for provider in p_group.index.levels[0]:
        im = p_group.ix[provider]['pay_per_person']['median']
        make_state_map(bmap, im, plotdir, 'cost_per_person_%s' % '_'.join(patr.split(provider.lower())), '%s, Median Cost Per Person' % provider)
Example #19
def stockholm_rf_ranges(filename):
    hdr = re_compile(r'^#=GC\s+RF\s+(.+)$', re_I)
    keep = []
    with open(filename) as h:
        for line in h:
            m = hdr.match(line)
            if m:
                keep = [l not in '.-~' for l in m.group(1)]
                break
    ranges = []
    lwr = 0
    val = keep[lwr]
    for i, v in enumerate(keep):
        if v != val:
            # transition T->F, so append range
            if val:
                ranges.append((lwr, i))
            # transition F->T, so update lower bound
            else:
                lwr = i
            # update val
            val = v
    # if we have a terminal val, append final range
    if val:
        ranges.append((lwr, len(keep)))
    return ranges
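The transition logic above, run on a small hand-made keep list for illustration:

keep = [True, True, False, True]
ranges, lwr, val = [], 0, keep[0]
for i, v in enumerate(keep):
    if v != val:
        if val:                      # True -> False: close the open range
            ranges.append((lwr, i))
        else:                        # False -> True: open a new range
            lwr = i
        val = v
if val:                              # trailing True run
    ranges.append((lwr, len(keep)))
print(ranges)                        # [(0, 2), (3, 4)]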
Example #20
 def substitute_old_keys(self):
     self.vprint('replace UUIDs and remove unused UUIDs')
     key_ptn = re_compile(r'(?<=\s)([0-9A-Z]{24}|[0-9A-F]{32})(?=[\s;])')
     for line in fi_input(self.xcode_pbxproj_path, backup='.ubak', inplace=1):
         # project.pbxproj is an utf-8 encoded file
         line = line.decode('utf-8')
         key_list = key_ptn.findall(line)
         if not key_list:
             output_u8line(line)
         else:
             new_line = line
             # remove line with non-existing element
             if self.__result.get('to_be_removed') and any(
                     i for i in key_list if i in self.__result['to_be_removed']):
                 continue
             # remove incorrect entry that somehow does not exist in project node tree
             elif not all(self.__result.get(uuid) for uuid in key_list):
                 continue
             else:
                 for key in key_list:
                     new_key = self.__result[key]['new_key']
                     new_line = new_line.replace(key, new_key)
                 output_u8line(new_line)
     fi_close()
     tmp_path = self.xcode_pbxproj_path + '.ubak'
     if filecmp_cmp(self.xcode_pbxproj_path, tmp_path, shallow=False):
         unlink(self.xcode_pbxproj_path)
         rename(tmp_path, self.xcode_pbxproj_path)
         warning_print('Ignore uniquify, no changes made to "', self.xcode_pbxproj_path, sep='')
     else:
         unlink(tmp_path)
         self._is_modified = True
         success_print('Uniquify done')
Example #21
 def replace_uuids_with_file(self):
     self.vprint('replace UUIDs and remove unused UUIDs')
     uuid_ptn = re_compile(r'(?<=\s)[0-9A-F]{24}(?=[\s;])')
     for line in fi_input(self.xcode_pbxproj_path, backup='.bak', inplace=1):
         # project.pbxproj is an utf-8 encoded file
         line = line.decode('utf-8')
         uuid_list = uuid_ptn.findall(line)
         if not uuid_list:
             print(line.encode('utf-8'), end='')
         else:
             new_line = line
             # remove line with non-existing element
             if self.__result.get('to_be_removed') and any(
                     i for i in uuid_list if i in self.__result['to_be_removed']):
                 continue
             else:
                 for uuid in uuid_list:
                     new_line = new_line.replace(uuid, self.__result[uuid]['new_key'])
                 print(new_line.encode('utf-8'), end='')
     fi_close()
     tmp_path = self.xcode_pbxproj_path + '.bak'
     if filecmp_cmp(self.xcode_pbxproj_path, tmp_path, shallow=False):
         unlink(self.xcode_pbxproj_path)
         rename(tmp_path, self.xcode_pbxproj_path)
         print('Ignore uniquify, no changes made to', self.xcode_pbxproj_path)
     else:
         unlink(tmp_path)
         print('Uniquify done')
Example #22
 def __init__(self, url):
     HTMLParser.__init__(self)
     self.url = url
     self.current_item = None
     self.save_data = None
     self.seeds_leech = False
     self.size_repl = re_compile(",")
Example #23
    def read(self):
        """
        Read a TLB file
        """
        with open(self.path, 'r') as tlb_file:
            re_parser = re_compile(r'^add\s-b\s(?P<begin>\w+)\s-s\s'
                                   r'(?P<size>[\w$()-]+)\s-t\s'
                                   r'(?P<type>\w+)\s-u\s'
                                   r'(?P<uuid>[\w-]+)\s'
                                   r'-l\s(?P<label>\w+)\s-T\s'
                                   r'(?P<tag>\w+)\s-P\s(?P<part>\w+)\s'
                                   r'(?P<device>[\w/]+)\s$')

            for line in tlb_file:
                debug('TLB reading line: {0}'.format(line))
                parsed_line = re_parser.match(line)

                if parsed_line:
                    debug('TLB parsed line: {0}'.format(line))
                    debug('\t begin: {0}'.format(parsed_line.group('begin')))
                    debug('\t size: {0}'.format(parsed_line.group('size')))
                    debug('\t type: {0}'.format(parsed_line.group('type')))
                    debug('\t uuid: {0}'.format(parsed_line.group('uuid')))
                    debug('\t label: {0}'.format(parsed_line.group('label')))
                    debug('\t tag: {0}'.format(parsed_line.group('tag')))
                    debug('\t part: {0}'.format(parsed_line.group('part')))
                    debug('\t device: {0}'.format(parsed_line.group('device')))

                    ntuple = TLB_INFO(*parsed_line.groups())
                    self.append(ntuple)
                else:
                    debug('TLB not parsed line: {0}'.format(line))
Example #24
 def get_version(self):
     pop = subprocess.Popen( path(self.get_bin_path())/"gcc --version",
                                        stdout=subprocess.PIPE)
     time.sleep(1)
     output = pop.stdout.read()
     reg = re_compile(r"(\d\.\d\.\d)")
     return reg.search(output).group(1)
Example #25
def read_lammps_log(lammps_log="LOG"):
    """Method which reads a LAMMPS output log file."""

    if isinstance(lammps_log, str):
        f = open(lammps_log, 'r')
    else:
        f = lammps_log
    #thermo arguments
    thermo_args = ['step', 'temp', 'press', 'cpu', 
                        'pxx', 'pyy', 'pzz', 'pxy', 'pxz', 'pyz',
                        'ke', 'pe', 'etotal',
                        'vol', 'lx', 'ly', 'lz', 'atoms']
    thermo_mark = ' '.join([x.capitalize() for x in thermo_args[0:3]])
    f_re = r'([+-]?(?:(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?|nan|inf))'
    n = len(thermo_args)
    thermo_re = re_compile(r'^\s*' + r'\s+'.join([f_re]*n) + r'\s*$', flags=IGNORECASE)
    thermo_content = []
    line = f.readline()
    while line and "CALCULATION HAS FINISHED" not in line.strip():
        if line.startswith(thermo_mark):
            m = True
            while m:
                line = f.readline()
                m = thermo_re.match(line)
                if m:
                    # create a dictionary mapping each of the thermo_style args
                    # to its corresponding value
                    thermo_content.append(dict(zip(thermo_args, map(float, m.groups()))))
        else:
            line = f.readline()
    f.close()
    return thermo_content
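A quick check of the float-matching regex against a made-up three-column thermo line:

from re import compile as re_compile, IGNORECASE

f_re = r'([+-]?(?:(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?|nan|inf))'
thermo_re = re_compile(r'^\s*' + r'\s+'.join([f_re] * 3) + r'\s*$', flags=IGNORECASE)

m = thermo_re.match('     100    300.0   -1.5e-3')
print([float(g) for g in m.groups()])   # [100.0, 300.0, -0.0015]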
Example #26
	def backupFiles(self, param):
		filename = param
		if not filename:
			filename = self.BACKUP_FILENAME
		invalidCharacters = re_compile(r'[^A-Za-z0-9_. ]+|^\.|\.$|^ | $|^$')
		tarFilename = "%s.tar" % invalidCharacters.sub('_', filename)
		backupFilename = path.join(self.BACKUP_PATH, tarFilename)
		if path.exists(backupFilename):
			remove(backupFilename)
		checkfile = path.join(self.BACKUP_PATH,'.autotimeredit')
		f = open(checkfile, 'w')
		if f:
			files = []
			f.write('created with AutoTimerWebEditor')
			f.close()
			files.append(checkfile)
			files.append("/etc/enigma2/autotimer.xml")
			tarFiles = ""
			for arg in files:
				if not path.exists(arg):
					return (False, "Error while preparing backup file, %s does not exists." % arg)
				tarFiles += "%s " % arg
			lines = popen("tar cvf %s %s" % (backupFilename,tarFiles)).readlines()
			remove(checkfile)
			return (True, tarFilename)
		else:
			return (False, "Error while preparing backup file.")
	def save(self):
		regex = re_compile(r'(.+)\.csv$')
		new_name = regex.sub(r'\1_thetas.csv', self.file_name)
		fd = open(new_name, 'w')
		new_data = self.data_names + str(self.theta0) + ',' + str(self.theta1)
		fd.write(new_data)
		fd.close()
Example #28
	def _GetModelSimVersion(self, binPath):
		if (self._host.Platform == "Windows"):
			vsimPath = binPath / "vsim.exe"
		else:
			vsimPath = binPath / "vsim"

		if not vsimPath.exists():
			raise ConfigurationException("Executable '{0!s}' not found.".format(vsimPath)) from FileNotFoundError(
				str(vsimPath))

		# get version and backend
		try:
			output = check_output([str(vsimPath), "-version"], universal_newlines=True)
		except OSError as ex:
			raise ConfigurationException("Error while accessing '{0!s}'.".format(vsimPath)) from ex

		version = None
		versionRegExpStr = r"^.* vsim (.+?) "
		versionRegExp = re_compile(versionRegExpStr)
		for line in output.split('\n'):
			if version is None:
				match = versionRegExp.match(line)
				if match is not None:
					version = match.group(1)

		print(self._section, version)

		self._host.PoCConfig[self._section]['Version'] = version
Example #29
 def set_route(obj_):
     if not hasattr(obj_, '_sp_custom_routes'):
         obj_._sp_custom_routes = []
     if route:
         obj_._sp_custom_routes.append(route)
     if re:
         obj_._sp_custom_routes.append(re_compile(re))
Example #30
def find_most_recent(guis):
    parser = re_compile(".*?(\d+)(?:\s(\d+)|\.)").match

    # todo
    # This is a mess. Just parse all dates in guis and sort.
    # Then, return list of filenames to backup, filename to rename,
    # and most recent date parsed.
    last_date = 0
    last_version = 1
    latest = None
    others = []
    for g in guis:
        groups = parser(g).groups()
        date = groups[0]
        try:
            n = int(groups[1])
        except TypeError:
            # group 2 is None when the pattern matched via the '\.' branch
            n = None

        idate = int(date)
        if idate > last_date:
            latest = g
            last_date = idate
            last_version = n or 1
        elif idate == last_date:
            if n is not None and n > last_version:
                latest = g
                last_version = n
        else:
            others.append(g)

    return last_version, latest, last_date
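What the parser regex yields for the two file-name shapes it is meant to handle (names made up for illustration):

from re import compile as re_compile

parser = re_compile(r".*?(\d+)(?:\s(\d+)|\.)").match
print(parser("gui 20240101 2.bck").groups())   # ('20240101', '2')
print(parser("gui_20240101.bck").groups())     # ('20240101', None)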
Example #31
    def read_lammps_log(self, lammps_log=None):
        # !TODO: somehow communicate 'thermo_content' explicitly
        """Method which reads a LAMMPS output log file."""

        if lammps_log is None:
            lammps_log = self.label + ".log"

        if isinstance(lammps_log, asestring):
            fileobj = paropen(lammps_log, "wb")
            close_log_file = True
        else:
            # Expect lammps_in to be a file-like object
            fileobj = lammps_log
            close_log_file = False

        # read_log depends on the first (three) thermo_style custom args
        # being capitalized and matched against the log output, i.e.
        # don't use e.g. 'ke' or 'cpu', which are labeled KinEng and CPU.
        _custom_thermo_mark = " ".join(
            [x.capitalize() for x in self.parameters.thermo_args[0:3]])

        # !TODO: regex-magic necessary?
        # Match something which can be converted to a float
        f_re = r"([+-]?(?:(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?|nan|inf))"
        n_args = len(self.parameters["thermo_args"])
        # Create a re matching exactly N white space separated floatish things
        _custom_thermo_re = re_compile(r"^\s*" + r"\s+".join([f_re] * n_args) +
                                       r"\s*$",
                                       flags=IGNORECASE)

        thermo_content = []
        line = fileobj.readline().decode("utf-8")
        while line and line.strip() != CALCULATION_END_MARK:
            # check error
            if 'ERROR:' in line:
                if close_log_file:
                    fileobj.close()
                raise RuntimeError(
                    'LAMMPS exits with error message: {}'.format(line))

            # get thermo output
            if line.startswith(_custom_thermo_mark):
                bool_match = True
                while bool_match:
                    line = fileobj.readline().decode("utf-8")
                    bool_match = _custom_thermo_re.match(line)
                    if bool_match:
                        # create a dictionary mapping each of the
                        # thermo_style args to its corresponding value
                        thermo_content.append(
                            dict(
                                zip(
                                    self.parameters.thermo_args,
                                    map(float, bool_match.groups()),
                                )))
            else:
                line = fileobj.readline().decode("utf-8")

        if close_log_file:
            fileobj.close()

        self.thermo_content = thermo_content
Example #32
from re import compile as re_compile

from firestore import Firestore
from firestore import GoogleCalendarStorage
from response import forbidden_response
from response import settings_response
from response import text_response
from flask import url_for  # used by _oauth_url below

# The scope to request for the Google Calendar API.
GOOGLE_CALENDAR_SCOPE = 'https://www.googleapis.com/auth/calendar.readonly'

# The URL where Google Calendar access can be revoked.
ACCOUNT_ACCESS_URL = 'https://myaccount.google.com/permissions'

# The regular expression a user key has to match.
KEY_PATTERN = re_compile('^[a-zA-Z0-9]{12}$')

# The time in milliseconds to return in an unauthorized next request.
NEXT_RETRY_DELAY_MILLIS = 5 * 60 * 1000  # 5 minutes


def next_retry_response():
    """Creates a response for a next request with a fixed retry time."""

    return text_response(str(NEXT_RETRY_DELAY_MILLIS))


def _oauth_url():
    """Creates the URL handling OAuth redirects."""

    return url_for('oauth', _external=True)
Example #33
STORAGE_CONTAINER = "publicdata"
STORAGE_PATH = "assets/cms"

REPOSITORY_NAME = "coronavirus-dashboard-layouts"
REPOSITORY_BASE = f"{REPOSITORY_NAME}-{processor_settings.GITHUB_BRANCH}"
REPOSITORY_BASE_PATH = join_path(REPOSITORY_BASE, "Layouts")
REPOSITORY_ARCHIVE_URL = f"{REPOSITORY_NAME}/archive/{processor_settings.GITHUB_BRANCH}.zip".lower(
)

EXCLUDED_ITEMS = ["README.md", "LICENSE", ".github_utils", ".gitignore"]

DEV_ONLY_BLOCK_PATTERN = re_compile(r"""
    (
        (\s*[#\s]*){{\s*StartDevOnlyBlock\s*}}
        .+?
        (\2){{\s*EndDevOnlyBlock\s*}}
    )
""",
                                    flags=VERBOSE | DOTALL)

DEV_ONLY_LINE_PATTERN = re_compile(r"""
    ^(
        .*[#].*
        {{\s*DevOnly\s*}}
        .*
    )$
""",
                                   flags=VERBOSE | MULTILINE)
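A short check of the block pattern on a made-up layout snippet:

from re import compile as re_compile, VERBOSE, DOTALL

DEV_ONLY_BLOCK = re_compile(r"""
    (
        (\s*[#\s]*){{\s*StartDevOnlyBlock\s*}}
        .+?
        (\2){{\s*EndDevOnlyBlock\s*}}
    )
""", flags=VERBOSE | DOTALL)

text = ("title: Page\n"
        "# {{ StartDevOnlyBlock }}\n"
        "debug: true\n"
        "# {{ EndDevOnlyBlock }}\n"
        "footer: ok\n")
print(DEV_ONLY_BLOCK.sub("", text))   # keeps only "title: Page" and "footer: ok"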


class ProcessedLayout(NamedTuple):
Example #34
class GroupAddress(BaseAddress):
    """Class for handling KNX group addresses."""

    MAX_MAIN = 31
    MAX_MIDDLE = 7
    MAX_SUB_LONG = 255
    MAX_SUB_SHORT = 2047
    MAX_FREE = 65535

    ADDRESS_RE = re_compile(
        r'^(?P<main>\d{1,2})(/(?P<middle>\d{1,2}))?/(?P<sub>\d{1,4})$')

    def __init__(self, address, levels=GroupAddressType.LONG):
        """Initialize Address class."""
        super(GroupAddress, self).__init__()
        self.levels = levels

        if isinstance(address, str) and not address.isdigit():
            self.raw = self.__string_to_int(address)
        elif isinstance(address, str) and address.isdigit():
            self.raw = int(address)
        elif isinstance(address, tuple) and len(address) == 2:
            self.raw = address_tuple_to_int(address)
        elif isinstance(address, int):
            self.raw = address
        elif address is None:
            self.raw = 0
        else:
            raise CouldNotParseAddress(address)

        if isinstance(self.raw, int) and self.raw > 65535:
            raise CouldNotParseAddress(address)

    def __string_to_int(self, address):
        """
        Parse `address` as string to an integer and do some simple checks.

        Returns the integer representation of `address` if all checks are valid:
        * string matches against the regular expression
        * main, middle and sub are inside its range

        In any other case, we raise a `CouldNotParseAddress` exception.
        """
        match = self.ADDRESS_RE.match(address)
        if not match:
            raise CouldNotParseAddress(address)
        main = int(match.group('main'))
        middle = int(match.group('middle')) if match.group(
            'middle') is not None else None
        sub = int(match.group('sub'))
        if main > self.MAX_MAIN:
            raise CouldNotParseAddress(address)
        if middle is not None:
            if middle > self.MAX_MIDDLE:
                raise CouldNotParseAddress(address)
            if sub > self.MAX_SUB_LONG:
                raise CouldNotParseAddress(address)
        else:
            if sub > self.MAX_SUB_SHORT:
                raise CouldNotParseAddress(address)
        return (main << 11) + (middle << 8) + sub if middle is not None else (
            main << 11) + sub

    @property
    def main(self):
        """
        Return the main group part as an integer.

        Works only if the group doesn't use `GroupAddressType.FREE`; returns
        `None` in any other case.
        """
        return (
            self.raw >> 11
        ) & self.MAX_MAIN if self.levels != GroupAddressType.FREE else None

    @property
    def middle(self):
        """
        Return the middle group part as an integer.

        Works only if the group uses `GroupAddressType.LONG`, returns `None` in
        any other case.
        """
        return (
            self.raw >> 8
        ) & self.MAX_MIDDLE if self.levels == GroupAddressType.LONG else None

    @property
    def sub(self):
        """
        Return the sub group part as an integer.

        Works with any `GroupAddressType`, as we always have sub groups.
        """
        if self.levels == GroupAddressType.SHORT:
            return self.raw & self.MAX_SUB_SHORT
        elif self.levels == GroupAddressType.LONG:
            return self.raw & self.MAX_SUB_LONG
        return self.raw

    def __str__(self):
        """
        Return object as in KNX notation (e.g. '1/2/3').

        Honors the used `GroupAddressType` of this group.
        """
        if self.levels == GroupAddressType.LONG:
            return '{0.main}/{0.middle}/{0.sub}'.format(self)
        elif self.levels == GroupAddressType.SHORT:
            return '{0.main}/{0.sub}'.format(self)
        return '{0.sub}'.format(self)

    def __repr__(self):
        """Return object as readable string."""
        return 'GroupAddress("{0}")'.format(self)
class CdlUpdate(object):
    """
    class to update CDL file in a target directory from a given MKS sub project

    usage: see module header
    """
    #
    # Regular expressions for parsing section headers and options.
    #
    SECTION = re_compile(r'\['  # [
                         r'([^]]+)'  # very permissive!
                         r'\]'  # ]
                         )
    VALUEDEF = re_compile(r'([^:=\s][^:=]*)'  # very permissive!
                          r'\s*[:=]\s*'  # separator (either : or =),
                          r'(.*)$'  # everything up to eol
                          )

    def __init__(self, mks_project, target_dir, log_file=None):
        r"""
        setting up the sandbox:

        pass either the sandbox directory or the mks url of it,
        init will check if it's an existing directory and create its own sandbox if not.

        Option mks_project can be set to '' if read from cdl_files.ini file later.

        :param mks_project: sandbox dir or url of mks subproject where to find the CLD files,
        :type  mks_project: str
        :param target_dir:  directory storing the CDL files, e.g. \\lifs010\prj\ARS4D0\cdl\
        :type  target_dir:  str
        :param log_file: opt. path/file name of log file, default: <target_dir>/cdl_files.log
        :type  log_file: str
        """
        self._target_dir = target_dir
        self._sandbox = None
        self._mks_project = mks_project if mks_project else None
        self._mks = None
        self._cdl_versions = {}
        self._cdl_filename = None

        if not log_file:
            log_file = opath.join(self._target_dir, 'cdt_update.log')
        self._logger = log.Logger('CDL_update',
                                  level=log.INFO,
                                  filename=log_file)
        self._logger.info(
            'started CDL update for target {} and project {}'.format(
                target_dir, self._mks_project))

        if not self._mks_project:
            # get project from ini file if possible
            self.read_cfg()

    def _init_mks(self):
        """ internal setup mks connection to stored project

        either using existing sandbox if sandbox path is stored in _mks_project,
        or creating temporary sandbox if project name ("/nfs/projecte1/PROJECTS/.../project.pj") is stored

        :return:
        """
        if not self._mks_project:
            raise StkError('no mks project set to initiate mks!')

        self._logger.debug('setting up mks connection for {}'.format(
            self._mks_project))
        self._mks = mks_si.Si()

        if opath.exists(self._mks_project):
            self._sandbox = self._mks_project
            self._mks_project = None
        else:
            self._sandbox = mkdtemp()
            self._logger.debug('creating sandbox for {} in dir {}'.format(
                self._mks_project, self._sandbox))
            res = self._mks.createsandbox(self._mks_project,
                                          self._sandbox,
                                          skip_subpro=True)
            if res:
                self._logger.error("could not create sandbox for {}".format(
                    self._mks_project))

        # current requirement: there's just one cdl file in the directory:
        cdl_files = glob(opath.join(self._sandbox, '*.cdl'))
        if not cdl_files:
            self._logger.exception('no *.cdl file found in mks sandbox %s' %
                                   self._sandbox)
            raise StkError('no *.cdl file found in mks sandbox %s' %
                           self._sandbox)
        self._cdl_filename = cdl_files[0]

    def __del__(self):
        """delete temporary sandbox (if mks project url was initialised),
        otherwise leave passed sandbox as it is
        """
        if self._mks:
            self._logger.debug('dropping sandbox for %s' % self._mks_project)
            self._mks.dropsandbox(self._sandbox, delete='all')
            rmtree(self._sandbox, True)

    def update(self, no_checkout=False, versions=None):
        """
        update the missing files:

        - get list of CDL file labels,
        - create extended file names,
        - get list of existing files in target dir,
        - check out missing files, use list of versions if passed in call
        - write new cfg file (overwrite old one)

        needs parameters `_target_dir` and existing sandbox with cdl files in `_mks_sandbox`
        set in `__init__` method or during reading an existing cdl_files.ini

        :param no_checkout: flag to disable checkout of cdl files, default: False for backward compatibility
        :type  no_checkout: bool
        :param versions: list of sw versions to checkout cdl files for; version as declared in ini file
        :type  versions: list of str
        """
        self._init_mks()

        ini_file_name = opath.join(self._target_dir, INI_FILE_NAME)
        if opath.exists(ini_file_name):
            move(ini_file_name, ini_file_name + '.bak')

        # - get list of labels for that file
        lbl_dict = self._mks.get_file_labels(self._cdl_filename)

        # create extended list of all revisions
        ref_list_file_name = opath.join(self._target_dir, REF_LIST_FILE_NAME)
        dsc_dict = self._mks.get_revision_descriptions(self._cdl_filename)
        self._create_rev_list_file(lbl_dict, dsc_dict, ref_list_file_name)

        self._create_cdl_versions(lbl_dict)

        if not no_checkout:
            self._checkout_missing_files(versions)

        self.write_cfg(ini_file_name)

    def checkout_cdl_files(self, versions=None):
        """
        checkout files only without updating ini file

        :param versions: list of sw versions to checkout cdl files for
        :type  versions: list of str
        """
        self._init_mks()
        self._checkout_missing_files(version_list=versions)

    def _create_cdl_versions(self, lbl_dict):
        r"""
        create file names for all labels and store in internal _cdl_versions like

            {'4.3.8.1': {'Labels': ['SW_ARS4D0_04.03.08_INT-1'],
                         'Revision': '1.209.1.1.23',
                         'CDLFile': '\\lifs010\meta\ARS4D0\_CDL\ARS4D0_Appl_Release_1.209.1.1.23.cdl'}

        :param lbl_dict: dict of all checkpoints with csv of labels as returned by `stk.mks.si.Si.get_labels`
        :type  lbl_dict: dict
        :return:
        """
        # regular expressions for extracting sw version and int version from label
        # extract version and int no from "SW_ARS4D0_04.03.08_INT-1_RELEASE"
        lab_ver = re_compile(r'.*\D((\d\d)[\._](\d\d)[\._](\d\d\d?)).*'
                             )  # extract version 4.3.8 from label above
        lab_int = re_compile(r'.*\d\d[\._]\d\d[\._]\d\d(_INT-(\d+)?).*'
                             )  # extract int version '1' from label above
        self._cdl_versions = {}

        basename = opath.basename(self._cdl_filename)

        for cdl in lbl_dict:
            for label in [l.strip() for l in lbl_dict[cdl].split(',')]:
                # build version like 4.3.8.1 out of SW_ARS4D0_04.03.08_INT-1_RELEASE
                rel_match = lab_ver.match(label)
                if not rel_match:
                    self._logger.warning(
                        'cannot parse version in label name {}, '
                        'valid label format is "##.##.###_INT-#"'.format(
                            label))
                    continue
                rel_vstr = rel_match.group(1)
                rel_version = '0.' + str(int(rel_match.group(2)))
                rel_version += '.' + str(int(rel_match.group(3)))
                rel_version += '.' + str(int(rel_match.group(4)))

                rel_match = lab_int.match(label)
                if rel_match:
                    rel_int = rel_match.group(2)
                else:
                    # !!! here we decide how to handle labels without INT-x:
                    # set to '0':
                    # if INT-x is found that one is newer than labels without any INT-x extension
                    #    (01.02.03_INT-4 is newer than 01.02.03_RELEASE)
                    # set to '999':
                    # all labels without INT-x are preferred before INT-x versions
                    #    (01.02.03_test_version is newer than 01.02.03_INT-3)
                    rel_int = '0'

                filename = opath.join(
                    self._target_dir,
                    opath.splitext(basename)[0] + '_' + cdl + '.cdl')

                if rel_version not in self._cdl_versions:
                    # new sw version: add to list
                    self._cdl_versions[rel_version] = {
                        'Labels': [label],
                        'Revision': cdl,
                        'CDLFile': filename
                    }
                elif self._cdl_versions[rel_version]['Revision'] == cdl:
                    # already found sw version: if same file revision just add the label
                    self._cdl_versions[rel_version]['Labels'].append(label)
                else:
                    # same sw version but different file revision:
                    # check INT value and use biggest for this sw version
                    for stored_label in self._cdl_versions[rel_version][
                            'Labels']:
                        if lab_ver.match(stored_label) and lab_ver.match(
                                stored_label).group(1) == rel_vstr:
                            if lab_int.match(stored_label) and int(
                                    lab_int.match(stored_label).group(
                                        2)) < int(rel_int):
                                # stored int version is smaller so this is the newer version, store this
                                self._logger.info((
                                    'replace version %s with new Revision %s, label %s, '
                                    'first stored with revision %s, labels %s'
                                    %
                                    (rel_version, cdl, label, self.
                                     _cdl_versions[rel_version]['Revision'],
                                     self._cdl_versions[rel_version]['Labels'])
                                ))
                                self._cdl_versions[rel_version][
                                    'Revision'] = cdl
                                self._cdl_versions[rel_version][
                                    'CDLFile'] = filename
                                self._cdl_versions[rel_version][
                                    'Labels'].remove(stored_label)
                                self._cdl_versions[rel_version][
                                    'Labels'].append(label)
                                # found the regarding level, leave this loop
                                break
                            else:
                                self._logger.info((
                                    'version %s of label %s, Revision %s '
                                    'already stored with newer revision %s, labels %s'
                                    %
                                    (rel_version, label, cdl, self.
                                     _cdl_versions[rel_version]['Revision'],
                                     self._cdl_versions[rel_version]['Labels'])
                                ))
        return

    def _checkout_missing_files(self, version_list=None):
        """
        checkout cdl files that are not stored in _target_dir

        :param version_list: sw versions to checkout cdl files for,
                             versions as stored in cdl_files.ini with "Version ="
        :type  version_list: list of str
        """
        self._logger.info(
            'update target directory with cdl file versions of %s' %
            self._cdl_filename)
        # get list of existing files in target dir
        prj_files = u_dir.list_file_names(self._target_dir, '*.cdl')
        if version_list is None:
            versions = self._cdl_versions
        else:
            key_list = set(self._cdl_versions.keys()) & set(version_list)
            versions = {key: self._cdl_versions[key] for key in key_list}

        for cdl in versions:
            trgtname = opath.basename(self._cdl_versions[cdl]['CDLFile'])
            if trgtname not in prj_files:
                # checkout missing file and copy to project dir:
                srcfile = opath.join(self._sandbox, self._cdl_filename)
                res = self._mks.co(
                    srcfile,
                    lock=False,
                    revision=self._cdl_versions[cdl]['Revision'])
                self._logger.debug(res)

                copyfile(srcfile, opath.join(self._target_dir, trgtname))
                prj_files.append(trgtname)
                self._logger.info('added file %s' % trgtname)
        return

    def write_cfg(self, filename):
        """
        write config to a new file in win ini style

        :param filename: path and filename to write to
        """
        with open(filename, mode='w') as ini_file:
            ini_file.write(INI_FILE_HEADER)
            ini_file.write('[MksProject]\n')
            ini_file.write('ProjectName = ' + self._mks_project + '\n\n')
            for cdl in sorted(self._cdl_versions.keys()):
                ini_file.write('[VersionMapping]\n')
                ini_file.write('Version = ' + cdl + '\n')
                ini_file.write('Revision = ' +
                               self._cdl_versions[cdl]['Revision'] + '\n')
                ini_file.write('Labels = ' +
                               ', '.join(self._cdl_versions[cdl]['Labels']) +
                               '\n')
                ini_file.write('CDLFile = ' +
                               self._cdl_versions[cdl]['CDLFile'] + '\n')
                ini_file.write('\n')
        no_files = len(
            set([ver['Revision'] for ver in self._cdl_versions.values()]))
        self._logger.info(
            "new ini file created: %d versions mapped to %d files: %s" %
            (len(self._cdl_versions), no_files, filename))

    def read_cfg(self):
        """
        read ini file from target dir into internal cdl structure like::

            {'4.3.8.1': {'Labels': ['SW_ARS4D0_04.03.08_INT-1'],
                         'Revision': '1.209.1.1.23',
                         'CDLFile': 'ARS4D0_Appl_Release_1.209.1.1.23.cdl'}

        ini file structure has to follow strictly the written format::

            [VersionMapping]
            Version = 0.1.2.3
            ...

        with **Version as the first line in the section!!**

        also reads MksProject setting to store in internal path for check out::

            [MksProject]
            ProjectName = /nfs/projekte1/REPOSITORY/.../project.pj

        :return: cdl mapping
        :rtype:  dict
        """
        ini_file_name = opath.join(self._target_dir, INI_FILE_NAME)
        if not opath.exists(ini_file_name):
            # self._logger.info("Couldn't read ini file '%s', exception:\n%s" % (ini_file_name, err))
            return {}
        cdl_map = {}
        cdl_ver = ''
        mapping = False
        project = False

        with open(ini_file_name, "r") as inifile:
            for line in inifile:
                if line.strip() == '' or line[0] in '#;':
                    continue
                if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
                    # no leading whitespace
                    continue

                # skip to VersionMapping part:
                sect = self.SECTION.match(line)
                if sect and sect.group(1) == 'VersionMapping':
                    mapping = True
                    cdl_ver = ''
                    continue
                elif sect and sect.group(1) == 'MksProject':
                    project = True
                    mapping = False
                    continue

                if mapping or project:
                    setting = self.VALUEDEF.match(line)
                    opt = setting.group(1).strip()
                    val = setting.group(2).strip()
                    if opt == 'Version':
                        cdl_ver = val
                        cdl_map[cdl_ver] = {}
                    elif opt == 'ProjectName':
                        self._mks_project = setting.group(2).strip()
                        project = False
                    else:
                        if opt == 'Labels':
                            val = [v.strip() for v in val.split(',')]
                        cdl_map[cdl_ver][opt] = val
        self._cdl_versions = cdl_map
        return cdl_map

    @staticmethod
    def _create_rev_list_file(lbl_dict, dsc_dict, filename):
        """ create a file giving for all revisions the passed labels and descriptions

        :param lbl_dict: labels of revisions to save in file
        :type  lbl_dict: dict
        :param dsc_dict: descriptions of revisions (only first line expected, see code for details)
        :type  dsc_dict: dict
        :param filename: path and file name where to store
        :type  filename: str
        """
        # check that found line starts with a revision number
        # needed to keep only 1st line of multi line descriptions (assuming sw version is stored there)
        pattern = r"(^\d*\.\d*.*)$"

        # write in tab separated columns: rev, 'label eq descr', label, description (first line)
        with open(filename, 'w') as revfile:
            revfile.write('rev\teq str\tlabel\tdescription\n')
            # pass list of all revisions in descriptions and labels
            for rev in sorted(set(dsc_dict).union(lbl_dict)):
                if re_compile(pattern).search(rev):
                    revfile.write('{}\t{}\t{}\t{}\n'.format(
                        rev, ('==' if lbl_dict.get(rev, '') == dsc_dict.get(
                            rev, '') else ''), lbl_dict.get(rev, ''),
                        dsc_dict.get(rev, '')))
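The two label regexes at the heart of _create_cdl_versions, applied to the label format quoted in its docstring:

from re import compile as re_compile

lab_ver = re_compile(r'.*\D((\d\d)[\._](\d\d)[\._](\d\d\d?)).*')
lab_int = re_compile(r'.*\d\d[\._]\d\d[\._]\d\d(_INT-(\d+)?).*')

label = 'SW_ARS4D0_04.03.08_INT-1_RELEASE'
m = lab_ver.match(label)
print(m.group(1), m.group(2), m.group(3), m.group(4))   # 04.03.08 04 03 08
print(lab_int.match(label).group(2))                    # 1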
Example #36
    def changeDir(self, directory, select=None):
        self.list = []

        # if we are just entering from the list of mount points:
        if self.current_directory is None:
            if directory and self.showMountpoints:
                self.current_mountpoint = self.getMountpointLink(directory)
            else:
                self.current_mountpoint = None
        self.current_directory = directory
        directories = []
        files = []

        if directory is None and self.showMountpoints:  # present available mountpoints
            for p in harddiskmanager.getMountedPartitions():
                path = os_path.join(p.mountpoint, "")
                if path not in self.inhibitMounts and not self.inParentDirs(
                        path, self.inhibitDirs):
                    self.list.append(
                        MultiFileSelectEntryComponent(name=p.description,
                                                      absolute=path,
                                                      isDir=True))
            files = []
            directories = []
        elif directory is None:
            files = []
            directories = []
        elif self.useServiceRef:
            root = eServiceReference("2:0:1:0:0:0:0:0:0:0:" + directory)
            if self.additional_extensions:
                root.setName(self.additional_extensions)
            serviceHandler = eServiceCenter.getInstance()
            list = serviceHandler.list(root)

            while 1:
                s = list.getNext()
                if not s.valid():
                    del list
                    break
                if s.flags & s.mustDescent:
                    directories.append(s.getPath())
                else:
                    files.append(s)
            directories.sort()
            files.sort()
        else:
            if fileExists(directory):
                try:
                    files = listdir(directory)
                except OSError:
                    files = []
                files.sort()
                tmpfiles = files[:]
                for x in tmpfiles:
                    if os_path.isdir(directory + x):
                        directories.append(directory + x + "/")
                        files.remove(x)

        if directory is not None and self.showDirectories and not self.isTop:
            if directory == self.current_mountpoint and self.showMountpoints:
                self.list.append(
                    MultiFileSelectEntryComponent(
                        name="<" + _("List of Storage Devices") + ">",
                        absolute=None,
                        isDir=True))
            elif (directory != "/") and not (self.inhibitMounts
                                             and self.getMountpoint(directory)
                                             in self.inhibitMounts):
                self.list.append(
                    MultiFileSelectEntryComponent(
                        name="<" + _("Parent Directory") + ">",
                        absolute='/'.join(directory.split('/')[:-2]) + '/',
                        isDir=True))

        if self.showDirectories:
            for x in directories:
                if not (self.inhibitMounts and self.getMountpoint(x)
                        in self.inhibitMounts) and not self.inParentDirs(
                            x, self.inhibitDirs):
                    name = x.split('/')[-2]
                    alreadySelected = x in self.selectedFiles
                    self.list.append(
                        MultiFileSelectEntryComponent(name=name,
                                                      absolute=x,
                                                      isDir=True,
                                                      selected=alreadySelected))

        if self.showFiles:
            for x in files:
                if self.useServiceRef:
                    path = x.getPath()
                    name = path.split('/')[-1]
                else:
                    path = directory + x
                    name = x

                if (self.matchingPattern is None) or re_compile(
                        self.matchingPattern).search(path):
                    alreadySelected = any(
                        os_path.basename(entry) == x
                        for entry in self.selectedFiles)
                    self.list.append(
                        MultiFileSelectEntryComponent(name=name,
                                                      absolute=x,
                                                      isDir=False,
                                                      selected=alreadySelected))

        self.l.setList(self.list)

        if select is not None:
            self.moveToIndex(0)
            for i, x in enumerate(self.list):
                p = x[0][0]

                if isinstance(p, eServiceReference):
                    p = p.getPath()

                if p == select:
                    self.moveToIndex(i)
Exemple #37
    def parse(self, filename, root, watch, start, end):
        """Call the appropriate handler for each line of the file."""
        with open(filename) as lines:
            parse_line = re_compile(r'"([^"]*)",?').findall
            for line in lines:
                self.parse_header(parse_line(line), self.columns)
Exemple #38
        """Die a horrible death."""
        for polygon in self.polygons:
            MapView.remove_layer(polygon)
        self.polygons.clear()
        self.widgets.trackfile_settings.destroy()
        del self.cache[self.filename]
        TrackFile.instances.discard(self)
        points.clear()
        for trackfile in TrackFile.instances:
            points.update(trackfile.tracks)
        TrackFile.update_range()


# GPX files use ISO 8601 dates, which look like 2010-10-16T20:09:13Z.
# This regex splits that up into a list like 2010, 10, 16, 20, 09, 13.
split = re_compile(r'[:T.Z-]').split
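# A minimal sketch of the split, assuming the sample timestamp above:
# split('2010-10-16T20:09:13Z')[0:6] -> ['2010', '10', '16', '20', '09', '13']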


@memoize
class GPXFile(TrackFile):
    """Support for the open GPS eXchange format."""

    def __init__(self, filename):
        TrackFile.__init__(self, filename, 'gpx', ('trkseg', 'trkpt'))

    def element_end(self, name, state):
        """Collect and use all the parsed data."""
        try:
            timestamp = timegm(list(map(int, split(state['time'])[0:6])))
            lat = float(state['lat'])
            lon = float(state['lon'])
Exemple #39
from ..translations import categories_strings

# ============================================================================
# >> ALL DECLARATION
# ============================================================================
__all__ = (
    'parse_ini_items',
    'parse_ini_races',
    'parse_key_items',
    'parse_key_races',
)

# ============================================================================
# >> GLOBAL VARIABLES
# ============================================================================
FIX_NAME = re_compile(r'\W')


# ============================================================================
# >> CLASSES
# ============================================================================
class ImportedRace(RaceSetting):
    def __init__(self, name, type_):
        self.name = name
        self.type = type_
        self.module = None

        self.config = {}
        self.strings = {}

        self.config['categories'] = []
Exemple #40
    def init(self) -> None:
        print("*** config.json Configuration Builder ***\n")

        if isfile('config.json'):
            print("config.json already exists.\n")
            sys_exit()

        config = {}

        choice = input(
            "Do you have API keys for the Coinbase Pro exchange (1=yes, 2=no:default)? "
        )
        if choice == '1':
            self._c = 1
            config['coinbasepro'] = {}
            config['coinbasepro']['api_url'] = 'https://api.pro.coinbase.com'

            print("\n")

            while 'api_key' not in config['coinbasepro']:
                api_key = input("What is your Coinbase Pro API Key? ")
                p = re_compile(r"^[a-f0-9]{32,32}$")
                if p.match(api_key):
                    config['coinbasepro']['api_key'] = api_key

            while 'api_secret' not in config['coinbasepro']:
                api_secret = input("What is your Coinbase Pro API Secret? ")
                p = re_compile(r"^[A-z0-9+\/]+==$")
                if p.match(api_secret):
                    config['coinbasepro']['api_secret'] = api_secret

            while 'api_passphrase' not in config['coinbasepro']:
                api_passphrase = input(
                    "What is your Coinbase Pro API Passphrase? ")
                p = re_compile(r"^[a-z0-9]{10,11}$")
                if p.match(api_passphrase):
                    config['coinbasepro']['api_passphrase'] = api_passphrase

            print("\n")

            config['coinbasepro']['config'] = {}

            while 'base_currency' not in config['coinbasepro']['config']:
                base_currency = input(
                    "What is your Coinbase Pro base currency (what you are buying) E.g. BTC? "
                )
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(base_currency):
                    config['coinbasepro']['config'][
                        'base_currency'] = base_currency

            while 'quote_currency' not in config['coinbasepro']['config']:
                quote_currency = input(
                    "What is your Coinbase Pro quote currency (what you are buying with) E.g. GBP? "
                )
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(quote_currency):
                    config['coinbasepro']['config'][
                        'quote_currency'] = quote_currency

            print("\n")

            choice = input(
                "Do you want to smart switch between 1 hour and 15 minute intervals (1=yes:default, 2=no)? "
            )
            if choice == '2':
                while 'granularity' not in config['coinbasepro']['config']:
                    choice = input(
                        "What granularity do you want to trade? (60, 300, 900, 3600, 21600, 86400)? "
                    )
                    if choice.isdigit() and int(choice) in [60, 300, 900, 3600, 21600, 86400]:
                        config['coinbasepro']['config']['granularity'] = int(choice)

            print("\n")

            choice = input(
                "Do you want to start live trading? (1=live, 2=test:default)? "
            )
            if choice == '1':
                config['coinbasepro']['config']['live'] = 1
            else:
                config['coinbasepro']['config']['live'] = 0

            print("\n")

        choice = input(
            "Do you have API keys for the Binance exchange (1=yes, 2=no:default)? "
        )
        if choice == '1':
            self._b = 1
            config['binance'] = {}
            config['binance']['api_url'] = 'https://api.binance.com'

            print("\n")

            while 'api_key' not in config['binance']:
                api_key = input("What is your Binance API Key? ")
                p = re_compile(r"^[A-z0-9]{64,64}$")
                if p.match(api_key):
                    config['binance']['api_key'] = api_key

            while 'api_secret' not in config['binance']:
                api_secret = input("What is your Binance API Secret? ")
                p = re_compile(r"^[A-z0-9]{64,64}$")
                if p.match(api_secret):
                    config['binance']['api_secret'] = api_secret

            print("\n")

            config['binance']['config'] = {}

            while 'base_currency' not in config['binance']['config']:
                base_currency = input(
                    "What is your Binance base currency (what you are buying) E.g. BTC? "
                )
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(base_currency):
                    config['binance']['config'][
                        'base_currency'] = base_currency

            while 'quote_currency' not in config['binance']['config']:
                quote_currency = input(
                    "What is your Binance quote currency (what you are buying with) E.g. GBP? "
                )
                p = re_compile(r"^[A-Z0-9]{3,7}$")
                if p.match(quote_currency):
                    config['binance']['config'][
                        'quote_currency'] = quote_currency

            print("\n")

            choice = input(
                "Do you want to smart switch between 1 hour and 15 minute intervals (1=yes:default, 2=no)? "
            )
            if choice == '2':
                while 'granularity' not in config['binance']['config']:
                    choice = input(
                        "What granularity do you want to trade? (1m, 5m, 15m, 1h, 6h, 1d)? "
                    )
                    if choice in ['1m', '5m', '15m', '1h', '6h', '1d']:
                        config['binance']['config']['granularity'] = choice

            print("\n")

            choice = input(
                "Do you want to start live trading? (1=live, 2=test:default)? "
            )
            if choice == '1':
                config['binance']['config']['live'] = 1
            else:
                config['binance']['config']['live'] = 0

            print("\n")

        choice = input(
            "Do you have a Telegram Token and Client ID (1=yes, 2=no:default)? "
        )
        if choice == '1':
            self._t = 1
            config['telegram'] = {}

            print("\n")

            while 'token' not in config['telegram']:
                token = input("What is your Telegram token? ")
                p = re_compile(r"^\d{1,10}:[A-z0-9-_]{35,35}$")
                if p.match(token):
                    config['telegram']['token'] = token

            while 'client_id' not in config['telegram']:
                client_id = input("What is your Telegram client ID? ")
                p = re_compile(r"^-*\d{7,10}$")
                if p.match(client_id):
                    config['telegram']['client_id'] = client_id

            print("\n")

        choice = input(
            "Do you want to ever sell at a loss even to minimise losses (1:yes, 2=no:default)? "
        )
        if choice == '1':
            if self._c == 1:
                config['coinbasepro']['config']['sellatloss'] = 1
            if self._b == 1:
                config['binance']['config']['sellatloss'] = 1

        choice = input(
            "Do you want to sell at the next resistance? (1:yes:default, 2=no)? "
        )
        if choice != '2':
            if self._c == 1:
                config['coinbasepro']['config']['sellatresistance'] = 1
            if self._b == 1:
                config['binance']['config']['sellatresistance'] = 1

        choice = input(
            "Do you only want to trade in a bull market SMA50 > SMA200? (1:yes, 2=no:default)? "
        )
        if choice != '1':
            if self._c == 1:
                config['coinbasepro']['config']['disablebullonly'] = 1
            if self._b == 1:
                config['binance']['config']['disablebullonly'] = 1

        choice = input(
            "Do you want to avoid buying when the price is too high? (1:yes:default, 2=no)? "
        )
        if choice != '2':
            if self._c == 1:
                config['coinbasepro']['config']['disablebuynearhigh'] = 1
            if self._b == 1:
                config['binance']['config']['disablebuynearhigh'] = 1

        choice = input(
            "Do you want to disable the On-Balance Volume (OBV) technical indicator on buys? (1:yes:default, 2=no)? "
        )
        if choice != '2':
            if self._c == 1:
                config['coinbasepro']['config']['disablebuyobv'] = 1
            if self._b == 1:
                config['binance']['config']['disablebuyobv'] = 1

        choice = input(
            "Do you want to disable the Elder-Ray Index on buys? (1:yes:default, 2=no)? "
        )
        if choice != '2':
            if self._c == 1:
                config['coinbasepro']['config']['disablebuyelderray'] = 1
            if self._b == 1:
                config['binance']['config']['disablebuyelderray'] = 1

        choice = input(
            "Do you want to disable saving the CSV tracker on buy and sell events? (1:yes:default, 2=no)? "
        )
        if choice != '2':
            if self._c == 1:
                config['coinbasepro']['config']['disabletracker'] = 1
            if self._b == 1:
                config['binance']['config']['disabletracker'] = 1

        choice = input(
            "Do you want to disable writing to the log file? (1:yes, 2=no:default)? "
        )
        if choice == '1':
            if self._c == 1:
                config['coinbasepro']['config']['disablelog'] = 1
            if self._b == 1:
                config['binance']['config']['disablelog'] = 1

        choice = input(
            "Do you want the bot to auto restart itself on failure? (1:yes:default, 2=no)? "
        )
        if choice != '2':
            if self._c == 1:
                config['coinbasepro']['config']['autorestart'] = 1
            if self._b == 1:
                config['binance']['config']['autorestart'] = 1

        print("\n")

        try:
            config_json = dumps(config, indent=4)
            with open('./config.json', 'w') as fh:
                fh.write(config_json)
            print("config.json saved!\n")
        except Exception as err:
            print(err)

        return None
Exemple #41
    def _create_cdl_versions(self, lbl_dict):
        r"""
        create file names for all labels and store in internal _cdl_versions like

            {'4.3.8.1': {'Labels': ['SW_ARS4D0_04.03.08_INT-1'],
                         'Revision': '1.209.1.1.23',
                         'CDLFile': '\\lifs010\meta\ARS4D0\_CDL\ARS4D0_Appl_Release_1.209.1.1.23.cdl'}

        :param lbl_dict: dict of all checkpoints with csv of labels as returned by `stk.mks.si.Si.get_labels`
        :type  lbl_dict: dict
        :return:
        """
        # regular expressions for extracting sw version and int version from label
        # extract version and int no from "SW_ARS4D0_04.03.08_INT-1_RELEASE"
        # extract version '04.03.08' from a label like the above
        lab_ver = re_compile(r'.*\D((\d\d)[\._](\d\d)[\._](\d\d\d?)).*')
        # extract int version '1' from a label like the above
        lab_int = re_compile(r'.*\d\d[\._]\d\d[\._]\d\d(_INT-(\d+)?).*')
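        # A minimal sketch on the hypothetical label above:
        #   lab_ver.match('SW_ARS4D0_04.03.08_INT-1_RELEASE').group(1) -> '04.03.08'
        #   lab_int.match('SW_ARS4D0_04.03.08_INT-1_RELEASE').group(2) -> '1'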
        self._cdl_versions = {}

        basename = opath.basename(self._cdl_filename)

        for cdl in lbl_dict:
            for label in [l.strip() for l in lbl_dict[cdl].split(',')]:
                # build version like 4.3.8.1 out of SW_ARS4D0_04.03.08_INT-1_RELEASE
                rel_match = lab_ver.match(label)
                if not rel_match:
                    self._logger.warning(
                        'cannot parse version in label name {}, '
                        'valid label format is "##.##.###_INT-#"'.format(label))
                    continue
                rel_vstr = rel_match.group(1)
                rel_version = '0.' + str(int(rel_match.group(2)))
                rel_version += '.' + str(int(rel_match.group(3)))
                rel_version += '.' + str(int(rel_match.group(4)))

                rel_match = lab_int.match(label)
                if rel_match:
                    rel_int = rel_match.group(2)
                else:
                    # !!! here we decide how to handle labels without INT-x:
                    # set to '0':
                    # if INT-x is found that one is newer than labels without any INT-x extension
                    #    (01.02.03_INT-4 is newer than 01.02.03_RELEASE)
                    # set to '999':
                    # all labels without INT-x are preferred before INT-x versions
                    #    (01.02.03_test_version is newer than 01.02.03_INT-3)
                    rel_int = '0'

                filename = opath.join(
                    self._target_dir,
                    opath.splitext(basename)[0] + '_' + cdl + '.cdl')

                if rel_version not in self._cdl_versions:
                    # new sw version: add to list
                    self._cdl_versions[rel_version] = {
                        'Labels': [label],
                        'Revision': cdl,
                        'CDLFile': filename
                    }
                elif self._cdl_versions[rel_version]['Revision'] == cdl:
                    # already found sw version: if same file revision just add the label
                    self._cdl_versions[rel_version]['Labels'].append(label)
                else:
                    # same sw version but different file revision:
                    # check INT value and use biggest for this sw version
                    for stored_label in self._cdl_versions[rel_version]['Labels']:
                        stored_ver = lab_ver.match(stored_label)
                        if stored_ver and stored_ver.group(1) == rel_vstr:
                            stored_int = lab_int.match(stored_label)
                            if stored_int and int(stored_int.group(2)) < int(rel_int):
                                # stored int version is smaller, so this one is newer: store it
                                self._logger.info(
                                    'replace version %s with new Revision %s, label %s, '
                                    'first stored with revision %s, labels %s' % (
                                        rel_version, cdl, label,
                                        self._cdl_versions[rel_version]['Revision'],
                                        self._cdl_versions[rel_version]['Labels']))
                                self._cdl_versions[rel_version]['Revision'] = cdl
                                self._cdl_versions[rel_version]['CDLFile'] = filename
                                self._cdl_versions[rel_version]['Labels'].remove(stored_label)
                                self._cdl_versions[rel_version]['Labels'].append(label)
                                # found the matching label, leave this loop
                                break
                            else:
                                self._logger.info(
                                    'version %s of label %s, Revision %s '
                                    'already stored with newer revision %s, labels %s' % (
                                        rel_version, label, cdl,
                                        self._cdl_versions[rel_version]['Revision'],
                                        self._cdl_versions[rel_version]['Labels']))
        return
Exemple #42
- apt_key: data="{{ lookup('file', 'apt.gpg') }}" state=present

# Add an Apt signing key to a specific keyring file
- apt_key: id=473041FA url=https://ftp-master.debian.org/keys/archive-key-6.0.asc keyring=/etc/apt/trusted.gpg.d/debian.gpg state=present
'''

# FIXME: standardize into module_common
from traceback import format_exc
from re import compile as re_compile
# FIXME: standardize into module_common
from distutils.spawn import find_executable
from os import environ
from sys import exc_info
import traceback

match_key = re_compile("^gpg:.*key ([0-9a-fA-F]+):.*$")
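# A minimal sketch, assuming gpg output shaped like the pattern expects:
# match_key.match('gpg: key 473041FA: public key imported').group(1) -> '473041FA'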

REQUIRED_EXECUTABLES = ['gpg', 'grep', 'apt-key']


def check_missing_binaries(module):
    missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)]
    if missing:
        module.fail_json(msg="binaries are missing", names=missing)


def all_keys(module, keyring, short_format):
    if keyring:
        cmd = "apt-key --keyring %s adv --list-public-keys --keyid-format=long" % keyring
    else:
        cmd = "apt-key adv --list-public-keys --keyid-format=long"
Exemple #43
import funcy

# in-house libraries
import minha_lib
import minha_outra_lib

# Wildcard imports
# Wrong
from re import *

findall(r'\w+', 'eduardo foi a escola')

# Right
from re import findall

findall(r'\w+', 'eduardo foi a escola')

# Imports that shadow builtin functions
# WRONG
from re import *  # NOQA
from re import compile  # NOQA

compile()  # Builtin or import?    # NOQA

# RIGHT
import re  # NOQA
from re import compile as re_compile  # NOQA

re_compile()  # NOQA
re.compile()  # NOQA
Exemple #44
    def compile(self, format):
        """Return a compiled re object for the format string."""
        return re_compile(self.pattern(format), IGNORECASE)
Exemple #45
    "Modifier",
    "Marks",
    "Punctuation",
    "Symbols",
    "Forms",
    "Operators",
    "Miscellaneous",
    "Drawing",
    "Block",
    "Shapes",
    "Supplemental",
    "Tags",
]  # type: List[str]

RE_POSSIBLE_ENCODING_INDICATION = re_compile(
    r"(?:(?:encoding)|(?:charset)|(?:coding))(?:[\:= ]{1,10})(?:[\"\']?)([a-zA-Z0-9\-_]+)(?:[\"\']?)",
    IGNORECASE,
)
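# A minimal sketch, assuming markup such as '<meta charset="utf-8">':
# RE_POSSIBLE_ENCODING_INDICATION.search('<meta charset="utf-8">').group(1) -> 'utf-8'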

IANA_SUPPORTED = sorted(
    filter(
        lambda x: x.endswith("_codec") is False and x not in
        {"rot_13", "tactis", "mbcs"},
        list(set(aliases.values())),
    ))  # type: List[str]

IANA_SUPPORTED_COUNT = len(IANA_SUPPORTED)  # type: int

# pre-computed code pages that are similar, as determined by the cp_similarity function.
IANA_SUPPORTED_SIMILAR = {
    "cp037": ["cp1026", "cp1140", "cp273", "cp500"],
    "cp1026": ["cp037", "cp1140", "cp273", "cp500"],
Exemple #46
    def __init__(
            self,
            label='lammps',
            tmp_dir=None,
            parameters={},
            specorder=None,
            files=[],
            always_triclinic=False,
            #specorder=['Zr','Cu','Al'], files=[], always_triclinic=False,
            keep_alive=True,
            keep_tmp_files=False,
            no_data_file=False):
        """The LAMMPS calculators object

        files: list
            Short explanation XXX
        parameters: dict
            Short explanation XXX
        specorder: list
            Short explanation XXX
        keep_tmp_files: bool
            Retain any temporary files created. Mostly useful for debugging.
        tmp_dir: str
            path/dirname (default None -> create automatically).
            Explicitly control where the calculator object should create 
            its files. Using this option implies 'keep_tmp_files'
        no_data_file: bool
            Controls whether an explicit data file will be used for feeding
            atom coordinates into lammps. Enable it to lessen the pressure on
            the (tmp) file system. THIS OPTION MIGHT BE UNRELIABLE FOR CERTAIN
            CORNER CASES (however, if it fails, you will notice...).
        keep_alive: bool
            When using LAMMPS as a spawned subprocess, keep the subprocess
            alive (but idling when unused) along with the calculator object.
        always_triclinic: bool
            Force use of a triclinic cell in LAMMPS, even if the cell is
            a perfect parallelepiped.
        """

        self.label = label
        self.parameters = parameters
        self.specorder = specorder
        self.files = files
        self.always_triclinic = always_triclinic
        self.calls = 0
        self.forces = None
        self.keep_alive = keep_alive
        self.keep_tmp_files = keep_tmp_files
        self.no_data_file = no_data_file
        if tmp_dir is not None:
            # If tmp_dir is pointing somewhere, don't remove stuff!
            self.keep_tmp_files = True
        self._lmp_handle = None  # To handle the lmp process

        # read_log relies on the first (three) thermo_style custom args
        # being capitalized and matched against the log output, i.e.
        # don't use e.g. 'ke' or 'cpu', which are labeled KinEng and CPU.
        self._custom_thermo_args = [
            'step', 'temp', 'press', 'cpu', 'pxx', 'pyy', 'pzz', 'pxy', 'pxz',
            'pyz', 'ke', 'pe', 'etotal', 'vol', 'lx', 'ly', 'lz', 'atoms'
        ]
        self._custom_thermo_mark = ' '.join(
            [x.capitalize() for x in self._custom_thermo_args[0:3]])

        # Match something which can be converted to a float
        f_re = r'([+-]?(?:(?:\d+(?:\.\d*)?|\.\d+)(?:e[+-]?\d+)?|nan|inf))'
        n = len(self._custom_thermo_args)
        # Create a re matching exactly N white space separated floatish things
        self._custom_thermo_re = re_compile(r'^\s*' + r'\s+'.join([f_re] * n) +
                                            r'\s*$',
                                            flags=IGNORECASE)
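        # A minimal sanity note: f_re alone matches floatish tokens such as
        # '1', '-2.5', '3e-4' or 'nan' (IGNORECASE also covers 'NaN').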
        # thermo_content contains data "written by" thermo_style.
        # It is a list of dictionaries, each dict (one for each line
        # printed by thermo_style) contains a mapping between each
        # custom_thermo_args-argument and the corresponding
        # value as printed by lammps. thermo_content will be
        # re-populated by the read_log method.
        self.thermo_content = []

        self.pea = []

        if tmp_dir is None:
            self.tmp_dir = mkdtemp(prefix='LAMMPS-')
        else:
            self.tmp_dir = os.path.realpath(tmp_dir)
            if not os.path.isdir(self.tmp_dir):
                os.mkdir(self.tmp_dir, 0o755)

        for f in files:
            shutil.copy(f, os.path.join(self.tmp_dir, os.path.basename(f)))
Exemple #47
class PhysicalAddress(BaseAddress):
    """Class for handling KNX pyhsical addresses."""

    MAX_AREA = 15
    MAX_MAIN = 15
    MAX_LINE = 255
    ADDRESS_RE = re_compile(
        r'^(?P<area>\d{1,2})\.(?P<main>\d{1,2})\.(?P<line>\d{1,3})$')

    def __init__(self, address):
        """Initialize Address class."""
        super(PhysicalAddress, self).__init__()
        if isinstance(address, str):
            self.raw = self.__string_to_int(address)
        elif isinstance(address, tuple) and len(address) == 2:
            self.raw = address_tuple_to_int(address)
        elif isinstance(address, int):
            self.raw = address
        elif address is None:
            self.raw = 0
        else:
            raise CouldNotParseAddress(address)

        if isinstance(self.raw, int) and self.raw > 65535:
            raise CouldNotParseAddress(address)

    def __string_to_int(self, address):
        """
        Parse `address` as string to an integer and do some simple checks.

        Returns the integer representation of `address` if all checks are valid:
        * string matches against the regular expression
        * area, main and line are inside its range

        In any other case, we raise a `CouldNotParseAddress` exception.
        """
        match = self.ADDRESS_RE.match(address)
        if not match:
            raise CouldNotParseAddress(address)
        area = int(match.group('area'))
        main = int(match.group('main'))
        line = int(match.group('line'))
        if area > self.MAX_AREA or main > self.MAX_MAIN or line > self.MAX_LINE:
            raise CouldNotParseAddress(address)
        return (area << 12) + (main << 8) + line

    @property
    def area(self):
        """Return area part of pyhsical address."""
        return (self.raw >> 12) & self.MAX_AREA

    @property
    def main(self):
        """Return main part of pyhsical address."""
        return (self.raw >> 8) & self.MAX_MAIN

    @property
    def line(self):
        """Return line part of pyhsical address."""
        return self.raw & self.MAX_LINE

    @property
    def is_device(self):
        """Return `True` if this address is a valid device address."""
        return self.line != 0

    @property
    def is_line(self):
        """Return `True` if this address is a valid line address."""
        return not self.is_device

    def __repr__(self):
        """Return this object as parsable string."""
        return 'PhysicalAddress("{0.area}.{0.main}.{0.line}")'.format(self)
Exemple #48
''' Some helpers '''
from datetime import datetime, timedelta
from re import compile as re_compile

from app.db.connection import session
from app.db.models import Post

re_search = re_compile(r'(\d{1,2}) (minute|minutes|hour|hours) ago')
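# A minimal sketch, assuming a post time such as '4 minutes ago':
# re_search.match('4 minutes ago').groups() -> ('4', 'minutes')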


def aprox_time(time_cad, now=None):
    '''
        Estimate the moment a post was created from its relative time string.

        If 'just now' is in time_cad, return the current time;
        otherwise, extract the time pattern, assuming a format such as
        '4 minutes ago', i.e. '{value} {unit} ago'.

        If time_cad is not given or is empty, return the current time.

        Parameters
        ----------
        time_cad : str
            string containing when post was created

        now: datetime
            optional, it's for testing the function

        Returns
        -------
        datetime
Exemple #49
    def __init__(self):
        self._fn_pat = re_compile(r'[<>;:"/|?*\\]+')
        self._init_logger()
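        # A minimal sketch: self._fn_pat.sub('_', 'a<b>:c') -> 'a_b_c'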
Exemple #50
    def sort_pbxproj(self, sort_pbx_by_file_name=False):
        self.vprint('sort project.pbxproj file')
        lines = []
        removed_lines = []
        files_start_ptn = re_compile(r'^(\s*)files = \(\s*$')
        files_key_ptn = re_compile(r'((?<=[A-Z0-9]{24} \/\* )|(?<=[A-F0-9]{32} \/\* )).+?(?= in )')
        fc_end_ptn = r'\);'
        files_flag = False
        children_start_ptn = re_compile(r'^(\s*)children = \(\s*$')
        children_pbx_key_ptn = re_compile(r'((?<=[A-Z0-9]{24} \/\* )|(?<=[A-F0-9]{32} \/\* )).+?(?= \*\/)')
        child_flag = False
        pbx_start_ptn = re_compile(r'^.*Begin (PBXBuildFile|PBXFileReference) section.*$')
        pbx_key_ptn = re_compile(r'^\s+(([A-Z0-9]{24})|([A-F0-9]{32}))(?= \/\*)')
        pbx_end_ptn = ('^.*End ', ' section.*$')
        pbx_flag = False
        last_two = deque([])

        tempOutputFilename = self.xcode_pbxproj_path + '.tempoutput'

        def file_dir_order(x):
            x = children_pbx_key_ptn.search(x).group()
            return '.' in x, x

        orig_stdout = sys.stdout
        tempOutput = open(tempOutputFilename, 'w+')
        sys.stdout = tempOutput

        for line in fi_input(self.xcode_pbxproj_path):
            try:
                # project.pbxproj is a UTF-8 encoded file
                line = decoded_string(line, 'utf-8')
                last_two.append(line)
                if len(last_two) > 2:
                    last_two.popleft()
                # files search and sort
                files_match = files_start_ptn.search(line)
                if files_match:
                    output_u8line(line)
                    files_flag = True
                    if isinstance(fc_end_ptn, six.text_type):
                        fc_end_ptn = re_compile(files_match.group(1) + fc_end_ptn)
                if files_flag:
                    if fc_end_ptn.search(line):
                        if lines:
                            lines.sort(key=lambda file_str: files_key_ptn.search(file_str).group())
                            output_u8line(''.join(lines))
                            lines = []
                        files_flag = False
                        fc_end_ptn = r'\);'
                    elif files_key_ptn.search(line):
                        if line in lines:
                            removed_lines.append(line)
                        else:
                            lines.append(line)
                # children search and sort
                children_match = children_start_ptn.search(line)
                if children_match:
                    output_u8line(line)
                    child_flag = True
                    if isinstance(fc_end_ptn, six.text_type):
                        fc_end_ptn = re_compile(children_match.group(1) + fc_end_ptn)
                if child_flag:
                    if fc_end_ptn.search(line):
                        if lines:
                            if self.main_group_hex not in last_two[0]:
                                lines.sort(key=file_dir_order)
                            output_u8line(''.join(lines))
                            lines = []
                        child_flag = False
                        fc_end_ptn = r'\);'
                    elif children_pbx_key_ptn.search(line):
                        if line in lines:
                            removed_lines.append(line)
                        else:
                            lines.append(line)
                # PBX search and sort
                pbx_match = pbx_start_ptn.search(line)
                if pbx_match:
                    output_u8line(line)
                    pbx_flag = True
                    if isinstance(pbx_end_ptn, tuple):
                        pbx_end_ptn = re_compile(pbx_match.group(1).join(pbx_end_ptn))
                if pbx_flag:
                    if pbx_end_ptn.search(line):
                        if lines:
                            if sort_pbx_by_file_name:
                                lines.sort(key=lambda file_str: children_pbx_key_ptn.search(file_str).group())
                            else:
                                lines.sort(key=lambda file_str: pbx_key_ptn.search(file_str).group(1))
                            output_u8line(''.join(lines))
                            lines = []
                        pbx_flag = False
                        pbx_end_ptn = ('^.*End ', ' section.*$')
                    elif children_pbx_key_ptn.search(line):
                        if line in lines:
                            removed_lines.append(line)
                        else:
                            lines.append(line)
                # normal output
                if not (files_flag or child_flag or pbx_flag):
                    output_u8line(line)
            except:
                # abort current file write and resume to backup pbxproj file
                unlink(tempOutputFilename)
                raise XUniqueExit("Sort Failed")
        fi_close()
        tempOutput.close()
        sys.stdout = orig_stdout
        if filecmp_cmp(self.xcode_pbxproj_path, tempOutputFilename, shallow=False):
            unlink(tempOutputFilename)
            warning_print('Ignore sort, no changes made to "', self.xcode_pbxproj_path, '"', sep='')
        else:
            unlink(self.xcode_pbxproj_path)
            rename(tempOutputFilename, self.xcode_pbxproj_path)
            self._is_modified = True
            success_print('Sort done')
            if removed_lines:
                warning_print('Following lines were deleted because of duplication:')
                print_ng(*removed_lines, end='')
Exemple #51
# -*- coding: utf-8 -*-
from logging import getLogger
from re import compile as re_compile
from xml.etree.ElementTree import ParseError

from utils import XmlKodiSetting, reboot_kodi, language as lang
from PlexFunctions import get_plex_sections
from PlexAPI import API
import variables as v

###############################################################################
LOG = getLogger("PLEX." + __name__)

REGEX_MUSICPATH = re_compile(r'''^\^(.+)\$$''')
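# A minimal sketch, assuming a PMS path regex such as '^/music/path$':
# REGEX_MUSICPATH.match('^/music/path$').group(1) -> '/music/path'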
###############################################################################


def excludefromscan_music_folders():
    """
    Gets a complete list of paths for music libraries from the PMS. Sets them
    to be excluded in the advancedsettings.xml from being scanned by Kodi.
    Existing keys will be replaced

    Reboots Kodi if new library detected
    """
    xml = get_plex_sections()
    try:
        xml[0].attrib
    except (TypeError, IndexError, AttributeError):
        LOG.error('Could not get Plex sections')
        return
Exemple #52
#
# based (heavily) on chemical formula parser from Tim Peters
# modified (simplified to not compute molecular weights here,
# extended to support floating point, made into Parser class)
#   Matt Newville  Univ Chicago  Jan-2013
#

from re import compile as re_compile


class Element:
    def __init__(self, symbol):
        self.sym = symbol

    def add(self, weight, result):
        result[self.sym] = result.get(self.sym, 0) + weight

LEXER = re_compile(r"[A-Z][a-z]*|[0-9]+\.?[0-9]*([eE][-+]?[0-9]+)?|[()]|<EOS>").match
NAME, NUM, LPAREN, RPAREN, EOS = range(5)
BADSYM = "'{:s}' is not an element symbol"
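# A minimal sketch of the lexer on the hypothetical formula 'H2O':
# LEXER('H2O').group() -> 'H'; LEXER('H2O', 1).group() -> '2'; LEXER('H2O', 2).group() -> 'O'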

ELEMENTS = {}
for sym in ('Ac', 'Ag', 'Al', 'Am', 'Ar', 'As', 'At', 'Au', 'B', 'Ba', 'Be',
       'Bi', 'Bk', 'Br', 'C', 'Ca', 'Cd', 'Ce', 'Cf', 'Cl', 'Cm', 'Co',
       'Cr', 'Cs', 'Cu', 'Dy', 'Er', 'Es', 'Eu', 'F', 'Fe', 'Fm', 'Fr',
       'Ga', 'Gd', 'Ge', 'H', 'He', 'Hf', 'Hg', 'Ho', 'I', 'In', 'Ir', 'K',
       'Kr', 'La', 'Li', 'Lr', 'Lu', 'Md', 'Mg', 'Mn', 'Mo', 'N', 'Na',
       'Nb', 'Nd', 'Ne', 'Ni', 'No', 'Np', 'O', 'Os', 'P', 'Pa', 'Pb',
       'Pd', 'Pm', 'Po', 'Pr', 'Pt', 'Pu', 'Ra', 'Rb', 'Re', 'Rh', 'Rn',
       'Ru', 'S', 'Sb', 'Sc', 'Se', 'Si', 'Sm', 'Sn', 'Sr', 'Ta', 'Tb',
       'Tc', 'Te', 'Th', 'Ti', 'Tl', 'Tm', 'U', 'Unh', 'Unp', 'Unq', 'Uns',
       'V', 'W', 'Xe', 'Y', 'Yb', 'Zn', 'Zr'):
    ELEMENTS[sym] = Element(sym)
Exemple #53
# -*- coding: utf-8 -*-
"""Codes specifically related to PubMed inputs."""

from collections import defaultdict
from config import ncbi_api_key, ncbi_email, ncbi_tool
from datetime import datetime
import logging
from re import compile as re_compile
from threading import Thread

from requests import get as requests_get

from src.commons import dict_to_sfn_cit_ref, Name, b_TO_NUM
from src.doi import crossref

NON_DIGITS_SUB = re_compile(r'[^\d]').sub
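# A minimal sketch: NON_DIGITS_SUB('', 'PMID: 123456') -> '123456'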

NCBI_URL = (
    'https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esummary.fcgi?'
    f'api_key={ncbi_api_key}&retmode=json&tool={ncbi_tool}&email={ncbi_email}')
PUBMED_URL = NCBI_URL + '&db=pubmed&id='
PMC_URL = NCBI_URL + '&db=pmc&id='


class NCBIError(Exception):

    pass


def pmid_sfn_cit_ref(pmid: str, date_format='%Y-%m-%d') -> tuple:
    """Return the response namedtuple."""
Exemple #54
from invenio.webauthorprofile_corefunctions import canonical_name, \
            _get_person_names_dicts_bai, _get_person_names_dicts_fallback

from invenio.webauthorprofile_config import deserialize as real_deserialize
from invenio.webauthorprofile_config import CFG_WEBAUTHORPROFILE_USE_BIBAUTHORID, serialize

from invenio.bibtask import write_message, task_update_progress

import collections
import functools
import random
import gc

from zlib import decompress

year_pattern = re_compile(r'(\d{4})')
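# A minimal sketch: year_pattern.search('published in 1999').group(1) -> '1999'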
AVG_PAPERS_PER_BUNCH = 10000


class memoized(object):
    '''Decorator. Caches a function's return value each time it is called.
    If called later with the same arguments, the cached value is returned
    (not reevaluated).
    Keeps at most cache_limit elements; deletes half of the cache on overflow.
    '''
    cache_limit = 2000000

    def __init__(self, func):
        self.func = func
        self.cache = {}
Exemple #55
        from threading import current_thread
        if current_thread().name == 'MainThread':
            signal.signal(signal.SIGPIPE, signal.SIG_IGN)
except ImportError:
    pass

__all__ = ("Request", "Response", "HTTPError", "InvalidMethod", "CurlError",
           "CURL_INFO_MAP")

logger = getLogger("human_curl.core")

# DEFAULTS
DEFAULT_TIME_OUT = 15.0
STATUSES_WITH_LOCATION = (301, 302, 303, 305, 307)
PYCURL_VERSION_INFO = pycurl.version_info()
HTTP_GENERAL_RESPONSE_HEADER = re_compile(
    r"(?P<version>HTTP\/.*?)\s+(?P<code>\d{3})\s+(?P<message>.*)")

try:
    CURL_VERSION = PYCURL_VERSION_INFO[1]
except IndexError:
    CURL_VERSION = ""
    logger.warning("Unknown pycURL / cURL version")

PROXIES_TYPES_MAP = {
    'socks5': pycurl.PROXYTYPE_SOCKS5,
    'socks4': pycurl.PROXYTYPE_SOCKS4,
    'http': pycurl.PROXYTYPE_HTTP,
    'https': pycurl.PROXYTYPE_HTTP
}

# FULL LIST OF GETINFO OPTIONS
Exemple #56
HTTP_METHOD_CHOICES = (("GET", "GET"), ("POST", "POST"))

HTTP_AUTH_TYPE_CHOICES = (
    ("", "No authentication"),
    ("basic", "Basic"),
    ("digest", "Digest"),
)

AUTH_TYPE_CLASSES = {'basic': HTTPBasicAuth, 'digest': HTTPDigestAuth}

DATABASES_PATH = "/var/db/darwin"
DATABASES_OWNER = "vlt-os:vlt-conf"
DATABASES_PERMS = "644"

REGEX_GZ = re_compile("filename=\"?([^\";]+)\"?")
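# A minimal sketch, assuming a Content-Disposition value such as
# 'attachment; filename="GeoLite2.mmdb.gz"':
# REGEX_GZ.search('attachment; filename="GeoLite2.mmdb.gz"').group(1) -> 'GeoLite2.mmdb.gz'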


class ReputationContext(models.Model):
    """ Model used to enrich logs in Rsyslog with mmdb database"""
    name = models.TextField(
        default="Reputation context",
        verbose_name=_("Friendly name"),
        help_text=_("Custom name of the current object"),
    )
    """ Database type """
    db_type = models.TextField(
        default=DBTYPE_CHOICES[0][0],
        choices=DBTYPE_CHOICES,
        verbose_name=_("Database type"),
        help_text=_("Type of database"),
Exemple #57
# For convenience.
if sys.argv[-1] == "publish":
    system("python setup.py sdist upload")
    sys.exit()


def read(filename):
    kwds = {"encoding": "utf-8"} if sys.version_info[0] >= 3 else {}
    with open(filename, **kwds) as fp:
        contents = fp.read()
    return contents


# Get the version information.
here = path.abspath(path.dirname(__file__))
vre = re_compile("__version__ = \"(.*?)\"")
version = vre.findall(read(path.join(here, "snob", "__init__.py")))[0]

setup(name="snob",
      version=version,
      author="Andrew R. Casey",
      author_email="*****@*****.**",
      description="Put things in classes",
      long_description=read(path.join(here, "README.md")),
      license="MIT",
      classifiers=[
          "Intended Audience :: Science/Research",
          "License :: OSI Approved :: MIT License",
          "Operating System :: OS Independent",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.6",
Exemple #58
    help=
    "if an NP head is covered by annotations of the same type apart from its non-alpha characters, create an annotation (example: \"p16(INK4a)\")"
)
ARGPARSER.add_argument('-n', '--no-warn', action='store_true')
ARGPARSER.add_argument(
    '-g',
    '--generate',
    action='store_true',
    help=('generate additional annotations by eliminating determiners '
          '(DT), pronouns (PRP and PRP$) and cardinal numbers (CD) from '
          'the beginning of the annotation'))
ARGPARSER.add_argument('-m', '--merge', action='store_true')
ARGPARSER.add_argument('-d', '--debug', action='store_true')
ARGPARSER.add_argument('-r', '--dry-run', action='store_true')
ARGPARSER.add_argument('-v', '--verbose', action='store_true')
PTB_TAGS_REGEX = re_compile(r'\((?P<tag>[^ ]+)')
PTB_TOKENS_REGEX = re_compile(r'(?P<token>[^ )]+?)\)')
WHITESPACE_CHARS = set(whitespace)
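# A minimal sketch on the hypothetical PTB string '(S (NP (DT The) (NN dog)))':
# tags -> ['S', 'NP', 'DT', 'NN']; tokens -> ['The', 'dog']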
###


def _ptb_token_gen(ptb):
    for match in PTB_TOKENS_REGEX.finditer(ptb):
        yield match.groupdict()['token']


def _ptb_tag_gen(ptb):
    for match in PTB_TAGS_REGEX.finditer(ptb):
        yield match.groupdict()['tag']

Exemple #59
from re import match, compile as re_compile, M as RE_MULTILINE
from subprocess import PIPE
from subprocess import Popen

import sublime
import sublime_plugin

PLUGIN_PATH = os.path.join(sublime.packages_path(),
                           os.path.dirname(os.path.realpath(__file__)))

IS_ST3 = int(sublime.version()) >= 3000
IS_PY2 = sys.version_info[0] == 2

SYNTAX_ERROR_RE = re_compile(
    r'^.+?:\s(?:(?P<error>SyntaxError)):\s(?P<message>.+) \((?P<line>\d+):(?P<col>\d+)\)',
    RE_MULTILINE)

if IS_PY2:
    # st with python 2x
    from jsprettier.const import PLUGIN_CMD_NAME
    from jsprettier.const import PLUGIN_NAME
    from jsprettier.const import PRETTIER_OPTION_CLI_MAP
    from jsprettier.const import SETTINGS_FILENAME

    from jsprettier.sthelper import debug_enabled
    from jsprettier.sthelper import get_setting
    from jsprettier.sthelper import get_st_project_path
    from jsprettier.sthelper import get_sub_setting
    from jsprettier.sthelper import has_selection
    from jsprettier.sthelper import is_file_auto_formattable
Exemple #60
from subprocess import check_output
from sys import version_info
if version_info.major == 3:
    PY2 = False
    from tokenize import tokenize, STRING
else:
    PY2 = True
    from tokenize import generate_tokens as tokenize, STRING

from unidiff import PatchSet


# Regexp for a line that is allowed to be longer than the limit.
# It does not make sense to break long URLs.
# https://github.com/PyCQA/pylint/blob/d42e74bb9428f895f36808d30bd8a1fe31e28f63/pylintrc#L145
IGNORABLE_LONG_LINE = re_compile(r'\s*(# )?<?https?://\S+>?$').match
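# A minimal sketch: bool(IGNORABLE_LONG_LINE('# https://example.org/x')) -> True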

STRING_MATCH = re_compile(
    r'(?P<unicode_literal>u)?[bfr]*(?P<quote>\'+|"+)', IGNORECASE,
).match


def get_latest_patchset():
    """Return the PatchSet for the latest commit."""
    # regex from https://github.com/PyCQA/pylint/blob/master/pylintrc
    output = check_output(
        ['git', 'diff', '-U0', '@~..@'])
    return PatchSet.from_string(
        output.replace(b'\r\n', b'\n'), encoding='utf-8')