Example #1
    def __parse_output(self):
        rr_regex = recompile(self.avg_read_rate_key)
        wr_regex = recompile(self.avg_write_rate_key)

        lines = self._output.split("\n")
        for line in lines:
            if rr_regex.match(line):
                self._result_map[self.avg_read_map_key] = self.__extract_rate(line)
            if wr_regex.match(line):
                self._result_map[self.avg_write_map_key] = self.__extract_rate(line)
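A minimal standalone sketch of the line-scanning pattern above; the key name and benchmark output below are hypothetical stand-ins for self.avg_read_rate_key and self._output:

from re import compile as recompile

# hypothetical key and captured output for illustration
avg_read_rate_key = "Average read rate"
rr_regex = recompile(avg_read_rate_key)

output = "Average read rate: 120.5 MB/s\nAverage write rate: 98.2 MB/s"
for line in output.split("\n"):
    if rr_regex.match(line):
        print("matched:", line)  # matched: Average read rate: 120.5 MB/s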
Example #2
def changeNameRegex(flag, regex):
    """Set regular expression used for flagging elements based on atom names.
    See :ref:`element-flags` for the default list of flags."""
    
    if flag not in EDITORS:
        raise ValueError('{0:s} is not an editable flag'.format(repr(flag)))

    try:
        recompile(regex)
    except Exception as err:
        raise ValueError('{0:s} is not a valid regular expression, {1:s}'
                         .format(repr(regex), str(err)))
    else:
        changeDefinitions(**{flag: regex})
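The try/except around recompile above is a standard way to validate a user-supplied pattern before storing it; a minimal sketch of that check on its own, with hypothetical inputs:

from re import compile as recompile

def is_valid_regex(pattern):
    # return True if the pattern compiles, False otherwise
    try:
        recompile(pattern)
    except Exception:
        return False
    return True

# hypothetical patterns for illustration
print(is_valid_regex(r'C[AB]?'))  # True
print(is_valid_regex(r'C[AB'))    # False (unterminated character set)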
Example #3
    def get_bot_information(self, file_data):
        ret = {}
        try:
            p = recompile(r'var[\s]+\$config[\s]*=[\s]*array[\s]*\([\s]*(\"[^\"]*\"[\s]*=>.*,?[\s]*)*(//)?\);', MULTILINE)
            result = p.search(file_data)
            if result is None:
                return {}
            ret = self.get_config_values(result.group(0))
            uris = []
            server = ret['server'] if 'server' in ret else None
            server_pass = ret['pass'] if "pass" in ret else None
            port = int(ret['port']) if 'port' in ret else 6667
            chan = ret['chan'] if 'chan' in ret else None
            chan2 = ret['chan2'] if 'chan2' in ret else None
            key = ret['key'] if 'key' in ret else server_pass

            uris.append("pbot://{0}:{1}/?{2}".format(server, port, urlencode({"server_pass": server_pass,
                                                                              "chan": chan, "channel_pass": key})))
            if chan2 is not None:
                uris.append("pbot://{0}:{1}/?{2}".format(server, port, urlencode({"server_pass": server_pass,
                                                                                  "chan": chan2, "channel_pass": key})))
            ret['c2s'] = []
            for uri in uris:
                ret['c2s'].append({"c2_uri": uri})

        except KeyboardInterrupt:
            raise
        except:
            pass
        return ret
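The $config array pattern above can be exercised in isolation; the pbot source fragment below is a hypothetical stand-in for file_data:

from re import compile as recompile, MULTILINE

p = recompile(r'var[\s]+\$config[\s]*=[\s]*array[\s]*\([\s]*(\"[^\"]*\"[\s]*=>.*,?[\s]*)*(//)?\);',
              MULTILINE)

# hypothetical pbot source fragment for illustration
sample = 'var $config = array("server"=>"irc.example.com","port"=>"6667");'
m = p.search(sample)
print(m.group(0) if m else "no config block found")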
Example #4
    def purge_faulty_drives(self):
	dev_entry_re = recompile ("^dev")

	# walk the dev-sdX entries in the sysfs for a raid device and remove any
	# that are faulty.
	md_rootdir  =	'/sys/block/%s/md/' % self.get_devname()
	try:
	    dir = listdir(md_rootdir)
	    for d in dir:
		if dev_entry_re.match (d):
		    state_entry = '%s%s/state' % (md_rootdir, d) 
		    try:
			state	    = '%s' % get_sysfs_param(state_entry)
		    except (IOError, OSError):
			# ignore and continue
			continue
		
		    if state == "faulty":
			rlog_debug ('Cleaning up stale device [%s] reference in array [%s]' % (
				    d, self.get_devname()))
			# we found a disk that should have been removed but wasn't
			if not set_sysfs_param (state_entry, 'remove'):
			    rlog_notice ('Unable to remove faulty device [%s] from array [%s]' % (
					 d,
					 self.get_devname()))
	except (IOError, OSError):
	    # make sure we keep going if we have a problem; we want to try to
	    # fix any inconsistencies found
	    pass
Example #5
    def __parse_output(self):
        rt_regex = recompile(self.test_run_time_key)

        lines = self._output.split("\n")
        for line in lines:
            if rt_regex.match(line):
                self._result_map[self.test_run_time_key] = self.__extract_rate(line)
Example #6
def tar( dir = '.', glob = '.*', verbose = True ):
	if not isdir( dir ): raise ValueError( '{0} is not a directory'.format( dir ) )
	dir = abspath( dir )
	offset = len( dir ) + 1
	glob = recompile( glob )
	buf = BytesIO()
	with TarFile.open( mode = 'w', fileobj = buf, dereference = True ) as tf:
		num_files = 0
		nonempty_dirs = set()
		for base, dirs, files in walk( dir, followlinks = True ):
			if num_files > MAX_NUM_FILES: break
			for fpath in files:
				path = join( base, fpath )
				rpath = path[ offset: ]
				if glob.search( rpath ) and stat( path ).st_size < MAX_FILESIZE:
					num_files += 1
					if num_files > MAX_NUM_FILES: break
					if verbose: sys.stderr.write( rpath + '\n' )
					with open( path, 'rb' ) as f:
						ti = tf.gettarinfo( arcname = rpath, fileobj = f )
						ti.mtime = 1
						nonempty_dirs.add( dirname( path ) )
						tf.addfile( ti, fileobj = f )
		for path in nonempty_dirs:
			rpath = path[ offset: ]
			if not rpath: continue
			ti = tf.gettarinfo( name = path, arcname = rpath )
			ti.mtime = 1
			tf.addfile( ti )
	return encodestring( buf.getvalue() )
Example #7
def get_files_from_image(motherboard):
    base_ipmi_file_loc = '/opt/hal/lib/ipmi'
    ipmi_config_path   = '%s/ipmi.xml' % base_ipmi_file_loc

    if not exists (ipmi_config_path):
        raise GeneralError('IPMI configuration file %s does not exist' % ipmi_config_path)

    dom = parse (ipmi_config_path)
    entries = dom.getElementsByTagName('mobo')
    if not entries:
        raise GeneralError ('Invalid IPMI configuration file %s' % ipmi_config_path)
    
    for entry in entries:
        # look for the mobo tag and find the entry that matches
        # our motherboard via the regex in the xml node
        entry_name_pattern = entry.getAttribute('name')
        if entry_name_pattern != '':
            re = recompile(entry_name_pattern)
            if re.match(motherboard):
                try:
                    sdr = entry.getElementsByTagName('sdr')[0].getAttribute('fname')
                    fw = entry.getElementsByTagName('fw')[0].getAttribute('fname')
                except IndexError:
                    raise GeneralError ('Invalid ipmi xml file for motherboard: %s' %
                                        motherboard)
                return ('%s/%s' % (base_ipmi_file_loc, fw), 
                        '%s/%s' % (base_ipmi_file_loc, sdr))
        
    return (None, None)
Example #8
def updateDefinitions():
    """Update definitions and set some global variables.  This function must be
    called at the end of the module."""

    global DEFINITIONS, AMINOACIDS, BACKBONE, TIMESTAMP
    DEFINITIONS = {}
    user = SETTINGS.get('flag_definitions', {})
    
    # nucleics
    nucleic = set()
    for key in ['nucleobase', 'nucleoside', 'nucleotide']:
        aset = set(user.get(key, DEFAULTS[key]))
        nucleic.update(aset)
        DEFINITIONS[key] = aset
    DEFINITIONS['nucleic'] = nucleic
    
    # heteros
    for key in ['water', 'lipid', 'ion', 'sugar', 'heme', 
                 'at', 'cg', 'purine', 'pyrimidine',]:
        DEFINITIONS[key] = set(user.get(key, DEFAULTS[key]))
        
    DEFINITIONS['backbone'] = DEFINITIONS['bb'] = set(user.get('bb',
                                                           DEFAULTS['bb']))
    DEFINITIONS['backbonefull'] = DEFINITIONS['bbfull'] = set(user.get('bbfull',
                                                           DEFAULTS['bbfull']))

    # element regex
    for key in ['hydrogen', 'carbon', 'nitrogen', 'oxygen', 'sulfur']:
        DEFINITIONS[key] = recompile(user.get(key, DEFAULTS[key]))

    try:
        nonstd = SETTINGS[NONSTANDARD_KEY]
        
    except KeyError:
        nonstd = NONSTANDARD
        DEFINITIONS.update(CATEGORIZED)
    else:

        for cat in CATEGORIES:
            for key in CATEGORIES[cat]:
                DEFINITIONS[key] = set(DEFAULTS[key])

        DEFINITIONS['charged'] = set(DEFINITIONS['acidic'])
        DEFINITIONS['charged'].update(DEFINITIONS['basic'])

        for resi, props in nonstd.iteritems():
            for prop in props: 
                DEFINITIONS[prop].add(resi)

    DEFINITIONS['stdaa'] = DEFAULTS['stdaa']
    DEFINITIONS['nonstdaa'] = set(nonstd)
    AMINOACIDS = set(DEFINITIONS['stdaa'])
    AMINOACIDS.update(DEFINITIONS['nonstdaa'])
    DEFINITIONS['protein'] = DEFINITIONS['aminoacid'] = AMINOACIDS
    
    BACKBONE = DEFINITIONS['bb']

    TIMESTAMP = SETTINGS.get('flag_timestamp', 0)
Example #9
    def get_links_from_pattern(self, pat, links):
        pattern = recompile('^'+pat.upper())

        ret = []
        for l in links:
            if pattern.search(l.tnv_string.upper()):
                ret.append(l)

        return ret
Example #10
 def get_config_values(self, config):
     try:
         p = recompile(r'[\'"](?P<key>[^\'"]+)[\'"][\s]*=>[\s]*[\'"](?P<value>[^\'"]+)[\'"]', MULTILINE)
         results = p.findall(config)
         ret = {}
         for pair in results:
             ret[pair[0]] = pair[1]
         return ret
     except:
         return {}
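The key => value pattern above, run standalone against a hypothetical config block of the kind extracted by get_bot_information:

from re import compile as recompile, MULTILINE

p = recompile(r'[\'"](?P<key>[^\'"]+)[\'"][\s]*=>[\s]*[\'"](?P<value>[^\'"]+)[\'"]',
              MULTILINE)

# hypothetical config block for illustration
config = '''var $config = array(
    "server" => "irc.example.com",
    "port" => "6667",
    "chan" => "#test"
);'''
print(dict(p.findall(config)))
# {'server': 'irc.example.com', 'port': '6667', 'chan': '#test'}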
Example #11
 def get_config_values(self, config):
     try:
         p = recompile(r'[\'"](?P<key>[^\'"]+)[\'"][\s]*=>[\s]*[\'"](?P<value>[^\'"]+)[\'"]', MULTILINE)
         results = p.findall(config)
         ret = {}
         for pair in results:
             ret[unicode(pair[0], errors='ignore')] = unicode(pair[1], errors='ignore')
         return ret
     except:
         return {}
Example #12
    def get_mactab(self, nics, cards, out_path, gateway_naming):
        out_file = None
        if out_path != "":
            out_file = open(out_path, "w")

        pci_links = self.make_pci_links()
        # XXX:munir: Dump this to debug the PCI bus patterns. Easiest way
        # I found to figure interface patterns
        # for p in pci_links:
        #    p.printo()

        ob_links = self.get_links_from_pattern(self.mobo.onboard_pattern, pci_links)
        if len(ob_links) == 0:
            ifc = ifconfig(arg="-a")
            link = recompile("Link")
            for l in ifc:
                lk = link.search(l, 1)
                if lk:
                    parts = l.split()
                    if len(parts) > 4:
                        print >> out_file, "primary", parts[4]
            return

        if self.mobo.num_ifs == 2:
            if self.mobo.primary_first:
                primary = ob_links[0]
                aux = ob_links[1]
            else:
                primary = ob_links[1]
                aux = ob_links[0]

            dev_map_list = [{"device": primary, "name": "primary"}, {"device": aux, "name": "aux"}]

        else:
            # we only have primary no aux
            primary = ob_links[0]
            dev_map_list = [{"device": primary, "name": "primary"}]

        (map, nic_brand_state) = self.get_address_map(nics, cards, gateway_naming, return_map=True, get_branding=False)

        for a in map:
            for nic in dev_map_list:
                if nic["device"].memory_base == a[2]:
                    print >> out_file, nic["name"], a[1]

        if self.mobo.part_num == "VM":
            self.get_mactab_vm_slots(nics, cards, pci_links, map, out_file, gateway_naming)
            return

        for (n, s) in self.mobo.slot_patterns:
            l = self.get_links_from_pattern(s, pci_links)
            if l != []:
                card = self.get_card_from_link(nics, cards, l[0])

                self._print_rename_pairs(card, l, map, int(n), out_file, gateway_naming, False)
Example #13
 def get_bot_information(self, file_data):
     ret = {}
     try:
         p = recompile(r'var[\s]+\$config[\s]*=[\s]*array[\s]*\([\s]*(\"[^\"]*\"[\s]*=>.*,?[\s]*)*(//)?\);', MULTILINE)
         result = p.search(file_data)
         if result is None:
             return {}
         ret = self.get_config_values(result.group(0))
     except:
         pass
     return ret
Example #14
def get_mobo_hint():
    base_info = dmidecode(' | grep -A 6 \"Base Board\"')

    mobo_family = {}
    mkey = ''
    pkey = ''

    mb = recompile("^\s+Manufacturer:\s+([\w\-]+).*$")
    pr = recompile("^\s+Product\sName:\s+([\w\-]+).*$")
    for b in base_info:
        mmb = mb.match(b)
        mpr = pr.match(b)

        if mmb:
            mkey = mmb.group(1)
        if mpr:
            pkey = mpr.group(1)

    for m in mobos:
        for k in m.name_keys:
            man = "sent"
            ks =  k.split()
            if len(ks) >= 2:
                man = ks[0]
                prod = ks[1]
            else:
                prod = ks[0]

            # Virtual model motherboard config entry, skip it
            if m.virtual == "true" or m.isbob == "true":
                continue

            if man == "sent":
                if prod.upper() == pkey.upper():
                        mobo_family[m.part_num] = 1
            else:
                if man.upper() == mkey.upper() and \
                        prod.upper() == pkey.upper():
                        mobo_family[m.part_num] = 1

    print ",".join(mobo_family.keys())
Example #15
File: reader.py Project: 10sr/tbar
 def __init__(self, infile, comment="#", sep=" ", field=(1,2), regexp=None):
     self.infile = infile
     self.comment = comment
     if regexp:
         from re import compile as recompile
         self.regexp = recompile(regexp)
         self.__read_line = self.__read_line_regexp
     else:
         self.sep = sep
         self.field = field
         self.__read_line = self.__read_line_cut
     return
Example #16
    def __init__(self, call, **kwargs):
        """EventHandler initialization

            PARAM: <func> function call for handling line when matched"""

        if not callable(call):
            raise RuntimeError

        self.nick = recompile(kwargs.get('nick', '.*'))
        """nick to match
            FORMAT: <module:regex>"""

        self.ident = recompile(kwargs.get('ident', '.*'))
        """ident to match
            FORMAT: <module:regex>"""

        self.hostname = recompile(kwargs.get('hostname', '.*'))
        """hostname to match
            FORMAT: <module:regex>"""

        self.command = recompile(kwargs.get('command', '.*'))
        """command to match
            FORMAT: <module:regex>"""

        self.argument = recompile(kwargs.get('argument', '.*'))
        """argument to match
            FORMAT: <module:regex>"""

        self.message = recompile(kwargs.get('message', '.*'))
        """message to match (this is the final part of the message)
            FORMAT: <module:regex>"""

        self.call = call
        """the function to call if there is a match
Example #17
class NrfSvcDef:

    CLANGCRE = recompile(r'^\s*#elif defined\(__clang__\)\s*$')

    PATCH = r"""
--- a/nrf_svc.h (revision 4491)
+++ b/nrf_svc.h (working copy)
@@ -52,6 +52,9 @@
 #ifndef SVCALL
 #if defined (__CC_ARM)
 #define SVCALL(number, return_type, signature) return_type __svc(number) signature
+#elif defined(__clang__)
+#define SVCALL(number, return_type, signature) \
+   static inline return_type signature;
 #elif defined (__GNUC__)
 #ifdef __cplusplus
 #define GCC_CAST_CPP (uint16_t)
""".lstrip('\n')

    def __init__(self):
        pass

    def parse(self, filename: str):
        with open(filename, 'rt') as fp:
            for line in fp:
                line = line.strip()
                if self.CLANGCRE.match(line):
                    # do not account for already upgraded files
                    return False
        return True

    def apply(self, filename: str, dryrun: bool = False):
        environment = dict(environ)
        environment['LC_ALL'] = 'C'
        cwd = dirname(filename)
        args = [
            'patch', '-p1', '--no-backup-if-mismatch', '--silent',
            '--reject-file', '/dev/null'
        ]
        if dryrun:
            args.append('--dry-run')
        proc = Popen(args,
                     stdin=PIPE,
                     stdout=PIPE,
                     env=environment,
                     cwd=cwd,
                     universal_newlines=True)
        try:
            out, _ = proc.communicate(input=self.PATCH, timeout=2.0)
            print(out)
        except TimeoutExpired:
            proc.kill()
Example #18
def extract_json(content, name):
    """Extract json from netflix content page"""
    common.debug('Extracting {} JSON'.format(name))
    try:
        json_array = recompile(JSON_REGEX % name, DOTALL).findall(content)
        json_str = json_array[0]
        json_str = json_str.replace('\"', '\\"')  # Escape double-quotes
        json_str = json_str.replace('\\s', '\\\\s')  # Escape \s
        json_str = json_str.decode('unicode_escape')  # finally decoding...
        return json.loads(json_str)
    except Exception:
        common.error(traceback.format_exc())
        raise WebsiteParsingError('Unable to extract {}'.format(name))
Example #19
 def get_config_values(self, config):
     try:
         p = recompile(
             r'[\'"](?P<key>[^\'"]+)[\'"][\s]*=>[\s]*[\'"](?P<value>[^\'"]+)[\'"]',
             MULTILINE)
         results = p.findall(config)
         ret = {}
         for pair in results:
             ret[unicode(pair[0],
                         errors='ignore')] = unicode(pair[1],
                                                     errors='ignore')
         return ret
     except:
         return {}
Example #20
 def __init__(self, silent=False):
     self.websites_entries = []
     self.shared_detections = []
     self.generic_detection = []
     self.log = getLogger("social-analyzer")
     self.sites_path = path.join(path.dirname(__file__), "data", "sites.json")
     self.languages_path = path.join(path.dirname(__file__), "data", "languages.json")
     self.strings_pages = recompile('captcha-info|Please enable cookies|Completing the CAPTCHA', IGNORECASE)
     self.strings_titles = recompile('not found|blocked|attention required|cloudflare', IGNORECASE)
     self.strings_meta = recompile(r'regionsAllowed|width|height|color|rgba\(|charset|viewport|refresh|equiv|robots', IGNORECASE)
     self.top_pattern = recompile('^top([0-9]+)$', IGNORECASE)
     self.languages_json = None
     self.sites_dummy = None
     self.workers = 15
     self.custom_message = 51
     self.timeout = None
     self.waf = True
     self.logs_dir = ''
     self.ret = False
     self.headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:86.0) Gecko/20100101 Firefox/86.0", }
     self.silent = silent
     self.screenshots = None
     self.screenshots_location = None
Example #21
    def __init__ (self, header, skip_secondary=True):

        print("\nParsing sam sequences")

        # Store defined variables
        self.header = header
        self.skip_secondary = skip_secondary

        # counters
        self.count = {"total":0, "invalid":0, "primary":0, "secondary":0}

        # For cigar to tuple code conversion
        self.cigar_to_code = {'M':0, 'I':1, 'D':2, 'N':3, 'S':4, 'H':5, 'P':6, '=':7, 'X':8}
        self.cigar_regex = recompile("(\d+)([MIDNSHP=X])")
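The CIGAR pattern compiled above can be checked on its own; the CIGAR string below is a hypothetical example:

from re import compile as recompile

cigar_regex = recompile(r"(\d+)([MIDNSHP=X])")

# hypothetical CIGAR string for illustration
print(cigar_regex.findall("10M2I5D20M"))
# [('10', 'M'), ('2', 'I'), ('5', 'D'), ('20', 'M')]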
Example #22
 def __init__(self, logger, config, bootpd=None):
     self.log = logger
     self.config = config
     self.sock = []
     self.bootpd = bootpd
     self.blocksize = int(
         self.config.get(self.TFTP_SECTION, 'blocksize', '512'))
     self.timeout = float(
         self.config.get(self.TFTP_SECTION, 'timeout', '2.0'))
     self.retry = int(self.config.get(self.TFTP_SECTION, 'blocksize', '5'))
     self.root = self.config.get(self.TFTP_SECTION, 'root', os.getcwd())
     self.fcre, self.filepatterns = self.get_file_filters()
     self.genfilecre = recompile(r'\[(?P<name>[\w\.\-]+)\]')
     self._resume = False
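The generated-file pattern (genfilecre) above can be tried directly; the requested filenames below are hypothetical:

from re import compile as recompile

genfilecre = recompile(r'\[(?P<name>[\w\.\-]+)\]')

# hypothetical requested filenames for illustration
for fname in ['[startup.cfg]', 'pxelinux.0']:
    mo = genfilecre.match(fname)
    print(fname, '->', mo.group('name') if mo else None)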
Example #23
    def __init__(self, mongo_url, mongo_id, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.remove_command('help')

        self.ready = self.siritori = False

        dbclient = motor(mongo_url)
        self.collection = dbclient.kasomsgcount.first
        self.mongo_id = mongo_id

        kakasi = py_kakasi()
        kakasi.setMode('J', 'H')
        self.conv = kakasi.getConverter()

        with open('config.json', 'r', encoding='utf-8') as file:
            self.config = load(file)

        self.fm_id_regex = id_regex = recompile(
            r'(?:(?P<channel_id>[0-9]{15,21})-)?(?P<message_id>[0-9]{15,21})$')
        self.fm_link_regex = recompile(
            r'https?://(?:(ptb|canary|www)\.)?discord(?:app)?\.com/channels/'
            r'(?:[0-9]{15,21}|@me)'
            r'/(?P<channel_id>[0-9]{15,21})/(?P<message_id>[0-9]{15,21})/?$')

        self.time_remove_role_regix = recompile(
            r'(?P<user_id>[0-9]{15,21})/'
            r'(?P<role_id>[0-9]{15,21})/'
            r'(?P<datetime>[0-9]{4}\-[0-9]{2}\-[0-9]{2} [0-9]{2}\:[0-9]{2}\:[0-9]{2}\.[0-9]{6})'
        )

        # add check
        self.add_check(self.check_commands)

        # run this last
        self.load_extensions()
Example #24
    def get_mactab(self, out_path):
        out_file = None
        if out_path != '':
            out_file = open(out_path, 'w')

        nicview = self.generate_nic_view(check_branding=True)
        ob_links = nicview.get_onboard_links()
        if len(ob_links) == 0:
            ifc = ifconfig(arg = "-a")
            link = recompile("Link")
            for l in ifc:
                lk = link.search(l, 1)
                if lk:
                    parts = l.split()
                    if len(parts) > 4:
                        print >>out_file, 'primary', parts[4]
            return

        if self.mobo.num_ifs == 2 and len(ob_links) > 1:
            if self.mobo.primary_first:
                primary = ob_links[0]
                aux = ob_links[1]
            else:
                primary = ob_links[1]
                aux = ob_links[0]

            dev_map_list = [{'device':primary, 'name':'primary'},
                            {'device':aux, 'name':'aux'}]
        
        else:
            # we only have primary no aux
            primary = ob_links[0]
            dev_map_list = [{'device':primary, 'name':'primary'}]

        for i in nicview.ifcelist:
            for nic in dev_map_list:
                if nic['device'].memory_base == i.memory_base:
                    print >>out_file, nic['name'], i.mac

        slotNameMapping = self.__parse_mgmt_mac_naming()

        redir_info = self._get_redirect_info()
        for (slot, links) in nicview.get_all_slots():
            if links:
                card = self.get_card_from_link(links[0])
                self._print_rename_pairs(card, links,
                        nicview.ifcelist, slotNameMapping,int(slot), out_file,
                        False, False, redir_info)
Example #25
 def get_dns_servers(self):
     nscre = recompile(r'nameserver\s+(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\s')
     result = []
     try:
         with open('/etc/resolv.conf', 'r') as resolv:
             for line in resolv:
                 mo = nscre.match(line)
                 if mo:
                     dns = mo.group(1)
                     self.log.info('Found nameserver: %s' % dns)
                     result.append(dns)
     except Exception:
         pass
     if not result:
         self.log.info('No nameserver found')
     return result
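A standalone sketch of the nameserver pattern above; the resolv.conf lines are hypothetical, and the trailing newline is kept because the pattern requires whitespace after the address:

from re import compile as recompile

nscre = recompile(r'nameserver\s+(\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\s')

# hypothetical resolv.conf lines for illustration
for line in ["nameserver 8.8.8.8\n", "search example.com\n"]:
    mo = nscre.match(line)
    if mo:
        print("Found nameserver:", mo.group(1))  # Found nameserver: 8.8.8.8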
Example #26
	def __init__( self, basedir, *args, **kwargs ):
		self.basedir = normpath( basedir )
		self.patterns = {}
		self.readers = {}
		for kind in PATTERN_KINDS:
			pattern = getattr( self, kind.upper() + '_PATTERN' )
			if pattern:
				self.patterns[ kind ] = recompile( join( self.basedir, pattern ) )
				try:
					self.readers[ kind ] = getattr( self, kind + '_reader' )
				except AttributeError:
					self.readers[ kind ] = self.default_reader
		if LOGGER.isEnabledFor( DEBUG ):
			LOGGER.debug( 'Using the following patterns and readers...' )
			for kind, pattern in self.patterns.items():
				LOGGER.debug( 'Kind {0}, pattern = {1}, reader = {2}'.format( kind, pattern.pattern, self.readers[ kind ] ) )
Example #27
 def get_file_filters(self):
     patterns = []
     replacements = {}
     try:
         for pos, pattern in enumerate(self.config.options('filters'), 1):
             value = self.config.get('filters', pattern).strip()
             pattern = pattern.strip('\r\n \t')
             pattern = pattern.replace(r'.', r'\.')
             pattern = pattern.replace(r'*', r'.*').replace(r'?', r'.')
             pname = 'p%d' % pos
             replacements[pname] = value
             patterns.append(r'(?P<%s>%s)' % (pname, pattern))
         xre = r'^(?:\./)?(?:%s)$' % r'|'.join(patterns)
     except NoSectionError:
         xre = r'^$'
     return (recompile(xre), replacements)
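A simplified sketch of the glob-to-regex conversion above, without the ConfigParser plumbing; the patterns and paths below are hypothetical:

from re import compile as recompile

def glob_to_regex(patterns):
    # convert shell-style globs into one anchored, alternated regex,
    # mirroring the filter-building logic above
    parts = []
    for pos, pattern in enumerate(patterns, 1):
        pattern = pattern.replace(r'.', r'\.')
        pattern = pattern.replace(r'*', r'.*').replace(r'?', r'.')
        parts.append(r'(?P<p%d>%s)' % (pos, pattern))
    return recompile(r'^(?:\./)?(?:%s)$' % r'|'.join(parts))

# hypothetical filter patterns and paths for illustration
xre = glob_to_regex(['*.bin', 'pxelinux.?'])
print(bool(xre.match('./firmware.bin')))  # True
print(bool(xre.match('notes.txt')))       # False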
Example #28
 def __init__(self, basedir, *args, **kwargs):
     self.basedir = normpath(basedir)
     self.patterns = {}
     self.readers = {}
     for kind in PATTERN_KINDS:
         pattern = getattr(self, kind.upper() + '_PATTERN')
         if pattern:
             self.patterns[kind] = recompile(join(self.basedir, pattern))
             try:
                 self.readers[kind] = getattr(self, kind + '_reader')
             except AttributeError:
                 self.readers[kind] = self.default_reader
     if LOGGER.isEnabledFor(DEBUG):
         LOGGER.debug('Using the following patterns and readers...')
         for kind, pattern in self.patterns.items():
             LOGGER.debug('Kind {0}, pattern = {1}, reader = {2}'.format(
                 kind, pattern.pattern, self.readers[kind]))
Example #29
 def find_suspicious_regex(self, data):
     '''
     Not used
     '''
     for sus in self.suspicious:
         temp_list = []
         temp_var = findall(
             recompile(r'(([^\n]+)?({})([^\n]+)?)'.format(sus), I),
             self.wordsstripped)
         if len(temp_var) > 0:
             for _ in temp_var:
                 temp_list.append(_[0])
         for temp_var in set(temp_list):
             data.append({
                 "Count": temp_list.count(temp_var),
                 "Detected": temp_var
             })
Example #30
def get_raid():
    output = cat(arg = '/proc/scsi/scsi')
    raid = recompile("Vendor: (\w+)")
    for line in output:
        r = raid.search(line)
        if r:
            vendor = r.group(1)
            if vendor == 'AMCC':
                return 'TW'
            elif vendor == 'MegaRAID':
                return 'LSI'
            elif vendor == 'ATA':
                return 'NONE'
            else:
                return 'UNKNOWN'
    return 'NONE'
Example #31
    def _do_scan_scsi_mpt(self, root_dir, pattern, bus_type):
        """Return a map of all the scsi drives in the system
        """
        mpt_scsi_regex = recompile(pattern)
        scsi_dev_dir_list = listdir (root_dir)
        found_drive_count = 0

        mpt_scsi_list = {}

        for scsi_dir in scsi_dev_dir_list:
            if mpt_scsi_regex.match (scsi_dir):
                try:
                    phys_drive_info = get_sysfs_param ("%s%s/device/phys_drive_info" % (root_dir, scsi_dir))
                except Exception:
                    rlog_debug('%s%s/device/phys_drive_num went missing, don\'t insert it into the list' % (root_dir, scsi_dir))
                    continue

                try:
                    pdi_int = int(phys_drive_info)
                except ValueError:
                    continue

                if bus_type == 'scsi-mpt':
                    disk_map = self.sturgeon_disk_map
                elif bus_type == 'scsi-mpt-2':
                    disk_map = self.gar_disk_map
                elif bus_type == 'scsi-dell':
                    disk_map = self.dell_disk_map
                elif bus_type == 'scsi-rf-1u-lsi':
                    disk_map = self.rf_1u_lsi_disk_map
                elif bus_type == 'scsi-rf-2u-lsi':
                    disk_map = self.rf_2u_lsi_disk_map
                elif bus_type == 'scsi-rf-25u-lsi':
                    disk_map = self.rf_25u_lsi_disk_map
                else:
                    raise HwtoolError('Invalid bus type for scsi-mpt system [%s]' % bus_type)

                if disk_map.has_key(pdi_int):
                    phys_drive_num = disk_map[pdi_int]
                else:
                    continue

                mpt_scsi_list[phys_drive_num] = scsi_dir

        return mpt_scsi_list
Example #32
    def __init__(self):
        '''
        Initialize QBURLSimilarity, this has to pass
        '''
        self.datastruct = {"URLs": [], "_URLs": ["Distance", "URL", "Similar"]}

        self.refs = path.abspath(path.join(path.dirname(__file__), 'refs'))
        if not self.refs.endswith(path.sep):
            self.refs = self.refs + path.sep
        self.links = recompile(
            r"((?:(http|https|ftp):\/\/)?[a-zA-Z0-9]+(\.[a-zA-Z0-9-]+)+([a-zA-Z0-9_\,\'\/\+&amp;%#\$\?\=~\.\-]*[a-zA-Z0-9_\,\'\/\+&amp;%#\$\?\=~\.\-])?)",
            I)
        self.top = "http://s3-us-west-1.amazonaws.com/umbrella-static/top-1m.csv.zip"
        self.topsliced = None
        self.topdomains = None
        self.setup(self.refs)
        self.words = []
        self.wordsstripped = ""
Example #33
    def format(self):
        if not self.display:
            return ''

        pattern = recompile(r'{{\s*(\w+)\s*}}')
        text = ("%s%s" % (
            self.elem_color('root'),
            self.model['root']['text']))

        # Allow element nesting to the depth of 10
        for _ in range(10):
            match = pattern.search(text)

            if match:
                text = pattern.sub(self._elem_replace, text)
            else:
                break

        return text
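The {{ name }} placeholder pattern above can be demonstrated with a plain substitution callback; the template text and element values below are hypothetical:

from re import compile as recompile

pattern = recompile(r'{{\s*(\w+)\s*}}')

# hypothetical template text and element values for illustration
elements = {'user': 'alice', 'host': 'example.org'}
text = 'logged in as {{ user }}@{{host}}'
print(pattern.sub(lambda m: elements.get(m.group(1), ''), text))
# logged in as alice@example.org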
Example #34
def extract_json(content, name):
    """Extract json from netflix content page"""
    common.debug('Extracting {} JSON', name)
    json_str = None
    try:
        json_array = recompile(JSON_REGEX.format(name), DOTALL).findall(content.decode('utf-8'))
        json_str = json_array[0]
        json_str = json_str.replace('\"', '\\"')  # Escape double-quotes
        json_str = json_str.replace('\\s', '\\\\s')  # Escape \s
        json_str = json_str.replace('\\n', '\\\\n')  # Escape line feed
        json_str = json_str.replace('\\t', '\\\\t')  # Escape tab
        json_str = json_str.encode().decode('unicode_escape')  # finally decoding...
        return json.loads(json_str)
    except Exception:
        if json_str:
            common.error('JSON string trying to load: {}', json_str)
        import traceback
        common.error(traceback.format_exc())
        raise WebsiteParsingError('Unable to extract {}'.format(name))
Example #35
    def get_bot_information(self, file_data):
        ret = {}
        try:
            p = recompile(
                r'var[\s]+\$config[\s]*=[\s]*array[\s]*\([\s]*(\"[^\"]*\"[\s]*=>.*,?[\s]*)*(//)?\);',
                MULTILINE)
            result = p.search(file_data)
            if result is None:
                return {}
            ret = self.get_config_values(result.group(0))
            uris = []
            server = ret['server'] if 'server' in ret else None
            server_pass = ret['pass'] if "pass" in ret else None
            port = int(ret['port']) if 'port' in ret else 6667
            chan = ret['chan'] if 'chan' in ret else None
            chan2 = ret['chan2'] if 'chan2' in ret else None
            key = ret['key'] if 'key' in ret else server_pass

            uris.append("pbot://{0}:{1}/?{2}".format(
                server, port,
                urlencode({
                    "server_pass": server_pass,
                    "chan": chan,
                    "channel_pass": key
                })))
            if chan2 is not None:
                uris.append("pbot://{0}:{1}/?{2}".format(
                    server, port,
                    urlencode({
                        "server_pass": server_pass,
                        "chan": chan2,
                        "channel_pass": key
                    })))
            ret['c2s'] = []
            for uri in uris:
                ret['c2s'].append({"c2_uri": uri})

        except KeyboardInterrupt:
            raise
        except:
            pass
        return ret
Example #36
	def __init__( self, uid, timestamp ):
		temp_dir = mkdtemp( prefix = 'cu-', dir = '/tmp' )
		TAR_DATA.seek( 0 )
		with TarFile.open( mode = 'r', fileobj = TAR_DATA ) as tf: tf.extractall( temp_dir )
		with TarFile.open( join( UPLOAD_DIR, uid, timestamp + '.tar' ), mode = 'r' ) as tf: tf.extractall( temp_dir )
		re = recompile( r'.*/(?P<exercise>.+)/input-(?P<number>.*)\.txt' )
		cases_map = defaultdict(list)
		for path in glob( join( temp_dir, '*/*' ) ):
			match = re.match( path )
			if not match: continue
			gd = match.groupdict()
			cases_map[ gd[ 'exercise' ] ].append( gd[ 'number' ] )
		with open( join( UPLOAD_DIR, uid, 'SIGNATURE.tsv' ), 'r' ) as f: signature = f.read()
		self.signature = signature.strip().split( '\t' )
		self.uid = uid
		self.timestamp = timestamp
		self.temp_dir = temp_dir
		self.cases_map = cases_map
		self.makes_map = None
		self.suites_map = None
Example #37
def extract_json(content, name):
    """Extract json from netflix content page"""
    common.debug('Extracting {} JSON', name)
    json_str = None
    try:
        json_array = recompile(JSON_REGEX.format(name), DOTALL).findall(content.decode('utf-8'))
        json_str = json_array[0]
        json_str_replace = json_str.replace('\\"', '\\\\"')  # Escape double-quotes
        json_str_replace = json_str_replace.replace('\\s', '\\\\s')  # Escape \s
        json_str_replace = json_str_replace.replace('\\n', '\\\\n')  # Escape line feed
        json_str_replace = json_str_replace.replace('\\t', '\\\\t')  # Escape tab
        json_str_replace = json_str_replace.encode().decode('unicode_escape')  # Decode the string as unicode
        json_str_replace = sub(r'\\(?!["])', r'\\\\', json_str_replace)  # Escape backslash (only when is not followed by double quotation marks \")
        return json.loads(json_str_replace)
    except Exception:
        if json_str:
            common.error('JSON string trying to load: {}', json_str)
        import traceback
        common.error(traceback.format_exc())
        raise WebsiteParsingError('Unable to extract {}'.format(name))
Example #38
    def load_file(self, file_name: str) -> None:
        """
        Load ACL definition from file.

        :param file_name: Filename of acl file
        """
        acheck(str, file_name=file_name)
        line_check = recompile(
            r"^\s*(?!#)(?P<ip>[0-9a-fA-F:.]+(|(/\d{1,3})?)),(?P<acl>\d+)")

        with open(file_name, "r") as fh:
            while True:
                buff = fh.readline()
                if not buff:
                    break

                ma = line_check.match(buff)
                if not ma:
                    continue

                # Write one acl
                self.add_acl(ma.group("ip"), int(ma.group("acl")))
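The ACL line pattern above, applied to a few hypothetical lines instead of a file:

from re import compile as recompile

line_check = recompile(
    r"^\s*(?!#)(?P<ip>[0-9a-fA-F:.]+(|(/\d{1,3})?)),(?P<acl>\d+)")

# hypothetical ACL file lines for illustration
for buff in ["192.168.1.0/24,4", "# a comment", "fe80::1,1"]:
    ma = line_check.match(buff)
    if ma:
        print(ma.group("ip"), "->", int(ma.group("acl")))
# 192.168.1.0/24 -> 4
# fe80::1 -> 1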
Example #39
    def has_card(self, nics, cards, arg):
        """Given arg (a regex) return True if the card is in any slot in
        the system, or False otherwise
        """
        creg = recompile(arg)
        has_card = False

        links = self.make_pci_links()

        for (n, s) in self.mobo.slot_patterns:
            l = self.get_links_from_pattern(s, links)
            if l != []:
                card = self.get_card_from_link(nics, cards, l[0])
                if card == None:
                    part_num = "Unknown"
                else:
                    part_num = card.part_num

                if creg.match(part_num):
                    has_card = True
                    break

        return has_card
Example #40
    def has_card(self, arg):
        """Given arg (a regex) return True if the card is in any slot in
        the system, or False otherwise
        """
        creg = recompile(arg)
        has_card = False

        links = self.make_pci_links()

        nicview = self.generate_nic_view(if_only = False)
        for (slotnum, slotlinks) in nicview.get_all_slots():
            if slotlinks:
                card = self.get_card_from_link(slotlinks[0])
                if card == None:
                    part_num = "Unknown"
                else:
                    part_num = card.part_num

                if creg.match(part_num):
                    has_card = True
                    break

        return has_card
Example #41
class PidIo(ReadFile):
    '''
    PidIo handling
    '''
    FILENAME = ospath.join('proc', '%s', 'io')
    KEY = 'pidio'
    REGEX = recompile('^[a-zA-Z_]+:')

    def normalize(self):
        '''
        Translates data into dictionary

        The /proc/<pid>/io file is a number records keyed on ':' separator
        '''
        LOGGER.debug("Normalize")
        lines = self.lines
        ret = {}
        for line in lines:
            if self.REGEX.match(line):
                key, vals = line.split(':')
                val = int(vals.strip())
                ret[key] = val

        return ret
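A standalone sketch of the normalization above, with hypothetical /proc/<pid>/io lines in place of self.lines:

from re import compile as recompile

REGEX = recompile('^[a-zA-Z_]+:')

# hypothetical /proc/<pid>/io content for illustration
lines = ["rchar: 4292", "wchar: 323", "read_bytes: 0"]
ret = {}
for line in lines:
    if REGEX.match(line):
        key, vals = line.split(':')
        ret[key] = int(vals.strip())
print(ret)  # {'rchar': 4292, 'wchar': 323, 'read_bytes': 0}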
Example #42
    def __init__(self, boundary, btype, value = None, massTCoef = None, velocity = None, flowRate = None, porosity = None, timeVariation = None,
                 description = None):
        """
        Constructor with :
        - boundary :    a mesh part element of type Cartesian or Unstructured ( made of bodies)
        
        - btype :       is a string and should be "Dirichlet", "Flux", "Mixed", "Neumann"
        
                For a "symmetry", a Neumann boundary condition with g = 0 must be specified
                
        - OPTIONAL :
        
            --> value : a PhysicalQuantity or a list of tuples (PhysicalQuantity,species)
                        or a  ChemicalState

            --> massTCoef :             float : mass transfer coefficient or set to zero

            --> velocity :      object Velocity

            --> porosity :      a scalar.

            --> flowRate :      a Flowrate, see PhysicalQuantities

            --> timeVariation :     a list of tuples [(time,chemical state)] , [(time,(list of species and eventually temperature))];
                            the temperature can also be introduced through a file.
            
        -- description a string which will be eventually set as a support for the model comprehension
         
        """
    
        bcDico = makeDico(Dirichlet = [ChemicalState, Head, Displacement, NormalForce],\
                          Flux      = [ChemicalState, HeadGradient],\
                          Neumann   = [ChemicalState, HeadGradient])

        CommonBoundaryCondition.__init__(self,boundary, btype, value, bcDico, description)
#        print "dbg commonmodel CommonBoundaryCondition1"
        
        if type(boundary) is types.ListType:
#            print "type of boundary is list type "
            #raw_input("type of boundary is list type ")
            verifyClassList(boundary,[ CartesianMesh, Body])
            pass
        else:
            memberShip(boundary,[ CartesianMesh, Body])
            #raw_input("membership ")
            pass
        #raw_input("dbg commonmodel CommonBoundaryCondition2")
        self.boundary = boundary

        if type(btype) != types.StringType:
            raise TypeError, " problem on the definition of  the boundary type "
        if btype.lower() not in ["dirichlet","symmetry","flux","mixed","neumann","noflux"]: raise Exception, " check the boundary condition kind"
        
        self.btype = btype

        self.chemicalStateValue = None
        self.headValue = None
        self.massTCoef = 0.
        self.value_species = None
        self.value_property = None
        self.value = None
                                                                                            #
                                                                                            # the next ones are linked to a well sim.
                                                                                            #
        self.enthalpyBoundaryCondition     = None
        self.wellMassFlowBoundaryCondition = None
        self.wellPressureBoundaryCondition = None
                                                                                            #
                                                                                            # We treat B.C. 
                                                                                            # by default, a chemical state is introduced
                                                                                            # and in the case of a transient flow, eventually a list
                                                                                            # made of a chemical state, a displacement, a head.
                                                                                            #
        if type(value) is types.ListType:
            #
            # useful for debugging
            #
            #for i in value:
            #    print "dbg commonmodel",type(i)
            #    pass
            verifyClassList(value, [ Head, ChemicalState, Displacement, NormalForce, TupleType])
            for bc in value:
                if isinstance(bc, Head):
                    self.headValue = bc # it should be the charge
                    pass
                elif isinstance(bc, NormalForce):
                    self.normalForceValue = bc # it should be NormalForce
                    pass
                elif isinstance(bc, Displacement):
                    self.displacementValue = bc # it should be Displacement
                    pass
                elif isinstance(bc, ChemicalState):
                    self.value = bc
                    self.chemicalStateValue = bc # it should be ChemicalState
                    pass
                elif bc[0].lower() == "enthalpy":                                           # it can also be an enthalpy in the
                                                                                            # case of a well
                                                                                            #
                    if type(bc[1]) == types.StringType:
                        self.enthalpyBoundaryCondition = refindall(recompile(r'([xyzXYZ0-9.*/+-])'),bc[1])
                        pass
                    elif type(bc[1]) in [types.FloatType,types.IntType]:
                        self.enthalpyBoundaryCondition = bc[1]
                    pass
                elif bc[0].lower() == "wellpressure":                                       # it can also be the pressure in the
                                                                                            # case of a well
                                                                                            #
                    if type(bc[1]) == types.StringType:
                        self.wellPressureBoundaryCondition = refindall(recompile(r'([xyzXYZ0-9.*/+-])'),bc[1])
                        pass
                    elif type(bc[1]) in [types.FloatType,types.IntType]:
                        self.wellPressureBoundaryCondition = bc[1]
                        #print("commonmodel well pressure debug yes\n")
                        #raw_input()
                        pass
                    pass
                elif bc[0].lower() == "wellmassflow":                                       # it can also be the mass flow in the
                                                                                            # case of a well
                                                                                            #
                    if type(bc[1]) == types.StringType:
                        self.wellMassFlowBoundaryCondition = refindall(recompile(r'([$mfunction()ifelse{} ><_=xyzXYZ0-9.*/+-])'),bc[1])
                        pass
                    elif type(bc[1]) in [types.FloatType,types.IntType]:
                        self.wellMassFlowBoundaryCondition = bc[1]
                        pass
                    elif type(bc[1]) is tuple:
                        self.wellMassFlowBoundaryCondition = bc[1]
                        pass
                    pass
                else:
                    #self.value = bc # it should be chemistry
                    pass
                pass
            pass
        else:
            memberShip(value,[PhysicalQuantity, ChemicalState, Displacement, NormalForce])
            if (isinstance(value, PhysicalQuantity) or
                type(value) is types.ListType):
                self.value_species, self.value_property = createList(value, PhysicalQuantity)
                pass
            else:
                self.value = value
                self.chemicalStateValue = value
                pass
            pass
        print "massTCoef",massTCoef,type(massTCoef)
        if massTCoef:
            memberShip(massTCoef,[types.FloatType])
            if (type(massTCoef) is types.FloatType): 
                self.massTCoef = massTCoef
                pass
            else:
                self.massTCoef = 0.0
                pass
            print " common model mass transfer coefficient ",self.massTCoef
            pass

        if porosity:
            self.porosity = porosity
            pass

        if velocity:
            memberShip(velocity,Velocity)
            pass
        self.velocity = velocity

        if flowRate:
            if flowRate.__class__.__name__=="FlowRate":
                pass
            else:
                flowRate = FlowRate(flowRate,"m**3/s") # the flow rate is supposed to be in m**3/s
                pass
        self.flowRate = flowRate

        if timeVariation:
            if type(timeVariation) != types.ListType:
                raise TypeError, " Time variation should be a list"
            for item in timeVariation:
                if type(item[0]) not in [types.FloatType,types.IntType]:
                    raise TypeError, "item[0] should be a float or an int"
                memberShip(item[1],[ChemicalState])
                pass
            pass

        self.timeVariation = timeVariation
        
        return None
Example #43
        bot.say(rpl, fcfs=False, strins=titles)
    else:
        bot.say("(%s) No results found." % numresults)


def init(bot):
    global GAPI_MODULE  # oh nooooooooooooooooo

    bot.dbCheckCreateTable(
        "youtubeseen", '''CREATE TABLE youtubeseen(
			source TEXT PRIMARY KEY COLLATE NOCASE,
			id TEXT
		);''')
    GAPI_MODULE = bot.getModule("googleapi")
    return True


#mappings to methods
mappings = (
    Mapping(command=("youtube", "yt"), function=youtube),
    Mapping(types=["privmsged"],
            regex=recompile(
                r"\bhttps?\://(?:www\.)?youtu\.be\/[a-zA-Z0-9_-]{11}.*\b"),
            function=seen_video),
    Mapping(
        types=["privmsged"],
        regex=recompile(
            r"\bhttps?\://(?:www\.)?youtube\.com\/.*v\=[a-zA-Z0-9_-]{11}.*\b"),
        function=seen_video),
)
Example #44
def findinRoot(pattern='',
               root='.',
               maxsize=1,
               types=None,
               var=None,
               up=None,
               re=None,
               exclude=['pyc', 'swp', 'swo', 'gz', 'whl']):
    '''
    Search all files under root and its subdirectories for pattern; when
    found, print the matching line(s) of each file.
    
    Parameters
    ----------
    pattern : str
        the text to search for
    root : str, default '.'
        the path of the root directory to search
    maxsize : number, default 1
        maximum size of the files to be searched, in MB.
        To avoid accidentally scanning large binary files, the default
        limit is 1 MB; set to None for no size limit.
    types : str or list, default None
        restrict which file types are searched;
        str: a single type, list: several types; default: all files
    var : str or bool, default None
        use re.compile('(^|[^a-zA-Z0-9_])%s([^a-zA-Z0-9_]|$)'%var) to find var name
    up : str or bool, default None
        Ignoring letter case of variable names
    re : str or bool, default None
        use re.compile(re) to search each line
    exclude : list , default ['pyc', 'swp', 'swo',]
        exclude file types in this list
    '''
    from re import compile as recompile
    if types is not None:
        if not isinstance(types, (list, tuple)):
            types = [types]
        types = [t.replace('.', '') for t in types]

    def intypes(path):
        typee = '.' in path and path.split('.')[-1]
        if types is None:
            return typee not in exclude
        return typee in types

    if not pattern:
        pattern = var or up or re
    searchin = lambda strr: pattern in strr
    if var:
        pa = '''(^|[^a-zA-Z0-9_'"])%s([^a-zA-Z0-9_'"]|$)''' % pattern
        pa = recompile(pa)
        searchin = lambda strr: pa.search(strr)
    elif up:
        lower = pattern.lower()
        searchin = lambda strr: lower in strr.lower()
    elif re:
        pa = recompile(pattern)
        searchin = lambda strr: pa.search(strr)

    def find(path):
        if isfile(path):
            if (getsizem(path) <= maxsize
                    or maxsize is None) and intypes(path):
                try:
                    s = openread(path)
                    if py2:
                        from .toolLog import tounicode
                        s = tounicode(s)
                    lines = [(i, l) for i, l in enumerate(s.split('\n'), 1)
                             if searchin(l)]
                    if not len(lines):
                        return
                    print('"%s" in "%s" with %s' %
                          ('\x1b[31m%s\x1b[0m' % pattern, '\x1b[36m%s\x1b[0m' %
                           path, '\x1b[35m%s Lines\x1b[0m' % len(lines)))
                    for (i, l) in lines:
                        if l.startswith('   '):
                            l = '... ' + l.strip()
                        if len(l) > 83:
                            l = l[:80] + ' ...'
                        print(
                            '\t%s:%s' %
                            ('\x1b[35m%s\x1b[0m' % i, '\x1b[31m%s\x1b[0m' % l))
                    print("")
                except:
                    return

    listdirWithFun(root, find)
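The whole-word pattern built above for var searches can be checked on its own; the variable name and source lines below are hypothetical:

from re import compile as recompile

# hypothetical variable name and source lines for illustration
var = 'count'
pa = recompile('''(^|[^a-zA-Z0-9_'"])%s([^a-zA-Z0-9_'"]|$)''' % var)

for line in ['count = 1', 'word_count = 2', 'print(count)']:
    print(line, '->', bool(pa.search(line)))
# count = 1 -> True, word_count = 2 -> False, print(count) -> True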
Example #45
    except:
        # make sure contents of paste is at least dumped somewhere for recovery if need be.
        if items is not None:
            if altmsg: tmsg = altmsg % sep[1].join(items)
            else: tmsg = basemsg % sep[1].join(items)
        else:
            tmsg = basemsg
        print "ATTEMPTED PASTEHELPER MSG: %r" % tmsg
        raise


def english_list(l):
    """Stringify a list into 'arg1, arg2 and arg3', or 'arg1' if single-argument."""
    if not isinstance(l, (list, tuple)):
        l = (l, )
    if len(l) > 1:
        return "%s, and %s" % (", ".join(l[:-1]), l[-1])
    else:
        return l[0]


URLREGEX = recompile(
    r"""
\bhttps?\://					# schema
[\w.\:-]+						# domain
(?:/)?							# first path separator
(?:[\w%./_~!$&'()*+,;=:@-]+)?	# path
(?:\?[^ #\n\r]+)?				# querystring
(?:\#[^ #\n\r]+)?				# anchor (shouldn't be nested in querystring group)
""", UNICODE | IGNORECASE | VERBOSE)
Example #46
def is_ip_or_domain(s):
    if s[-1] == "/":
        s = s[:-1]
    if s[:7] == "http://":
        s = s[7:]
    s = s.split(":")[0]
    p = recompile("^[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]$")
    if p.match(s) is not None:
        return True
    # todo IPv6 check
    tlds = ["AC", "ACADEMY", "ACTOR", "AD", "AE", "AERO", "AF", "AG", "AGENCY", "AI", "AL", "AM", "AN", "AO", "AQ",
            "AR", "ARPA",
            "AS", "ASIA", "AT", "AU", "AW", "AX", "AZ", "BA", "BAR", "BARGAINS", "BB", "BD", "BE", "BERLIN", "BEST",
            "BF", "BG",
            "BH", "BI", "BID", "BIKE", "BIZ", "BJ", "BLUE", "BM", "BN", "BO", "BOUTIQUE", "BR", "BS", "BT", "BUILD",
            "BUILDERS",
            "BUZZ", "BV", "BW", "BY", "BZ", "CA", "CAB", "CAMERA", "CAMP", "CARDS", "CAREERS", "CAT", "CATERING",
            "CC", "CD",
            "CENTER", "CEO", "CF", "CG", "CH", "CHEAP", "CHRISTMAS", "CI", "CK", "CL", "CLEANING", "CLOTHING",
            "CLUB", "CM",
            "CN", "CO", "CODES", "COFFEE", "COM", "COMMUNITY", "COMPANY", "COMPUTER", "CONDOS", "CONSTRUCTION",
            "CONTRACTORS",
            "COOL", "COOP", "CR", "CRUISES", "CU", "CV", "CW", "CX", "CY", "CZ", "DANCE", "DATING", "DE",
            "DEMOCRAT", "DIAMONDS",
            "DIRECTORY", "DJ", "DK", "DM", "DNP", "DO", "DOMAINS", "DZ", "EC", "EDU", "EDUCATION", "EE", "EG",
            "EMAIL",
            "ENTERPRISES", "EQUIPMENT", "ER", "ES", "ESTATE", "ET", "EU", "EVENTS", "EXPERT", "EXPOSED", "FARM",
            "FI", "FISH",
            "FJ", "FK", "FLIGHTS", "FLORIST", "FM", "FO", "FOUNDATION", "FR", "FUTBOL", "GA", "GALLERY", "GB", "GD",
            "GE", "GF",
            "GG", "GH", "GI", "GIFT", "GL", "GLASS", "GM", "GN", "GOV", "GP", "GQ", "GR", "GRAPHICS", "GS", "GT",
            "GU", "GUITARS",
            "GURU", "GW", "GY", "HK", "HM", "HN", "HOLDINGS", "HOLIDAY", "HOUSE", "HR", "HT", "HU", "ID", "IE",
            "IL", "IM",
            "IMMOBILIEN", "IN", "INDUSTRIES", "INFO", "INK", "INSTITUTE", "INT", "INTERNATIONAL", "IO", "IQ", "IR",
            "IS", "IT",
            "JE", "JM", "JO", "JOBS", "JP", "KAUFEN", "KE", "KG", "KH", "KI", "KIM", "KITCHEN", "KIWI", "KM", "KN",
            "KOELN", "KP",
            "KR", "KRED", "KW", "KY", "KZ", "LA", "LAND", "LB", "LC", "LI", "LIGHTING", "LIMO", "LINK", "LK", "LR",
            "LS", "LT", "LU",
            "LUXURY", "LV", "LY", "MA", "MAISON", "MANAGEMENT", "MANGO", "MARKETING", "MC", "MD", "ME", "MENU",
            "MG", "MH", "MIL",
            "MK", "ML", "MM", "MN", "MO", "MOBI", "MODA", "MONASH", "MP", "MQ", "MR", "MS", "MT", "MU", "MUSEUM",
            "MV", "MW", "MX",
            "MY", "MZ", "NA", "NAGOYA", "NAME", "NC", "NE", "NET", "NEUSTAR", "NF", "NG", "NI", "NINJA", "NL", "NO",
            "NP", "NR",
            "NU", "NZ", "OKINAWA", "OM", "ONL", "ORG", "PA", "PARTNERS", "PARTS", "PE", "PF", "PG", "PH", "PHOTO",
            "PHOTOGRAPHY",
            "PHOTOS", "PICS", "PINK", "PK", "PL", "PLUMBING", "PM", "PN", "POST", "PR", "PRO", "PRODUCTIONS",
            "PROPERTIES", "PS",
            "PT", "PUB", "PW", "PY", "QA", "QPON", "RE", "RECIPES", "RED", "RENTALS", "REPAIR", "REPORT", "REVIEWS",
            "RICH", "RO",
            "RS", "RU", "RUHR", "RW", "SA", "SB", "SC", "SD", "SE", "SEXY", "SG", "SH", "SHIKSHA", "SHOES", "SI",
            "SINGLES", "SJ",
            "SK", "SL", "SM", "SN", "SO", "SOCIAL", "SOLAR", "SOLUTIONS", "SR", "ST", "SU", "SUPPLIES", "SUPPLY",
            "SUPPORT", "SV",
            "SX", "SY", "SYSTEMS", "SZ", "TATTOO", "TC", "TD", "TECHNOLOGY", "TEL", "TF", "TG", "TH", "TIENDA",
            "TIPS", "TJ", "TK",
            "TL", "TM", "TN", "TO", "TODAY", "TOKYO", "TOOLS", "TP", "TR", "TRAINING", "TRAVEL", "TT", "TV", "TW",
            "TZ", "UA", "UG",
            "UK", "UNO", "US", "UY", "UZ", "VA", "VACATIONS", "VC", "VE", "VENTURES", "VG", "VI", "VIAJES",
            "VILLAS", "VISION",
            "VN", "VOTE", "VOTING", "VOTO", "VOYAGE", "VU", "WANG", "WATCH", "WED", "WF", "WIEN", "WIKI", "WORKS",
            "WS",
            "XN--3BST00M", "XN--3DS443G", "XN--3E0B707E", "XN--45BRJ9C", "XN--55QW42G", "XN--55QX5D", "XN--6FRZ82G",
            "XN--6QQ986B3XL", "XN--80AO21A", "XN--80ASEHDB", "XN--80ASWG", "XN--90A3AC", "XN--C1AVG", "XN--CG4BKI",
            "XN--CLCHC0EA0B2G2A9GCD", "XN--D1ACJ3B", "XN--FIQ228C5HS", "XN--FIQ64B", "XN--FIQS8S", "XN--FIQZ9S",
            "XN--FPCRJ9C3D", "XN--FZC2C9E2C", "XN--GECRJ9C", "XN--H2BRJ9C", "XN--I1B6B1A6A2E", "XN--IO0A7I",
            "XN--J1AMH",
            "XN--J6W193G", "XN--KPRW13D", "XN--KPRY57D", "XN--L1ACC", "XN--LGBBAT1AD8J", "XN--MGB9AWBF",
            "XN--MGBA3A4F16A", "XN--MGBAAM7A8H", "XN--MGBAB2BD", "XN--MGBAYH7GPA", "XN--MGBBH1A71E",
            "XN--MGBC0A9AZCG",
            "XN--MGBERP4A5D4AR", "XN--MGBX4CD0AB", "XN--NGBC5AZD", "XN--NQV7F", "XN--NQV7FS00EMA", "XN--O3CW4H",
            "XN--OGBPF8FL", "XN--P1AI", "XN--PGBS0DH", "XN--Q9JYB4C", "XN--RHQV96G", "XN--S9BRJ9C", "XN--UNUP4Y",
            "XN--WGBH1C", "XN--WGBL6A", "XN--XKC2AL3HYE2A", "XN--XKC2DL3A5EE0H", "XN--YFRO4I67O", "XN--YGBI2AMMX",
            "XN--ZFR164B", "XXX", "XYZ", "YE", "YT", "ZA", "ZM", "ZONE", "ZW"]

    # assume we have a domain now
    if s.find(".") == -1:
        return False
    if s[s.rfind(".") + 1:].upper() not in tlds:
        return False
    for c in s[:s.rfind(".")]:
        if c not in ascii_lowercase + ascii_uppercase + digits + ":.-":
            return False
    return True
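A quick standalone check of the IPv4 pattern above; each octet pattern accepts values up to 299, so it acts as a rough filter rather than a strict validator (inputs below are hypothetical):

from re import compile as recompile

p = recompile(r"^[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]$")

# hypothetical inputs for illustration
for s in ["10.0.0.1", "256.1.1.1", "example.com"]:
    print(s, bool(p.match(s)))
# 10.0.0.1 True, 256.1.1.1 True (octets up to 299 pass), example.com False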
Example #47
 def __init__(self,name,depth,mother=""):
   self.name=name
   self.compname=recompile(name)
   self.mother=mother
   self.depth=depth
Example #48
from urllib2 import Request, urlopen, HTTPError
from urllib import urlencode
from json import load
from traceback import format_exc
from re import compile as recompile

REQUIRES = ("location", "wuapi")
WUAPI_MODULE = None
LOC_MODULE = None

# Weather for Lansing, MI: 32.2F (0.1C), Wind Chill of 25F (-4C), Partly Cloudy, Humidity 67%, Wind from the East at 8.0mph (12.9km/h)
#gusting to 14.0mph (22.5km/h), Low/High 38F/41F (3C/5C).  Flurries or snow showers possible early. A mix of clouds and sun.
#High 41F. Winds S at 10 to 20 mph.
WEATHER_RPL = "Weather for %s: \x02%sF\x02 (\x02%sC\x02), Low/High \x02%sF\x02/\x02%sF\x02 (\x02%sC\x02/\x02%sC\x02), %s, Humidity %s, %s %s"
WEATHER_RPL_WC = "Weather for %s: \x02%sF\x02 (\x02%sC\x02), Wind Chill of \x02%sF\x02 (\x02%sC\x02), Low/High \x02%sF\x02/\x02%sF\x02 (\x02%sC\x02/\x02%sC\x02), %s, Humidity %s, %s %s"
GHETTOWIND_REGEX = recompile(r'Winds [NSEW]{1,3} at (\d+) to (\d+) (km/h)\.')
GHETTOTEMP_REGEX = recompile(r'\. (?:High|Low) (?:near )?(?:-)?\d+C\.')

# Forecast for Ann Arbor, MI: Today - Chance of Showers 54F/77F (12C/25C), Wed - Clear 55F/81F (13C/27C), Thu - Mostly Sunny 57F/84F (14C/29C), Fri - Mostly Sunny 63F/88F (17C/31C)
FORECAST_RPL = "Forecast for %s: %s"
# Today - Chance of Showers 54F/77F (12C/25C) PoP: %s Hum: %s
FORECAST_DAY = "%s - %s \x02%sF\x02/\x02%sF\x02 (\x02%sC\x02/\x02%sC\x02) PoP: \x02%s%%\x02 Hum: \x02%s%%\x02"


def _formatWind(matchobj):
    mpos = matchobj.regs
    orig = matchobj.string
    # e.g. ((61, 85), (72, 74), (78, 80), (81, 84))
    #        whole,    speed1,    speed2,  unit
    pre = orig[mpos[0][0]:mpos[1][0]]
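
The body of _formatWind is clipped above; it takes a match object, which is the shape of a re.sub() callback for GHETTOWIND_REGEX. A simplified sketch of that callback idea using the captured groups instead of matchobj.regs; it reuses GHETTOWIND_REGEX from the snippet above, and the km/h-to-mph conversion is only an assumption since the original body is not shown.

def _format_wind_sketch(matchobj):
    # groups 1 and 2 are the two wind speeds; group 3 is the literal "km/h" unit
    lo_kmh, hi_kmh = int(matchobj.group(1)), int(matchobj.group(2))
    return "Winds at %.1fmph to %.1fmph" % (lo_kmh * 0.621371, hi_kmh * 0.621371)

# GHETTOWIND_REGEX.sub(_format_wind_sketch, "Breezy. Winds SW at 10 to 20 km/h.")
# -> 'Breezy. Winds at 6.2mph to 12.4mph'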
Ejemplo n.º 49
0
class LoginRequiredMiddleware(object):
    """
    Middleware that requires a user to be authenticated to view any page on
    the site that hasn't been white listed. Exemptions to this requirement
    can optionally be specified in settings via a list of regular expressions
    in LOGIN_EXEMPT_URLS (which you can copy from your urls.py).

    Requires authentication middleware and template context processors to be
    loaded. You'll get an error if they aren't.

    Accepts OAuth access_token authentication for API URLs.

    """
    EXEMPT_URLS = [recompile(str(settings.LOGIN_URL))]
    if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
        EXEMPT_URLS += [
            recompile(str(expr)) for expr in settings.LOGIN_EXEMPT_URLS
        ]

    API_URLS = ['/api/']

    def return_api_forbidden(self, message=None):
        response = {"denied": "You do not have permission to access this resource. " + \
                              "You may need to login or otherwise authenticate the request."}
        if message:
            response.update({"detail": message})
        return HttpResponseForbidden(json.dumps(response))

    def return_need_auth(self, request, view, args, kwargs):
        if request.is_ajax():
            return self.return_api_forbidden()
        else:
            return login_required(view)(request, args, kwargs)

    def process_view(self, request, view, *args, **kwargs):
        if hasattr(request, 'user') and request.user.is_authenticated():
            pass  # user is logged in, no further checking required

        elif any(url in request.path_info
                 for url in LoginRequiredMiddleware.API_URLS):

            # get the ip address for the authlog
            try:
                address = request.META['HTTP_X_FORWARDED_FOR']
            except KeyError:
                address = request.META['REMOTE_ADDR']
            # log the attempt
            authlog = AuthLog()
            authlog.ip_address = address
            authlog.requested_url = request.get_full_path()

            # api uses OAuth20
            try:
                if OAuth20Authentication().is_authenticated(request):
                    user = request.user
                    authlog.user = user
                    authlog.username = user.email
                    authlog.authenticated = True
                    authlog.save()
                else:
                    authlog.save()
                    return self.return_api_forbidden()
            except OAuthError as err:
                authlog.message = err
                authlog.save()
                return self.return_api_forbidden(message=err)

        elif hasattr(request, 'user') and not request.user.is_authenticated():
            if not (getattr(view, 'login_exempt', False) or any(
                    m.match(request.path_info)
                    for m in LoginRequiredMiddleware.EXEMPT_URLS)):
                return self.return_need_auth(request, view, args, kwargs)
        elif not hasattr(request, 'user'):
            raise Exception(
                "The Login Required middleware requires authentication middleware to be installed."
            )
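
A configuration note: the docstring above says exempt URLs come from LOGIN_EXEMPT_URLS in settings. A minimal sketch of what that might look like; the paths are hypothetical, and the leading slashes assume the patterns are matched against request.path_info as in process_view above.

# settings.py (sketch -- the paths are made up for illustration)
LOGIN_URL = '/accounts/login/'
LOGIN_EXEMPT_URLS = (
    r'^/accounts/login/',      # the login page itself must stay reachable
    r'^/accounts/password/',   # password reset flow
    r'^/static/',              # public assets
)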
Ejemplo n.º 50
0
from libraries.rggQt import QAbstractSocket, QTcpSocket
from libraries.rggJson import jsondumps, jsonloads
from libraries.rggSystem import fake, mainWindow, signal
from libraries.rggConstants import UNICODE_STRING, BASE_STRING

# Major protocols used by the socket
PROTOCOL_UNKNOWN = 'RGG_UNKNOWN'  # unidentified protocol
PROTOCOL_USER = '******'  # RPC protocol
PROTOCOL_TRANSFER = 'RGG_TRANSFER'  # file transfer protocol

PROTOCOLS = (PROTOCOL_UNKNOWN, PROTOCOL_USER, PROTOCOL_TRANSFER)

# Sent in 4kb chunks
CHUNK_SIZE = 4 * 1024
EMPTY_REGEX = recompile('^\s*$')

PARM_COMMAND = '-command'
PARM_INTERNAL = '-internal'
PARM_ARGS = '-args'


def generateChecksum(file):
    seeked = file.seek(0)
    if not seeked or file.pos() != 0:
        raise IOError("Unable to make file seek.")

    hash = md5()
    MD5_CHUNK_SIZE = 4096
    totalsize = 0
    size = file.size()
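
generateChecksum is clipped above before its hashing loop; a minimal sketch of the same chunked-MD5 idea over a plain Python file object (the original operates on a Qt-style file, so this is only an illustration, not the rgg code).

from hashlib import md5

def checksum_sketch(path, chunk_size=4096):
    digest = md5()
    with open(path, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:            # EOF
                break
            digest.update(chunk)     # feed the hash one chunk at a time
    return digest.hexdigest()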
Ejemplo n.º 51
0
:author:        Joachim Hospes

:version:       $Revision: 1.2 $
:contact:       $Author: Hospes, Gerd-Joachim (uidv8815) $ (last change)
:date:          $Date: 2015/09/29 17:02:15CEST $
"""

# - imports -----------------------------------------------------------------------------------------------------------
from win32api import GetVolumeInformation, error as win_error
from re import compile as recompile
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from sys import exit as sexit

# - defines -----------------------------------------------------------------------------------------------------------
HDD_NAME_REGX = r'^DMT\d{6,6}$'
HDD_NAME_MATCH = recompile(HDD_NAME_REGX)


# - functions ---------------------------------------------------------------------------------------------------------
def hdd_label_check(name, raise_exception=False):
    """
    check if the hdd label follows the naming convention (see module documentation above)

    with raise_exception set to True it raises a ValueError if name does not match

    :param name: label name to check
    :type  name: str
    :param raise_exception: set to True if a ValueError should be raised when the name does not follow the convention
    :type  raise_exception: bool
    :return: check result: True if naming convention is confirmed
    :rtype:  bool
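
The function body is clipped above; a minimal sketch of how the check could be completed with the compiled HDD_NAME_MATCH pattern from this snippet, assuming the convention is exactly HDD_NAME_REGX ("DMT" followed by six digits).

def hdd_label_check_sketch(name, raise_exception=False):
    ok = HDD_NAME_MATCH.match(name) is not None
    if not ok and raise_exception:
        raise ValueError("hdd label %r does not match %s" % (name, HDD_NAME_REGX))
    return ok

# hdd_label_check_sketch("DMT123456")  -> True
# hdd_label_check_sketch("DMT12345")   -> False (only five digits)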
Ejemplo n.º 52
0
 def __init__(self, body, value, description = None):
     """
     constructor with :
     - body : object body or CartesianMesh
      - value :  a PhysicalQuantity,
                 a list of tuples (PhysicalQuantity, species),
                 a ChemicalState or
                 a tuple to introduce a function on a specific variable
     """
     if type(body) is types.ListType:
         verifyClassList(body,[CartesianMesh])
         pass
     else:
         memberShip(body,[CartesianMesh, Body])
         pass
     self.zone = body
     self.body = body
     self.value_species = None
     self.value_property = None
     self.value = None
     self.headValue = None
     #
     #  Linked to the treatment of a wellbore
     #
     self.enthalpyInitialCondition = None
     self.wellFeedZoneInitialCondition = None
     self.temperatureInitialCondition = None
     self.wellMassFlowInitialCondition = None
     self.wellPressureInitialCondition = None
     #
     if type(value) is types.ListType:
         for i in value:
             print ("dbg commonmodel",type(i))
             pass
         verifyClassList(value, [ Head, ChemicalState, Displacement, types.TupleType])
         for ic in value:
             if isinstance(ic, Head):
                 self.headValue = ic                                                     # It should be the charge
                 pass
             elif isinstance(ic, (Displacement, ChemicalState)) :
                 self.value = ic                                                         # It should be chemistry or a displacement
                 pass
             elif isinstance(ic, types.TupleType):
                 #print("debug commonmodel ic %s\n"%(ic[0].lower()))
                 if ic[0].lower() == "enthalpy":                                         # It can also be an enthalpy in the
                                                                                         # case of a well
                     if type(ic[1]) == types.StringType:
                         #raw_input("common model debug")
                         self.enthalpyInitialCondition = refindall(recompile(r'([xyzXYZ0-9.*/+-])'), ic[1])
                         pass
                     pass
                 elif ic[0].lower().replace(" ","") == "feedzoneheatsource":             # We introduce here a heat source linked to a feed zone.
                     if type(ic[1]) in [types.TupleType, types.ListType]:                # It should be a tuple: position and value of the source term.
                         self.wellFeedZoneInitialCondition = ic[1]
                         pass
                     elif type(ic[1]) is types.StringType:                               # It should be a tuple: position and value of the source term.
                         self.wellFeedZoneInitialCondition = refindall(recompile(r'([ifelsxyzXYZ0-9.*;()/+-<>=])'), ic[1])
                         pass
                     
                     #print("in commonmodel ",self.wellFeedZoneInitialCondition)
                     #raw_input()
                     pass
                 elif ic[0].lower() == "temperature":                                    # It should be temperature otherwise a warning
                                                                                          # is raised. We extract the formula thanks to the
                                                                                          # regular expressions module.
                     if type(ic[1]) == types.StringType:
                         self.temperatureInitialCondition = refindall(recompile(r'([xyzXYZ0-9.*/+-])'), ic[1])
                         pass
                     pass
                 elif ic[0].lower().replace(" ","") == "wellmassflow":                   # It can also be the mass flow in the
                                                                                         # case of a well
                     if type(ic[1]) == types.StringType:
                         self.wellMassFlowInitialCondition = refindall(recompile(r'([xyzXYZ0-9.*/+-])'), ic[1])
                         pass
                     elif type(ic[1]) in [types.FloatType,types.IntType]:
                         self.wellMassFlowInitialCondition = ic[1]
                         pass
                     pass
                 elif ic[0].lower().replace(" ","") == "wellpressure":                   # It can also be the pressure in the
                                                                                         # case of a well
                     if type(ic[1]) == types.StringType:
                         self.wellPressureInitialCondition = refindall(recompile(r'([xyzXYZ0-9.*/+-])'), ic[1])
                         pass
                     elif type(ic[1]) in [types.FloatType, types.IntType]:
                         self.wellPressureInitialCondition = ic[1]
                         #print("commonmodel well pressure debug yes\n")
                         #raw_input()
                         pass
                     pass
                 else:
                     raise Warning, "check the  name of the variable "
                 pass
             else:
                 if (isinstance(ic, PhysicalQuantity) or type(ic) is types.ListType): 
                     self.value_species, self.value_property  = createList(ic, PhysicalQuantity)
                     pass
                 else:
                     self.value = ic
                     pass
                 pass
             pass
         pass
     else:
         memberShip(value,[PhysicalQuantity,ChemicalState])
         if (isinstance(value, PhysicalQuantity) or type(value) is types.ListType): 
             self.value_species,self.value_property  = createList(value, PhysicalQuantity)
             pass
         else:
             self.value = value
             pass
         pass
     self.description = description
     return None
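
An illustration of the character-class patterns used repeatedly above: refindall with recompile(r'([xyzXYZ0-9.*/+-])') simply splits a formula string into its allowed characters, dropping anything else (whitespace included).

from re import findall as refindall, compile as recompile

formula_chars = refindall(recompile(r'([xyzXYZ0-9.*/+-])'), "2*x + 3.5")
# -> ['2', '*', 'x', '+', '3', '.', '5']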
Ejemplo n.º 53
0
def is_ip_or_domain(s):
    if s[-1] == "/":
        s = s[:-1]
    if s[:7] == "http://":
        s = s[7:]
    s = s.split(":")[0]
    p = recompile(
        "^[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]\.[12]?[0-9]?[0-9]$"
    )
    if p.match(s) is not None:
        return True
    # todo IPv6 check
    tlds = [
        "AC", "ACADEMY", "ACTOR", "AD", "AE", "AERO", "AF", "AG", "AGENCY",
        "AI", "AL", "AM", "AN", "AO", "AQ", "AR", "ARPA", "AS", "ASIA", "AT",
        "AU", "AW", "AX", "AZ", "BA", "BAR", "BARGAINS", "BB", "BD", "BE",
        "BERLIN", "BEST", "BF", "BG", "BH", "BI", "BID", "BIKE", "BIZ", "BJ",
        "BLUE", "BM", "BN", "BO", "BOUTIQUE", "BR", "BS", "BT", "BUILD",
        "BUILDERS", "BUZZ", "BV", "BW", "BY", "BZ", "CA", "CAB", "CAMERA",
        "CAMP", "CARDS", "CAREERS", "CAT", "CATERING", "CC", "CD", "CENTER",
        "CEO", "CF", "CG", "CH", "CHEAP", "CHRISTMAS", "CI", "CK", "CL",
        "CLEANING", "CLOTHING", "CLUB", "CM", "CN", "CO", "CODES", "COFFEE",
        "COM", "COMMUNITY", "COMPANY", "COMPUTER", "CONDOS", "CONSTRUCTION",
        "CONTRACTORS", "COOL", "COOP", "CR", "CRUISES", "CU", "CV", "CW", "CX",
        "CY", "CZ", "DANCE", "DATING", "DE", "DEMOCRAT", "DIAMONDS",
        "DIRECTORY", "DJ", "DK", "DM", "DNP", "DO", "DOMAINS", "DZ", "EC",
        "EDU", "EDUCATION", "EE", "EG", "EMAIL", "ENTERPRISES", "EQUIPMENT",
        "ER", "ES", "ESTATE", "ET", "EU", "EVENTS", "EXPERT", "EXPOSED",
        "FARM", "FI", "FISH", "FJ", "FK", "FLIGHTS", "FLORIST", "FM", "FO",
        "FOUNDATION", "FR", "FUTBOL", "GA", "GALLERY", "GB", "GD", "GE", "GF",
        "GG", "GH", "GI", "GIFT", "GL", "GLASS", "GM", "GN", "GOV", "GP", "GQ",
        "GR", "GRAPHICS", "GS", "GT", "GU", "GUITARS", "GURU", "GW", "GY",
        "HK", "HM", "HN", "HOLDINGS", "HOLIDAY", "HOUSE", "HR", "HT", "HU",
        "ID", "IE", "IL", "IM", "IMMOBILIEN", "IN", "INDUSTRIES", "INFO",
        "INK", "INSTITUTE", "INT", "INTERNATIONAL", "IO", "IQ", "IR", "IS",
        "IT", "JE", "JM", "JO", "JOBS", "JP", "KAUFEN", "KE", "KG", "KH", "KI",
        "KIM", "KITCHEN", "KIWI", "KM", "KN", "KOELN", "KP", "KR", "KRED",
        "KW", "KY", "KZ", "LA", "LAND", "LB", "LC", "LI", "LIGHTING", "LIMO",
        "LINK", "LK", "LR", "LS", "LT", "LU", "LUXURY", "LV", "LY", "MA",
        "MAISON", "MANAGEMENT", "MANGO", "MARKETING", "MC", "MD", "ME", "MENU",
        "MG", "MH", "MIL", "MK", "ML", "MM", "MN", "MO", "MOBI", "MODA",
        "MONASH", "MP", "MQ", "MR", "MS", "MT", "MU", "MUSEUM", "MV", "MW",
        "MX", "MY", "MZ", "NA", "NAGOYA", "NAME", "NC", "NE", "NET", "NEUSTAR",
        "NF", "NG", "NI", "NINJA", "NL", "NO", "NP", "NR", "NU", "NZ",
        "OKINAWA", "OM", "ONL", "ORG", "PA", "PARTNERS", "PARTS", "PE", "PF",
        "PG", "PH", "PHOTO", "PHOTOGRAPHY", "PHOTOS", "PICS", "PINK", "PK",
        "PL", "PLUMBING", "PM", "PN", "POST", "PR", "PRO", "PRODUCTIONS",
        "PROPERTIES", "PS", "PT", "PUB", "PW", "PY", "QA", "QPON", "RE",
        "RECIPES", "RED", "RENTALS", "REPAIR", "REPORT", "REVIEWS", "RICH",
        "RO", "RS", "RU", "RUHR", "RW", "SA", "SB", "SC", "SD", "SE", "SEXY",
        "SG", "SH", "SHIKSHA", "SHOES", "SI", "SINGLES", "SJ", "SK", "SL",
        "SM", "SN", "SO", "SOCIAL", "SOLAR", "SOLUTIONS", "SR", "ST", "SU",
        "SUPPLIES", "SUPPLY", "SUPPORT", "SV", "SX", "SY", "SYSTEMS", "SZ",
        "TATTOO", "TC", "TD", "TECHNOLOGY", "TEL", "TF", "TG", "TH", "TIENDA",
        "TIPS", "TJ", "TK", "TL", "TM", "TN", "TO", "TODAY", "TOKYO", "TOOLS",
        "TP", "TR", "TRAINING", "TRAVEL", "TT", "TV", "TW", "TZ", "UA", "UG",
        "UK", "UNO", "US", "UY", "UZ", "VA", "VACATIONS", "VC", "VE",
        "VENTURES", "VG", "VI", "VIAJES", "VILLAS", "VISION", "VN", "VOTE",
        "VOTING", "VOTO", "VOYAGE", "VU", "WANG", "WATCH", "WED", "WF", "WIEN",
        "WIKI", "WORKS", "WS", "XN--3BST00M", "XN--3DS443G", "XN--3E0B707E",
        "XN--45BRJ9C", "XN--55QW42G", "XN--55QX5D", "XN--6FRZ82G",
        "XN--6QQ986B3XL", "XN--80AO21A", "XN--80ASEHDB", "XN--80ASWG",
        "XN--90A3AC", "XN--C1AVG", "XN--CG4BKI", "XN--CLCHC0EA0B2G2A9GCD",
        "XN--D1ACJ3B", "XN--FIQ228C5HS", "XN--FIQ64B", "XN--FIQS8S",
        "XN--FIQZ9S", "XN--FPCRJ9C3D", "XN--FZC2C9E2C", "XN--GECRJ9C",
        "XN--H2BRJ9C", "XN--I1B6B1A6A2E", "XN--IO0A7I", "XN--J1AMH",
        "XN--J6W193G", "XN--KPRW13D", "XN--KPRY57D", "XN--L1ACC",
        "XN--LGBBAT1AD8J", "XN--MGB9AWBF", "XN--MGBA3A4F16A", "XN--MGBAAM7A8H",
        "XN--MGBAB2BD", "XN--MGBAYH7GPA", "XN--MGBBH1A71E", "XN--MGBC0A9AZCG",
        "XN--MGBERP4A5D4AR", "XN--MGBX4CD0AB", "XN--NGBC5AZD", "XN--NQV7F",
        "XN--NQV7FS00EMA", "XN--O3CW4H", "XN--OGBPF8FL", "XN--P1AI",
        "XN--PGBS0DH", "XN--Q9JYB4C", "XN--RHQV96G", "XN--S9BRJ9C",
        "XN--UNUP4Y", "XN--WGBH1C", "XN--WGBL6A", "XN--XKC2AL3HYE2A",
        "XN--XKC2DL3A5EE0H", "XN--YFRO4I67O", "XN--YGBI2AMMX", "XN--ZFR164B",
        "XXX", "XYZ", "YE", "YT", "ZA", "ZM", "ZONE", "ZW"
    ]

    # assume we have a domain now
    if s.find(".") == -1:
        return False
    if s[s.rfind(".") + 1:].upper() not in tlds:
        return False
    for c in s[:s.rfind(".")]:
        if c not in ascii_lowercase + ascii_uppercase + digits + ":.-":
            return False
    return True
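
A few illustrative calls against the function above; the results follow from the IPv4 regex, the tlds list, and the character check.

# is_ip_or_domain("http://192.168.0.1:8080/")  -> True  (matches the IPv4 pattern)
# is_ip_or_domain("example.com")               -> True  ("COM" is in tlds)
# is_ip_or_domain("not a domain")              -> False (no dot at all)
# is_ip_or_domain("example.invalidtld")        -> False ("INVALIDTLD" is not in tlds)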
Ejemplo n.º 54
0
#    (range2d_pattern  , ExcelFormulaParser.RANGE2D),
    (ref2d_r1c1_pattern, ExcelFormulaParser.REF2D_R1C1),
    (ref2d_pattern    , ExcelFormulaParser.REF2D),
    (true_pattern     , ExcelFormulaParser.TRUE_CONST),
    (false_pattern    , ExcelFormulaParser.FALSE_CONST),
    (if_pattern       , ExcelFormulaParser.FUNC_IF),
    (choose_pattern   , ExcelFormulaParser.FUNC_CHOOSE),
    (name_pattern     , ExcelFormulaParser.NAME),
    (quotename_pattern, ExcelFormulaParser.QUOTENAME),
    (ne_pattern,        ExcelFormulaParser.NE),
    (ge_pattern,        ExcelFormulaParser.GE),
    (le_pattern,        ExcelFormulaParser.LE),
)

_re = recompile(
    '(' + ')|('.join([i[0] for i in pattern_type_tuples]) + ')',
    VERBOSE+LOCALE+IGNORECASE)

_toktype = [None] + [i[1] for i in pattern_type_tuples]
# need dummy at start because re.MatchObject.lastindex counts from 1

single_char_lookup = {
    '=': ExcelFormulaParser.EQ,
    '<': ExcelFormulaParser.LT,
    '>': ExcelFormulaParser.GT,
    '+': ExcelFormulaParser.ADD,
    '-': ExcelFormulaParser.SUB,
    '*': ExcelFormulaParser.MUL,
    '/': ExcelFormulaParser.DIV,
    ':': ExcelFormulaParser.COLON,
    ';': ExcelFormulaParser.SEMICOLON,
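
The lexer above folds every token pattern into one alternation of groups and uses MatchObject.lastindex to recover the token type (hence the dummy None at the start of _toktype). A self-contained miniature of the same trick, not the xlwt code itself:

from re import compile as recompile, IGNORECASE

patterns = [
    (r"\d*\.\d+",  "FLOAT"),
    (r"\d+",       "INT"),
    (r"[A-Z_]\w*", "NAME"),
]
combined = recompile("(" + ")|(".join(p for p, _ in patterns) + ")", IGNORECASE)
toktype = [None] + [t for _, t in patterns]   # dummy entry: lastindex counts from 1

def classify(text):
    m = combined.match(text)
    return toktype[m.lastindex] if m else None

# classify("3.14") -> 'FLOAT'; classify("42") -> 'INT'; classify("rate") -> 'NAME'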
Ejemplo n.º 55
0
#  STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
#  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
#  OF THE POSSIBILITY OF SUCH DAMAGE.


__rev_id__ = """$Id: ExcelFormulaLexer.py,v 1.4 2005/08/14 06:40:23 rvk Exp $"""


import sys
from antlr import EOF, CommonToken as Tok, TokenStream, TokenStreamException
import struct
import ExcelFormulaParser
from re import compile as recompile, match, LOCALE, UNICODE, IGNORECASE


int_const_pattern = recompile(r"\d+")
flt_const_pattern = recompile(r"\d*\.\d+(?:[Ee][+-]?\d+)?")
str_const_pattern = recompile(r'["][^"]*["]')
#range2d_pattern   = recompile(r"\$?[A-I]?[A-Z]\$?\d+:\$?[A-I]?[A-Z]\$?\d+")
ref2d_pattern     = recompile(r"\$?[A-I]?[A-Z]\$?\d+")
true_pattern      = recompile(r"TRUE", IGNORECASE)
false_pattern     = recompile(r"FALSE", IGNORECASE)
name_pattern      = recompile(r"[\.\w]+", LOCALE)

pattern_type_tuples = (
    (flt_const_pattern, ExcelFormulaParser.NUM_CONST),
    (int_const_pattern, ExcelFormulaParser.INT_CONST),
    (str_const_pattern, ExcelFormulaParser.STR_CONST),
#    (range2d_pattern  , ExcelFormulaParser.RANGE2D),
    (ref2d_pattern    , ExcelFormulaParser.REF2D),
    (true_pattern     , ExcelFormulaParser.TRUE_CONST),
Ejemplo n.º 56
0
def get_mobo():
    base_info = dmidecode(' | grep -A 6 \"Base Board\"')

    mkey = ''
    pkey = ''

    mb = recompile("^\s+Manufacturer:\s+([\w\-]+).*$")
    pr = recompile("^\s+Product\sName:\s+([\w\-]+).*$")
    for b in base_info:
        mmb = mb.match(b)
        mpr = pr.match(b)

        if mmb:
            mkey = mmb.group(1)
        if mpr:
            pkey = mpr.group(1)

    vmware_value = ''
    vmware_mb_type = ''
    vm_machine = False
    if not exists (VM_MOBO):
        # Could not find the VM_MOBO file
        # This means its the first attempt to get the MOBO string
        # Check to see if the BIOS says its a VM
        # If VM, run vmware-rpctool to figure out if its a BOB or VSH
        # mkey == 'Intel' and pkey == '440BX' is the Manufacturer and Product
        # name for an ESXi VM based product
        if mkey == 'Intel' and pkey == '440BX':
            try:
                vmware_value = vmwarerpctool("'info-get guestinfo.mobo'")[0]
                vmware_mb_type = vmwarerpctool("'info-get guestinfo.mobotype'")[0]
            except:
                # Could not execute the vmware-rpctool call
                # or the vmwarerpctool call returned 1 as there 
                # is no guestinfo.MOBO variable in the VSH vmx file
                vmware_value = 'VM'

            vm_machine = True

    else:
        # If BOB platform, the value is set at mfg time, else None
        (vmware_value, vmware_mb_type) = read_vm_model_file()
        if vmware_value != None and vmware_value != '':
            vm_machine = True

    for m in mobos:
        for k in m.name_keys:
            if vm_machine:
                if m.part_num == vmware_value and m.isbob == "true":
                    if vmware_mb_type != "":
                        if m.type != vmware_mb_type.strip():
                            continue
                    # Create the model file if it does not exist
                    create_vm_model_file(m.part_num, m.type)
                    return m

            man = "sent"
            ks =  k.split()
            if len(ks) >= 2:
                man = ks[0]
                prod = ks[1]
            else:
                prod = ks[0]

            # If virtual flag set, 
            # Virtual model motherboard config entry, skip it
            if m.virtual == "true" or m.isbob == "true":
                continue

            if man == "sent":
                if prod.upper() == pkey.upper():
                    if check_backplane_keys(m):
                        create_vm_model_file("", "")
                        return m
            else:
                if man.upper() == mkey.upper() and \
                        prod.upper() == pkey.upper():
                    if check_backplane_keys(m):
                        create_vm_model_file("", "")
                        # For Redfin motherboards we are using the GPIO's to figure out
                        # what the motherboard type is
                        # The PCI register to read is in the config file. 
                        # GPIO pin 56 is the one which changes state. For 1U off SB
                        # the pin is set to zero, For 1U|2U off LSI, the pin is set to 1
                        # the mask and the result are defined in the config file as well
                        if m.use_gpio != "":
                            if int(m.gpio_mask, 16) & \
                               int(read_pci_register(get_smbus_pattern(), m.use_gpio), 16) \
                               != int(m.gpio_res):
                                # GPIO pin does not match what is expected, this isn't
                                # the right motherboard, move on.
                                continue

                        if m.redfin_keys:
                            if len(m.redfin_keys) > 0:
                                # If we reached here and the box is a LSI based redfin
                                # Now we need to distinguish if it is a 1U or a 2U
                                # Check the backplane of the box for the correct key.
                                # NOTE: The above method of checking backplane_keys 
                                # wont work on redfins as the redfin EEPROM is not
                                # ipmitool readable. Hence we just skip that method
                                # and use this check here.
                                if not match_redfin_backplane(m):
                                    continue
                        return m

    #if we get here, we've failed, so exit cleanly.
    print "Couldn't find motherboard."
    exit(1)
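
An illustration of what the two Base Board patterns in get_mobo() extract, run against made-up dmidecode lines:

from re import compile as recompile

mb = recompile(r"^\s+Manufacturer:\s+([\w\-]+).*$")
pr = recompile(r"^\s+Product\sName:\s+([\w\-]+).*$")

mkey = mb.match("\tManufacturer: Intel Corporation").group(1)                  # -> 'Intel'
pkey = pr.match("\tProduct Name: 440BX Desktop Reference Platform").group(1)   # -> '440BX'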
Ejemplo n.º 57
0
 def __init__(self, name, depth, mother=""):
     self.name = name
     self.compname = recompile(name)
     self.mother = mother
     self.depth = depth
Ejemplo n.º 58
0
import logging
import warnings
warnings.filterwarnings("ignore")

# ========= [ Customs ]

#Paramiko logs
paramiko.util.log_to_file("filename.log")

# ========= [ Statics ]

# deeeeeeee BUG
debug = True

# Regex pattern for IP addresses (simple pattern)
ipRe = recompile("^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")


class tunnel():
    """
    SSH tunnel object. Instantiated object is passed to a command module, and
    the command module operates on that specific tunnel. (Facilitates quicker
    command handling as well as the possibility of multithreading.)
    """
    def __init__(self,
                 mgmtip,
                 name=False,
                 user="******",
                 password="******",
                 inputPort=22):
        self.mgmt = mgmtip
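
The comment above calls ipRe a simple pattern: it only checks the shape (one to three digits per octet). A sketch of a stricter check that also validates each octet's numeric range, reusing ipRe from above:

def is_valid_ipv4(addr):
    if not ipRe.match(addr):        # shape check only
        return False
    return all(0 <= int(octet) <= 255 for octet in addr.split("."))

# is_valid_ipv4("10.0.0.1")   -> True
# is_valid_ipv4("300.1.1.1")  -> False (octet out of range)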
Ejemplo n.º 59
0
#    (range2d_pattern  , ExcelFormulaParser.RANGE2D),
    (ref2d_r1c1_pattern, ExcelFormulaParser.REF2D_R1C1),
    (ref2d_pattern    , ExcelFormulaParser.REF2D),
    (true_pattern     , ExcelFormulaParser.TRUE_CONST),
    (false_pattern    , ExcelFormulaParser.FALSE_CONST),
    (if_pattern       , ExcelFormulaParser.FUNC_IF),
    (choose_pattern   , ExcelFormulaParser.FUNC_CHOOSE),
    (name_pattern     , ExcelFormulaParser.NAME),
    (quotename_pattern, ExcelFormulaParser.QUOTENAME),
    (ne_pattern,        ExcelFormulaParser.NE),
    (ge_pattern,        ExcelFormulaParser.GE),
    (le_pattern,        ExcelFormulaParser.LE),
)

_re = recompile(
    '(' + ')|('.join([i[0] for i in pattern_type_tuples]) + ')',
#    VERBOSE+LOCALE+IGNORECASE)
    VERBOSE+IGNORECASE)

_toktype = [None] + [i[1] for i in pattern_type_tuples]
# need dummy at start because re.MatchObject.lastindex counts from 1

single_char_lookup = {
    '=': ExcelFormulaParser.EQ,
    '<': ExcelFormulaParser.LT,
    '>': ExcelFormulaParser.GT,
    '+': ExcelFormulaParser.ADD,
    '-': ExcelFormulaParser.SUB,
    '*': ExcelFormulaParser.MUL,
    '/': ExcelFormulaParser.DIV,
    ':': ExcelFormulaParser.COLON,
    ';': ExcelFormulaParser.SEMICOLON,
Ejemplo n.º 60
0
def get_system():
    spacer = '_______________________________'

    dmi = dmidecode()

    #information categories
    syt = recompile("System Information")
    mbo = recompile("Base Board Information")
    pro = recompile("Processor Information")
    cha = recompile("Chassis Information")
    mem = recompile("Use: System Memory")

    #subheadings 
    mfgr = recompile("Manufacturer: ([\s\w\.]+)")
    prod = recompile("Product Name: ([\s\w\-\/]+)")
    sped = recompile("Max Speed: ([\s\w]+)")
    capa = recompile("Maximum Capacity: ([\s\w]+)")

    looking_for = ''
    sys_vend, sys_prod, mbo_vend, mbo_prod = '', '', '', ''
    proc_vend, proc_speed, chas_vend, mem_cap = '', '', '', ''

    for line in dmi:
        #determine where we are
        if looking_for == '':
            if syt.search(line):
                looking_for = 'sys'
            elif mbo.search(line):
                looking_for = 'mbo'
            elif pro.search(line):
                looking_for = 'pro'
            elif cha.search(line):
                looking_for = 'cha'
            elif mem.search(line):
                looking_for = 'mem'
        else:
            if looking_for == 'sys':
                v = mfgr.search(line)
                p = prod.search(line)

                if v:
                    sys_vend = v.group(1)
                if p:
                    sys_prod = p.group(1)

                if sys_prod != '' and sys_vend != '':
                    looking_for = ''
            elif looking_for == 'mbo':
                v = mfgr.search(line)
                p = prod.search(line)

                if v:
                    mbo_vend = v.group(1)
                if p:
                    mbo_prod = p.group(1)

                if mbo_prod != '' and mbo_vend != '':
                    looking_for = ''
            elif looking_for == 'pro':
                v = mfgr.search(line)
                s = sped.search(line)

                if v:
                    proc_vend = v.group(1)
                if s:
                    proc_speed = s.group(1)

                if proc_vend != '' and proc_speed != '':
                    looking_for = ''
            elif looking_for == 'cha':
                v = mfgr.search(line)

                if v:
                    chas_vend = v.group(1)
                    looking_for = ''
            elif looking_for == 'mem':
                c = capa.search(line)

                if c:
                    mem_cap = c.group(1)
                    looking_for = ''

    print spacer
    print "System  "
    print " -     Vendor : %s" % sys_vend
    print " -    Product : %s" % sys_prod
    print spacer
    print "Motherboard  "
    print " -     Vendor : %s" % mbo_vend
    print " -    Product : %s" % mbo_prod
    print spacer
    print "Processor  "
    print " -     Vendor : %s" % proc_vend
    print " -  Max Speed : %s" % proc_speed #useless
    print spacer
    print "Chassis  "
    print " -     Vendor : %s" % chas_vend
    print spacer
    print "Memory "
    print " -   Capacity : %s" % mem_cap
    print spacer