Example #1
0
def fetch_devices_for_host(host):
    """A successful search returns a list of theano devices' string values.
    An unsuccessful search raises a KeyError.

    The (decreasing) priority order is:
    - PLATOON_DEVICES
    - PLATOONRC files (if they exist) from right to left
    - working directory's ./.platoonrc
    - ~/.platoonrc

    """
    def _comma_split(text):
        # Tokenize on commas in addition to whitespace, honouring quoting.
        lex = shlex.shlex(text, posix=True)
        lex.whitespace += ','
        lex.whitespace_split = True
        return list(lex)

    # The environment variable wins over any config file.
    if PLATOON_DEVICES:
        return _comma_split(PLATOON_DEVICES)

    # Otherwise consult the config files; retry with the raw (non-interpolating)
    # parser when value interpolation fails.
    try:
        try:
            devices = platoon_cfg.get("devices", host)
        except ConfigParser.InterpolationError:
            devices = platoon_raw_cfg.get("devices", host)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        raise KeyError(host)
    return _comma_split(devices)
Example #2
0
def get_new_data(equity, last_date):
    """Download daily trades for *equity* from bse-sofia.bg starting at *last_date*.

    NOTE(review): Python 2 code (urllib2).  *last_date* appears to be an ISO
    'YYYY-MM-DD' string that is re-ordered into 'DD/MM/YYYY' for the request --
    confirm against callers.  Returns a pandas DataFrame indexed by ISO date
    with columns ("Open", "High", "Low", "Close", "AdjClose", "Volumes").
    """
    # Re-order 'YYYY-MM-DD' into 'DD/MM/YYYY' as the remote endpoint expects.
    date_splitter = shlex.shlex(last_date, posix=True)
    date_splitter.whitespace += '-'
    date_splitter.whitespace_split = True
    date = list(date_splitter)
    last_date = str(date[2]) + '/' + str(date[1]) + '/' + str(date[0])

    url = "http://www.bse-sofia.bg/graphics/phpfiles/MYgethistoDeA.php?MonCode=" + equity + "&MonPays=BE&Periode=1&De=" + last_date + "&A=" + datetime.date.today().strftime("%d/%m/%Y")
    file_trades = urllib2.urlopen(url)
    # The first line is a header; it is read and discarded.
    tradedays = file_trades.readline()
    np_data = np.empty((0, 7))
    for line in file_trades:
        if not line.strip():
            continue
        # split main line (fields are ';'-separated)
        main_splitter = shlex.shlex(line.strip(), posix=True)
        main_splitter.whitespace += ';'
        main_splitter.whitespace_split = True
        trade = list(main_splitter)
        # if any trading for this day ('N' in the first price field marks none)
        if trade[1] != 'N':
            # split date (arrives as DD/MM/YYYY)
            date_splitter = shlex.shlex(trade[0], posix=True)
            date_splitter.whitespace += '/'
            date_splitter.whitespace_split = True
            date = list(date_splitter)
            # "Date" , "Open", "High", "Low", "Close", Volumes"
            # 2012-12-21,10000,14046.26,1.423,1.4,1.4,1.423
            # Prices arrive scaled by 100; -1 is a placeholder in the AdjClose slot.
            np_data = np.append(np_data, [[date[2] + "-" + date[1] + "-" + date[0], float(trade[1]) / 100, float(trade[2]) / 100, float(trade[3]) / 100, float(trade[4]) / 100, -1, trade[5]]], axis=0)
    return pand.DataFrame(data=np_data[:, 1:], index=np_data[:, 0], columns=("Open", "High", "Low", "Close", "AdjClose", "Volumes"), dtype=float)
Example #3
0
def fetch_hosts():
    """A successful search returns a list of host to participate in a multi-node
    platoon. An unsuccessful search raises a KeyError.

    The (decreasing) priority order is:
    - PLATOON_HOSTS
    - PLATOONRC files (if they exist) from right to left
    - working directory's ./.platoonrc
    - ~/.platoonrc

    """
    def _comma_split(text):
        # Tokenize on commas in addition to whitespace, honouring quoting.
        lex = shlex.shlex(text, posix=True)
        lex.whitespace += ','
        lex.whitespace_split = True
        return list(lex)

    # The environment variable wins over any config file.
    if PLATOON_HOSTS:
        return _comma_split(PLATOON_HOSTS)

    # Otherwise consult the config files; retry with the raw (non-interpolating)
    # parser when value interpolation fails.
    try:
        try:
            hosts = platoon_cfg.get("platoon", "hosts")
        except ConfigParser.InterpolationError:
            hosts = platoon_raw_cfg.get("platoon", "hosts")
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        raise KeyError("hosts")
    return _comma_split(hosts)
Example #4
0
    def get_cmdline(self, instruction):
        """Execute with special instructions.

        EXAMPLE instruction (Powershell):
        powershell -ExecutionPolicy Unrestricted $plugin_name $plugin_args

        EXAMPLE instruction (VBS):
        wscript $plugin_name $plugin_args

        """
        # Use posix mode on posix machines so that quoting such as
        # --metric='disk/logical/|' is handled properly.
        posix_mode = (os.name == 'posix')

        lexer = shlex.shlex(instruction, posix=posix_mode)
        lexer.whitespace_split = True

        cmdline = []
        for token in lexer:
            if '$plugin_name' in token:
                # Substitute the absolute plugin path into the token.
                cmdline.append(token.replace('$plugin_name', self.plugin_abs_path))
            elif token == '$plugin_args':
                # Expand the stored argument list, re-splitting it shell-style.
                if self.arguments:
                    arg_lexer = shlex.shlex(' '.join(self.arguments), posix=posix_mode)
                    arg_lexer.whitespace_split = True
                    cmdline.extend(arg_lexer)
            else:
                cmdline.append(token)
        return cmdline
Example #5
0
def split(string, maxsplit=-1):
    """Split a string with shlex when possible, and add support for maxsplit.

    :param string: text to split on whitespace, honouring double and backtick
        quotes; '#' is NOT treated as a comment.
    :param maxsplit: like str.split's maxsplit -- at most this many shlex
        tokens are produced and the unparsed remainder is appended as the
        final element.  -1 (default) means split fully.
    :returns: list of tokens.  On a lexing error (e.g. unbalanced quote) with
        maxsplit == -1, falls back to plain str.split().

    Fix: the old implementation called next() on the lexer maxsplit times and
    raised an uncaught StopIteration when the string had fewer tokens; it now
    returns the tokens that exist.
    """
    def _make_lexer():
        lexer = shlex.shlex(string, posix=True)
        lexer.quotes = '"`'
        lexer.whitespace_split = True
        lexer.commenters = ""
        return lexer

    if maxsplit == -1:
        try:
            return list(_make_lexer())
        except ValueError:
            # Unbalanced quoting etc. -- degrade to naive whitespace split.
            return string.split()

    lexer = _make_lexer()
    pieces = []
    for _ in range(maxsplit):
        token = lexer.get_token()  # posix mode returns None at end of input
        if token is None:
            # Fewer tokens than maxsplit: nothing left to read, return what we have.
            return pieces
        pieces.append(token)

    # Append whatever is left in the stream, unparsed (matches old behaviour,
    # including an empty string when the input was consumed exactly).
    pieces.append(lexer.instream.read())
    return pieces
Example #6
0
def split_ids(q):
    '''split input query string into list of ids.
       any of " \t\n\x0b\x0c\r|,+" as the separator,
        but preserving a phrase if quoted
        (either single or double quoted)
        more detailed rules see:
        http://docs.python.org/2/library/shlex.html#parsing-rules
        e.g. split_ids('CDK2 CDK3') --> ['CDK2', 'CDK3']
             split_ids('"CDK2 CDK3"\n CDk4')  --> ['CDK2 CDK3', 'CDk4']

        (Doc fix: the function does not change letter case, so the second
        example returns 'CDk4', not 'CDK4' as previously documented.)
    '''
    # Python3 strings are already unicode, .encode
    # now returns a bytearray, which cannot be searched with
    # shlex.  For now, do this terrible thing until we discuss
    if sys.version_info.major == 3:
        lex = shlex(q, posix=True)
    else:
        lex = shlex(q.encode('utf8'), posix=True)
    # Treat pipes, commas and plus signs as separators alongside whitespace.
    lex.whitespace = ' \t\n\x0b\x0c\r|,+'
    lex.whitespace_split = True
    lex.commenters = ''
    if sys.version_info.major == 3:
        ids = [x.strip() for x in list(lex)]
    else:
        ids = [x.decode('utf8').strip() for x in list(lex)]
    # Drop empty tokens produced by runs of separators.
    ids = [x for x in ids if x]
    return ids
Example #7
0
def _expand_args(command):
    """Parses command strings and returns a Popen-ready list.

    *command* may be a string of '|'-separated commands, each of which is
    shlex-split into its own argv list; any non-string input is returned
    unchanged.

    Cleanup: the old version had three interpreter-version branches, two of
    which were identical (the Python-2 default duplicated the else clause),
    and collected tokens via a manual get_token() loop.
    """
    # Prepare arguments.
    if isinstance(command, STR_TYPES):
        if sys.version_info[0] == 3:
            splitter = shlex.shlex(command)
        else:
            # Python 2 shlex cannot cope with unicode input.
            splitter = shlex.shlex(command.encode('utf-8'))
        # Split only on pipes so each pipeline stage stays one token.
        splitter.whitespace = '|'
        splitter.whitespace_split = True

        # Iterating the lexer yields tokens until EOF, exactly like the old
        # get_token() loop; each stage is then shell-split into an argv list.
        command = [shlex.split(stage) for stage in splitter]

    return command
Example #8
0
 def testPunctuationWithPosix(self):
     """Test that punctuation_chars and posix behave correctly together."""
     # see Issue #29132
     # posix mode strips the quotes, and punctuation_chars=True makes '>' a
     # standalone token instead of part of the following word.
     s = shlex.shlex('f >"abc"', posix=True, punctuation_chars=True)
     self.assertEqual(list(s), ['f', '>', 'abc'])
     # Backslash-escaped quotes survive as literal characters in the token.
     s = shlex.shlex('f >\\"abc\\"', posix=True, punctuation_chars=True)
     self.assertEqual(list(s), ['f', '>', '"abc"'])
Example #9
0
    def parse(self, stream):
        """Tokenise *stream* one line at a time, merging continuation lines,
        and feed every complete, non-empty logical line to self.parseLine().

        Returns self so calls can be chained.

        NOTE(review): Python 2 code (`unicode`).  The continuation test below
        assumes a backslash-escaped line break surfaces as a token ending in a
        literal '\\n' -- confirm against the inputs this parser receives.
        """
        if isinstance(stream, unicode):
            stream = stream.encode('utf-8')
        # Convert stream to StringIO if necessary
        stream = shlex.shlex(stream).instream
        # Create lexer, no data by default
        lex = shlex.shlex("", posix=True)
        lex.wordchars += "!$%&()*+-./:x<=>?@:"
        lex.whitespace_split = False
        lex.quotes = '"'

        # Feed one line at the time
        lines = []
        prevline = []
        for raw in stream:
            lex.instream = shlex.StringIO(raw)
            lex.state = ' '  # reset the lexer's internal state for the new line
            newline = list(lex)
            # A trailing newline token marks a continued line.
            withContinuation = (newline[-1] == '\n') if newline else False
            if withContinuation:
                newline.pop()
            if prevline:
                # Continuation: extend the previous line *in place* -- it is
                # already stored inside `lines`, so no new append is needed.
                prevline.extend(newline)
                if not withContinuation:
                    prevline = []
                continue
            if withContinuation:
                prevline = newline
            lines.append(newline)
        # Filter out empty lines
        for line in lines:
            if not line:
                continue
            self.parseLine(line)
        return self
Example #10
0
 def __init__(self, instream=None):
     """Build a posix-mode shlex over *instream* (stdin when None) that also
     splits on commas and allows path/glob/arithmetic characters in words."""
     if instream is not None:
         self._lexer = shlex.shlex(instream, posix=True)
     else:
         self._lexer = shlex.shlex(posix=True)
     # Treat commas like whitespace so comma-separated lists tokenise cleanly.
     self._lexer.whitespace += ','
     # Permit '.', '/', parens, '*', '-' and '$' inside a single token.
     self._lexer.wordchars += './()*-$'
     self.eof = self._lexer.eof  # posix mode uses None as the EOF marker
Example #11
0
	def parse_load(self, context, line, cursor):
		"""Parse a `load` directive of the form:

		    load '<path>' as <alias> (param, key=value, ...)

		Appends a Template_Load to context.module.template_loads; raises
		Parse_Exception when the quoted load path is missing.  The optional
		parenthesised parameter list overrides entries in the default
		param_map (path/abs/directive_token/placeholder).
		"""
		if context.template:
			self.warning(context, "load directives are not expected inside template definitions")

		cursor, _ = self.parse_space(context, line, cursor)
		cursor, path = self.parse_quoted(context, "'\"", "", line, cursor)
		if not path:
			raise Parse_Exception(context, "expected load path")

		cursor, _ = self.parse_literal_req(context, "as", line, cursor)
		cursor, alias = self.parse_identifier_req(context, line, cursor)

		cursor, _ = self.parse_space(context, line, cursor)
		params = []  # NOTE(review): never used below; params_text carries the data
		cursor, params_text = self.parse_paren(context, "", line, cursor)

		# Defaults; overridden by any key=value pairs in the parameter list.
		param_map = {
			"path" : path,
			"abs" : False,
			"directive_token" : self.directive_token,
			"placeholder" : self.placeholder
		}

		if params_text:
			# split by comma, preserve quotes
			comma_lex = shlex.shlex(params_text)
			comma_lex.whitespace_split = True
			comma_lex.whitespace = ","
			comma_lex.commenters = ""

			for param in comma_lex:
				# split by eq, strip quotes
				eq_lex = shlex.shlex(param, posix = True)
				eq_lex.whitespace_split = True
				eq_lex.whitespace += "="
				eq_lex.commenters = ""
				pair = list(eq_lex)
				key = pair[0].strip()
				# A bare parameter (no '=value') acts as a boolean flag.
				value = True

				if len(pair) > 1:
					value = pair[1].strip()

				param_map[key] = value

		load = Template_Load()
		load.alias = alias
		load.path = param_map["path"]
		load.abs = param_map["abs"]
		load.directive_token = param_map["directive_token"]
		load.placeholder = param_map["placeholder"]

		self.trace(context,
			"load directive: {} as {}, abs({}), directive_token({}), placeholder({})".format(
			load.path, alias, load.abs, load.directive_token, load.placeholder))

		context.module.template_loads.append(load)
Example #12
0
    def parse(cls, string, context=None):
        """Parse a command string as usually is typed in console
            to create a :class`Planner`.

        :param `string`: the command line search to be parsed, using the
            mole search syntax.
        :param `context`: a dictionary to use as context for all actions.
            Usually you want to use context to pass arguments to actions in
            execution time.

        Fix: an input that yields no commands used to crash on `ret.queue[0]`
        (IndexError) and on the then-unbound loop variable `x`; both are now
        guarded.
        """
        ret = cls()
        # Split the pipeline on '|'; tabs/spaces join RESERVED as word chars
        # so each token is a whole command together with its arguments.
        shl = shlex.shlex(string)
        shl.wordchars += ("\t "+RESERVED)
        shl.whitespace = '|'

        if context is None:
            context = AttrDict({})

        x = None  # last Action added; stays None when nothing parses
        for cmd in shl:
            # Drop language keywords, keep the remaining words.
            cmd = " ".join(filter(lambda x:x not in KEYWORDS, cmd.split()))

            if len(cmd) == 0:
                continue

            args = []
            kwargs = {}

            ishl = shlex.shlex(cmd)
            ishl.wordchars += RESERVED

            # Strip surrounding quotes from each token of the command.
            cmd = [ x.strip("\"").strip("'") for x in ishl ]

            # First token is the action type; the rest become args/kwargs.
            for arg in cmd[1:]:
                if "=" in arg:
                    kwargs.update(dict([tuple(arg.split("="))]))
                elif "," in arg:
                    args.append(arg.split(","))
                else:
                    args.append([arg])

            x = Action.from_type(cmd[0], *args, **kwargs)
            x.context = AttrDict(context)
            ret.add(x)

        # Auto-append plotter/parser stages when the pipeline starts with an
        # input action.  Guarded: an empty parse leaves ret.queue empty.
        if ret.queue and isinstance(ret.queue[0], ActionInput) and x.context is not None:
            obj = ret.queue[0].get_object()
            if obj is not None:
                if "plotter" in obj:
                    ret.add(obj.plotter)
                else:
                    ret.add(Plotter.from_type("basic"))
                if "parser" in obj:
                    ret.add(obj.parser)
                else:
                    ret.add(Parser.from_type("basic"))

        return ret
Example #13
0
    def __init__( self, f, prompts=['~# ', '~$ '], com='', whi='' ):

        r"""
        The constructor takes a filename or an open file or a string
        as the shell session.

        The constructor sets the :attr:`tokens` member attribute with
        a shlex token stream initialised with the correct options for
        parsing comments and whitespace.

        The token commenters and whitespace are set to the empty string
        and can be modified with the function arguments 'com' and
        'whi'.

        (Doc fix: an earlier paragraph described a `shlex_object` argument
        that this constructor does not accept.)

        >>> list(ShellSessionParser( "Yozza 1 2" ).tokens)
        ['Yozza', ' ', '1', ' ', '2']

        >>> tokens = ShellSessionParser( "Yozza 1 2").tokens
        >>> tokens.whitespace = ' '
        >>> list(tokens)
        ['Yozza', '1', '2']

        >>> list( ShellSessionParser("Yozza # a comment you dont want to see", whi=' ', com='#' ).tokens)
        ['Yozza']

        """

        self.tokens = shlex( f if hasattr(f, "read") else StringIO( f ))
        self.tokens.commenters = com
        # deactivate shlex comments facility which won't work for us.
        # The terminating linefeed means two things: end of comment an
        # end of command. As shlex consume the terminating linefeed,
        # there is no end of command left.

        self.tokens.whitespace = whi
        # deactivate shlex whitespace munging. characters cited in
        # ``shlex.whitespace`` are not returned by get_token. If
        # empty, whitespaces are returned as they are which is what we
        # want: they definitely count in bash, and may count in
        # output, so we just want to keep them as they are.

        # Depth of nested constructs tracked by the parser (starts at 0).
        self.nested = 0

        # Pre-tokenise each prompt with the same comment/whitespace settings
        # so prompt matching compares like with like.
        self.prompts = []
        for p in prompts:
            s=shlex(p)
            s.commenters, s.whitespace = com, whi
            self.prompts.append( list( s ) )

        self.max_prompt_len = max([ len(p) for p in self.prompts ])
Example #14
0
 def testEmptyStringHandling(self):
     """Test that parsing of empty strings is correctly handled."""
     # see Issue #21999
     # posix mode: '' collapses to an empty token, whatever punctuation_chars is.
     expected = ['', ')', 'abc']
     for punct in (False, True):
         s = shlex.shlex("'')abc", posix=True, punctuation_chars=punct)
         slist = list(s)
         self.assertEqual(slist, expected)
     # non-posix mode keeps the quote characters in the token.
     expected = ["''", ')', 'abc']
     s = shlex.shlex("'')abc", punctuation_chars=True)
     self.assertEqual(list(s), expected)
Example #15
0
 def testPunctuationWithWhitespaceSplit(self):
     """Test that with whitespace_split, behaviour is as expected"""
     s = shlex.shlex('a  && b  ||  c', punctuation_chars='&')
     # whitespace_split is False, so splitting will be based on
     # punctuation_chars ('|' is not punctuation here, so each '|'
     # comes out as its own token)
     self.assertEqual(list(s), ['a', '&&', 'b', '|', '|', 'c'])
     s = shlex.shlex('a  && b  ||  c', punctuation_chars='&')
     s.whitespace_split = True
     # whitespace_split is True, so splitting will be based on
     # white space
     self.assertEqual(list(s), ['a', '&&', 'b', '||', 'c'])
Example #16
0
def stream_video(url, open_chat):
    """Launch the configured stream player for *url*, optionally opening the
    chat page first; streams the player's output into the status line.

    Returns 0 on success, 1 on failure.

    Fixes: the argv list was shlex-split twice from the identical command
    string (the duplicate is removed), and the bare ``except:`` -- which also
    swallowed KeyboardInterrupt -- is narrowed to ``except Exception`` with
    the traceback logged.
    """
    global screen, play_process, stream_quality, stream_chat, DEVNULL

    os.chdir(files_path)
    # Build the player command line; shlex keeps quoted parts intact.
    command = stream_command + " " + url + " " + stream_quality
    if stream_player != "":
        command = command + " --player " + stream_player

    lex = shlex.shlex(command)
    lex.whitespace_split = True
    args = list(lex)
    try:
        screen.clear()
        screen.refresh()

        set_status("starting stream")

        if stream_chat and open_chat:
            # Open the chat page via the desktop handler and wait for the
            # launcher process to finish.
            chat_lex = shlex.shlex("xdg-open " + url + "/chat")
            chat_lex.whitespace_split = True
            chat_process = subprocess.Popen(list(chat_lex), stderr=DEVNULL)
            chat_process.wait()

        play_process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # Relay the player's combined output to the status line until EOF.
        while True:
            line = play_process.stdout.readline()
            if not line:
                break
            set_status(line.decode("utf-8"))
            logging.debug(line)

        play_process.wait()

        set_status("finished stream")
        play_process = None

        restore_state()

    except Exception:
        # Narrowed from a bare except so Ctrl-C still exits; keep the cause.
        logging.exception("playback failed")

        set_status("playback failed")
        play_process = None

        restore_state()
        return 1

    return 0
Example #17
0
def get_adj_close_data(equity):
    """Fetch the adjusted-close history for *equity* from bse-sofia.bg.

    NOTE(review): Python 2 code (urllib2, byte-string scanning); the export
    appears to be UTF-16-ish, terminated by an 'E\\x00O\\x00F' marker.
    Returns a pandas DataFrame indexed by ISO date with columns
    ("Open", "High", "Low", "Close", "AdjClose", "Volumes").

    Fixes: ``day_data == of`` was a no-op comparison that silently dropped
    the four bytes consumed while probing for the EOF marker -- it is now
    ``day_data += of``; and the Python-2-only ``print url2`` statement is a
    function-style call (identical output on Python 2).
    """
    url2 = "http://www.bse-sofia.bg/?page=ExportData&target=security&code=" + equity
    print(url2)
    fl_url = urllib2.urlopen(url2)
    fl_url.read(3)  # read BOM
    trades = list()
    sym = fl_url.read(1)
    day_data = str()
    # Scan byte-by-byte: records are newline-separated; the stream ends with
    # the encoded "EOF" marker ('E' followed by '\x00O\x00F').
    while sym:
        if sym == '\n':
            trades.append(day_data)
            day_data = str()
        elif sym == 'E':
            of = fl_url.read(4)
            if of == '\x00O\x00F':
                trades.append(day_data)
                day_data = str()
                break
            # Not the EOF marker: keep the 'E' and the four probed bytes.
            day_data += sym
            day_data += of
        else:
            day_data += sym
        sym = fl_url.read(1)
    if day_data:
        trades.append(day_data)

    np_data = np.empty((0, 7))
    for trade in trades:
        trade = trade[:-3]  # remove \x00\r\x00 at the end
        trade, b = decodefunc(trade)
        trade = trade.strip()
        if not trade:
            continue
        # split main line on ';'
        main_splitter = shlex.shlex(trade, posix=True)
        main_splitter.whitespace += ';'
        main_splitter.whitespace_split = True
        trade = list(main_splitter)
        # ['13.11.11', '3.348', '3.379', '9129', '11.51', '10.86', '8.17', '5.23', '1.85', '.59', '1.15', '1.1']
        # trade[0] date, trade[1] adjusted close, trade[2] close, trade[3] volume
        if trade:
            # Date arrives as 'YY.MM.DD'; rebuild it as '20YY-MM-DD'.
            date_splitter = shlex.shlex(trade[0], posix=True)
            date_splitter.whitespace += '.'
            date_splitter.whitespace_split = True
            date = list(date_splitter)
            date = str('20' + str(date[0] + '-' + date[1]) + '-' + str(date[2]))
            np_data = np.append(np_data, values=[[date, float(0), float(0), float(0), float(trade[2]), float(trade[1]), float(trade[3])]], axis=0)
    fl_url.close()
    return pand.DataFrame(data=np_data[:, 1:], index=np_data[:, 0], columns=("Open", "High", "Low", "Close", "AdjClose", "Volumes"), dtype=float)
Example #18
0
    def __init__(self, path, data=None):
        """Build a PKGBUILD evaluator over *data* (when path is None) or over
        the contents of the file at *path*, then run the parser.

        Fix: the file opened via ``open(path, "r").read()`` was never closed;
        it is now read inside a ``with`` block.
        """
        ConsoleP.__init__(self, 'pkgbuild')

        if path is None:
            source = data
        else:
            # Read the whole file and close the handle immediately.
            with open(path, "r") as handle:
                source = handle.read()
        self.eval = shlex.shlex(source, posix=True)

        # Only spaces/tabs separate tokens; shell-ish characters stay in words.
        self.eval.whitespace = " \t"
        self.eval.wordchars += '.:/${}-~'
        self.eval.commenters = '#'
        self.state = self.ST_VAR
        self.scope = Scope()

        self.parse()
Example #19
0
    def rename_window(self, new_name):
        """Return :class:`Window` object ``$ tmux rename-window <new_name>``.

        :param new_name: name of the window
        :type new_name: string

        Fix: removed a dead ``shlex.shlex(new_name)`` lexer (and its
        ``escape``/``whitespace_split`` settings) that was built but never
        read -- the name is passed to tmux verbatim.
        """
        try:
            self.cmd(
                'rename-window',
                new_name
            )
            self['window_name'] = new_name
        except Exception as e:
            logger.error(e)

        self.server._update_windows()

        return self
Example #20
0
    def processcommand(cls, reader, command):
        """Expand {posargs} placeholders and reader substitutions in
        *command*, then split the result into an exec argv list with shlex
        configured to leave every value untouched (no escapes, no comments).
        """
        posargs = getattr(reader, "posargs", None)

        # Build the substituted command string piece by piece.
        pieces = []
        for word in CommandParser(command).words():
            if word in ("{posargs}", "[]"):
                if posargs:
                    pieces.append(" ".join(posargs))
                continue
            if word.startswith("{posargs:") and word.endswith("}"):
                if posargs:
                    pieces.append(" ".join(posargs))
                    continue
                # No posargs supplied: fall back to the default inside {posargs:...}
                word = word[9:-1]
            # Substitute twice so a replacement may itself contain a
            # replaceable expression.
            pieces.append(reader._replace(reader._replace(word)))
        newcommand = "".join(pieces)

        # Construct shlex object that will not escape any values,
        # use all values as is in argv.
        shlexer = shlex.shlex(newcommand, posix=True)
        shlexer.whitespace_split = True
        shlexer.escape = ''
        shlexer.commenters = ''
        return list(shlexer)
Example #21
0
 def parse(self, path):
     """Parse the filemap file at *path*, populating self.include,
     self.exclude and self.rename; returns the number of errors found.

     Recognised directives: ``include <name>``, ``exclude <name>``,
     ``rename <src> <dest>`` and ``source <file>`` (parsed recursively).
     """
     errs = 0
     def check(name, mapping, listname):
         # A name may appear in only one list; warn and count the conflict.
         if name in mapping:
             self.ui.warn(_('%s:%d: %r already in %s list\n') %
                          (lex.infile, lex.lineno, name, listname))
             return 1
         return 0
     # Second argument names the file for error messages; True enables
     # posix mode so quoted names are unquoted.
     lex = shlex.shlex(open(path), path, True)
     lex.wordchars += '!@#$%^&*()-=+[]{}|;:,./<>?'
     cmd = lex.get_token()
     while cmd:
         if cmd == 'include':
             name = lex.get_token()
             errs += check(name, self.exclude, 'exclude')
             self.include[name] = name
         elif cmd == 'exclude':
             name = lex.get_token()
             errs += check(name, self.include, 'include')
             errs += check(name, self.rename, 'rename')
             self.exclude[name] = name
         elif cmd == 'rename':
             src = lex.get_token()
             dest = lex.get_token()
             errs += check(src, self.exclude, 'exclude')
             self.rename[src] = dest
         elif cmd == 'source':
             # Recursively parse another filemap file.
             errs += self.parse(lex.get_token())
         else:
             self.ui.warn(_('%s:%d: unknown directive %r\n') %
                          (lex.infile, lex.lineno, cmd))
             errs += 1
         cmd = lex.get_token()
     return errs
	def get_params(self,input):
		"""Split *input* into positional args and ``key : value`` pairs.

		A token immediately followed by ':' is treated as a keyword whose
		value is the next token (an empty value raises); every other token is
		positional.  Results are stored on self.argvs / self.kwargvs and also
		returned as ``(argvs, kwargvs)``.
		"""
		s= shlex.shlex(input,self.posix)
		s.wordchars = self.wordchars
		s.whitespace_split=self.whitespace_split
		argvs=[]
		kwargvs={}
		c=self.__get_token(s)
		while True:
			if not c:
				break
			# Look ahead one token to decide positional vs keyword.
			n=self.__get_token(s)
			if not n:
				argvs.append(c)
				break
			if n==':':
				v=self.__get_token(s)
				if not v:
					raise Exception('arg [%s] value can not be empty.' % c)
				kwargvs[c]=v
				c=self.__get_token(s)
			else:
				# Not a keyword marker: c is positional, n becomes the new head.
				argvs.append(c)
				c=n
		self.argvs=argvs
		self.kwargvs=kwargvs
		return (argvs,kwargvs)
Example #23
0
def _release_from_os_release(fn):
    """Identify the installation of a Linux distribution via /etc/os-release.

    Attempt to identify the installation of a Linux distribution via
    /etc/os-release.  This file must already have been verified to exist
    and be readable.

    :param fn: path to the os-release file (it is opened read-only below;
        the earlier description of this as "an open filehandle" was wrong)
    :type fn: str
    :returns: The distribution's name and version, or None for either or both
    if they cannot be determined
    :rtype: (string, string)
    """
    rel_name = None
    rel_ver = None

    with open(fn, "r") as f:
        parser = shlex.shlex(f)

        # Each assignment tokenises as KEY, '=', value; surrounding quotes
        # are kept by the non-posix lexer and stripped manually below.
        while True:
            key = parser.get_token()
            if key == parser.eof:
                break
            elif key == "NAME":
                # Throw away the "=".
                parser.get_token()
                rel_name = parser.get_token().strip("'\"")
            elif key == "VERSION_ID":
                # Throw away the "=".
                parser.get_token()
                rel_ver = parser.get_token().strip("'\"")

    return rel_name, rel_ver
Example #24
0
    def IARunCmd(self, data):
        """Dispatch a command string to a method on self.

        *data* looks like ``funcname "arg one" arg2``: the first token names
        the method, the rest become its positional arguments (surrounding
        double quotes are stripped).  Missing trailing arguments fall back to
        the method's declared defaults (or None).  Returns the method's
        return value.

        Fix: ``inspect.getargspec`` was removed in Python 3.11; the code now
        prefers ``inspect.getfullargspec`` and only falls back on old
        interpreters.
        """
        import shlex
        lex = shlex.shlex(data)
        lex.quotes = '"'
        lex.whitespace_split = True
        cmd = list(lex)
        # The non-posix lexer keeps surrounding double quotes; strip them.
        reQuoting = re.compile('\s*"(.*)"', re.DOTALL)
        for i in range(0, len(cmd)):
            m = re.match(reQuoting, cmd[i])
            if m:
                cmd[i] = m.group(1)
        funname = cmd[0]
        i_arg = cmd[1:]
        fun = self.__getattribute__(funname)
        import inspect
        if hasattr(inspect, 'getfullargspec'):
            spec = inspect.getfullargspec(fun)
            def_args, def_defaults = spec.args, spec.defaults
        else:
            # Python < 3 fallback (getfullargspec is unavailable there).
            (def_args, def_varargs, def_keywords, def_defaults) = inspect.getargspec(fun)

        # Drop 'self'; pre-fill the positional slots with defaults (right-
        # aligned) padded with None, then overwrite from the parsed args.
        def_args = def_args[1:]
        if def_defaults != None:
            real_vars = list(def_defaults)
        else:
            real_vars = []

        def_len = len(def_args)

        while len(real_vars) < def_len:
            real_vars.insert(0, None)
        index = 0
        for a in i_arg:
            real_vars[index] = a
            index += 1

        response = fun(*real_vars)
        return response
Example #25
0
	def __init__(self, fn, args):
		"""Here, the arguments passed to Urutu method is checked.
		Different paths are taken from here.
		1. Two lists represent usage of both threads and blocks
		2. One list represent usage of only threads
		3. String "retstr" represents returning the generated kernel.
		Note that it does not work for data parallel methods
		4. No extra agruments passed. Represents data parallel approach

		Fixes: a lone-list ``args`` used to raise IndexError on ``args[1]``
		(now guarded with len checks), and an unused ``shlex.shlex(stri)``
		lexer was removed.
		"""
		stri = inspect.getsource(fn)
		self.code = stri
		if args and args[0] == "retstr":
			# 3. caller wants the generated kernel source back
			self.return_kernel = True
			self.args = args[1:]
		elif args and type(args[0]) is list and len(args) > 1 and type(args[1]) is list:
			# 1. explicit thread AND block dimensions
			self.threads = args[0]
			self.blocks = args[1]
			self.args = args[2:]
		elif args and type(args[0]) is list:
			# 2. thread dimensions only (a lone list no longer crashes)
			self.threads = args[0]
			self.args = args[1:]
		else:
			# 4. data-parallel approach
			self.data_parallel = True
			self.args = args
		self.typeargs()
Example #26
0
    def _parse_target(self, aci):
        """Extract ``(keyword op value)`` target clauses from the ACI string
        *aci* into self.target, keyed by the clause keyword.

        Each entry records its comparison operator ('=' or '!=') and its
        expression; 'targetattr' values of the form ``attr || attr || ...``
        are split into a list.

        NOTE(review): Python 2 code -- ``lexer.next()`` uses the Py2 iterator
        protocol (Py3 would need ``next(lexer)``), and ``aci.encode('utf-8')``
        assumes a Py2 byte/unicode string.
        """
        lexer = shlex.shlex(aci.encode('utf-8'))
        # Allow dots inside tokens (e.g. dotted attribute names).
        lexer.wordchars = lexer.wordchars + "."

        l = []  # NOTE(review): never used below

        var = False
        op = "="
        for token in lexer:
            # We should have the form (a = b)(a = b)...
            if token == "(":
                var = lexer.next().strip()
                operator = lexer.next()
                if operator != "=" and operator != "!=":
                    # Peek at the next char before giving up
                    # ('!=' may arrive as '!' then '=').
                    operator = operator + lexer.next()
                    if operator != "=" and operator != "!=":
                        raise SyntaxError("No operator in target, got '%s'" % operator)
                op = operator
                val = lexer.next().strip()
                val = self._remove_quotes(val)
                end = lexer.next()
                if end != ")":
                    raise SyntaxError('No end parenthesis in target, got %s' % end)

            if var == 'targetattr':
                # Make a string of the form attr || attr || ... into a list
                t = re.split('[^a-zA-Z0-9;\*]+', val)
                self.target[var] = {}
                self.target[var]['operator'] = op
                self.target[var]['expression'] = t
            else:
                self.target[var] = {}
                self.target[var]['operator'] = op
                self.target[var]['expression'] = val
Example #27
0
def shlex_quotes(value):
    '''Split *value* on whitespace while honouring ONLY double quotes;
    single quotes pass through as ordinary characters and '#' is not a
    comment.  see http://stackoverflow.com/questions/6868382/python-shlex-split-ignore-single-quotes'''
    tokenizer = shlex.shlex(value)
    tokenizer.whitespace_split = True
    tokenizer.commenters = ''
    tokenizer.quotes = '"'
    return [token for token in tokenizer]
Example #28
0
 def __enter__(self):
     """Open self.path and build a whitespace-splitting posix lexer over it;
     returns self for use as a context manager.

     The handle is kept on self.instream (presumably closed by __exit__,
     which is outside this view); error_leader is captured for messages and
     keywords starts out empty.
     """
     self.instream = open(self.path)
     self.lex = shlex.shlex(self.instream, self.path, posix=True)
     self.lex.whitespace_split = True
     self.error_leader = self.lex.error_leader()
     self.keywords = {}
     return self
Example #29
0
def smartsplit(string, sep):
    """Split while allowing escaping.

    So far, this seems to do what I expect - split at the separator,
    allow escaping via \, and allow the backslash itself to be escaped.

    One problem is that it can raise a ValueError when given a backslash
    without a character to escape. I'd really like a smart splitter
    without manually scan the string. But maybe that is exactly what should
    be done.

    Fix: the *sep* argument was previously ignored -- ',' was hard-coded as
    the extra separator; *sep* is now honoured as the docstring promises
    (callers passing sep=',' see identical behaviour).
    """
    assert string is not None   # or shlex will read from stdin
    if not six.PY3:
        # On 2.6, shlex fails miserably with unicode input
        is_unicode = isinstance(string, unicode)
        if is_unicode:
            string = string.encode('utf8')
    l = shlex.shlex(string, posix=True)
    # Split on the requested separator in addition to whitespace.
    l.whitespace += sep
    l.whitespace_split = True
    l.quotes = ''  # disable quote handling; backslash escaping still applies
    if not six.PY3 and is_unicode:
        return map(lambda s: s.decode('utf8'), list(l))
    else:
        return list(l)
Example #30
0
	def body(self):
		"""Scan self.sentences for `def`-opened device-code blocks, recording
		their tokens in self.device_py / self.device_sentences; every other
		sentence is fed through self.inspect_it into self.kernel.

		NOTE(review): ``sentence.split('\t')>1`` compares a list with an int --
		always True on Python 2 and a TypeError on Python 3; ``len(...) > 1``
		was probably intended.  Left unchanged to preserve behaviour.
		"""
		for sentence in self.sentences:
			if sentence.split('\t')>1:
				phrase = sentence.split('\t')
				# Leading empty strings count the indentation depth in tabs.
				tabs = phrase.count('')
#				print "Inside Body",phrase, tabs
				sh = shlex.shlex(phrase[-1])
				i = sh.get_token()
#				print i
				if i == "def":
#					print "In DEF"
#					print self.device_py
					# A new device function starts; remember its indent level.
					self.is_device_code = True
					self.is_defined_device.append(False)
					self.device_tab = tabs
					self.device_py.append([i])
					if self.device_py[0] == []:
						self.device_py.pop(0)
					i = sh.get_token()
					self.device_func_name.append(i)
#					print "DEC",self.device_func_name, self.device_py
					# Collect the remaining tokens of the def line.
					while i is not sh.eof:
						self.device_py[-1].append(i)
						i = sh.get_token()
					self.device_sentences.append([])
				elif self.device_tab < tabs and self.is_device_code == True:
					# Deeper-indented lines belong to the current device block.
					for j in phrase[tabs:]:
						self.device_sentences[-1].append(j)
					if self.device_sentences[0] == []:
						self.device_sentences.pop(0)
#					print "Body!!", self.device_py, self.device_sentences
				else:
					self.kernel = self.inspect_it(sentence,self.kernel)
		return
Example #31
0
def shellsplit(s, posix=True):
    """Split string *s* into shell-style tokens.

    :param s: command string to tokenize
    :param posix: use POSIX quoting/escaping rules (default True)
    :returns: list of token strings (comments are NOT stripped)
    """
    # BUG FIX: the ``posix`` argument was accepted but ignored — the
    # lexer was built with ``posix=not IS_WINDOWS`` instead, so callers
    # could never control quoting behavior.  Honor the parameter.
    lexer = shlex.shlex(s, posix=posix)
    lexer.whitespace_split = True
    # Disable '#' comment handling so '#' is an ordinary token.
    lexer.commenters = ''
    return list(lexer)
Example #32
0
 def _parse(self, file, fp, default_netrc):
     """Parse a netrc-format stream *fp* into ``self.hosts``/``self.macros``.

     :param file: file name, used only in error messages
     :param fp: open file object to tokenize
     :param default_netrc: True when parsing the user's own ~/.netrc, in
         which case ownership/permission security checks are applied
     :raises NetrcParseError: on malformed input or unsafe permissions
     """
     lexer = shlex.shlex(fp)
     # netrc values may contain almost any punctuation, so fold it all
     # into wordchars instead of letting shlex split on it.
     lexer.wordchars += '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
     lexer.commenters = lexer.commenters.replace('#', '')
     while 1:
         # Read a toplevel keyword: machine / default / macdef (or skip
         # a comment line).
         saved_lineno = lexer.lineno
         toplevel = tt = lexer.get_token()
         if not tt:
             break
         elif tt[0] == '#':
             # Discard the remainder of the comment line.
             if lexer.lineno == saved_lineno and len(tt) == 1:
                 lexer.instream.readline()
             continue
         elif tt == 'machine':
             entryname = lexer.get_token()
         elif tt == 'default':
             entryname = 'default'
         elif tt == 'macdef':
             # Macro definition: read raw lines until a blank line.
             entryname = lexer.get_token()
             self.macros[entryname] = []
             lexer.whitespace = ' \t'
             while 1:
                 line = lexer.instream.readline()
                 if not line or line == '\n':
                     lexer.whitespace = ' \t\r\n'
                     break
                 self.macros[entryname].append(line)
             continue
         else:
             raise NetrcParseError('bad toplevel token %r' % tt, file,
                                   lexer.lineno)
         login = ''
         account = password = None
         self.hosts[entryname] = {}
         while 1:
             # Follower tokens for the current entry: login/user,
             # account, password.  A new toplevel keyword (or EOF)
             # terminates the entry.
             tt = lexer.get_token()
             if tt.startswith('#') or tt in {
                     '', 'machine', 'default', 'macdef'
             }:
                 if password:
                     # Entry complete; push back the lookahead token.
                     self.hosts[entryname] = login, account, password
                     lexer.push_token(tt)
                     break
                 else:
                     raise NetrcParseError(
                         'malformed %s entry %s terminated by %s' %
                         (toplevel, entryname, repr(tt)), file,
                         lexer.lineno)
             elif tt == 'login' or tt == 'user':
                 login = lexer.get_token()
             elif tt == 'account':
                 account = lexer.get_token()
             elif tt == 'password':
                 if os.name == 'posix' and default_netrc:
                     # Security check: refuse to read passwords from a
                     # ~/.netrc not owned by the current user or with
                     # group/other permission bits set.
                     prop = os.fstat(fp.fileno())
                     if prop.st_uid != os.getuid():
                         import pwd
                         try:
                             fowner = pwd.getpwuid(prop.st_uid)[0]
                         except KeyError:
                             fowner = 'uid %s' % prop.st_uid
                         try:
                             user = pwd.getpwuid(os.getuid())[0]
                         except KeyError:
                             # BUG FIX: the fallback string had been
                             # mangled to '******', which makes this
                             # %-format raise TypeError; restore the
                             # 'uid %s' form used for fowner above.
                             user = 'uid %s' % os.getuid()
                         raise NetrcParseError(
                             '~/.netrc file owner (%s) does not match current user (%s)'
                             % (fowner, user), file, lexer.lineno)
                     if prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO):
                         raise NetrcParseError(
                             '~/.netrc access too permissive: access permissions must restrict access to only the owner',
                             file, lexer.lineno)
                 password = lexer.get_token()
             else:
                 raise NetrcParseError('bad follower token %r' % tt, file,
                                       lexer.lineno)
def _shlex_split(command):
    lex = shlex.shlex(command, posix=True)
    lex.whitespace_split = True
    lex.escape = ''
    return list(lex)
Example #34
0
def get_declarations(src: str) -> Tuple[Variables, Functions]:
    """
    Extract all variables and functions defined by a Bash script.

    If a function or a variable is defined or assigned multiple times
    in the script, only the final value is extracted. The script must not
    output anything on the standard output stream.

    :param src: source string of the considered Bash string
    :returns: a tuple containing the declared variables and functions
    """
    # Append `declare -f` / `declare -p` so that, after the script runs,
    # bash dumps the final function bodies and variable declarations on
    # stdout for us to parse.
    src += """
declare -f
declare -p
"""
    # Run with an empty environment so only the script's own
    # declarations (plus bash defaults) appear in the output.
    env: Dict[str, str] = {}

    declarations_subshell = (
        subprocess.run(  # pylint:disable=subprocess-run-check
            ["/usr/bin/env", "bash"],
            input=src.encode(),
            capture_output=True,
            env=env,
        ))

    # bash exits with status 2 on a syntax error.
    if declarations_subshell.returncode == 2:
        raise ScriptError(f"Bash syntax error\n\
{declarations_subshell.stderr.decode()}")

    if declarations_subshell.returncode != 0:
        raise ScriptError(f"Bash error\n\
{declarations_subshell.stderr.decode()}")

    declarations = declarations_subshell.stdout.decode()

    # Parse `declare` statements and function statements
    lexer = shlex.shlex(declarations, posix=True)
    # Allow '-' inside tokens so option words like "-f" lex as one token.
    lexer.wordchars = lexer.wordchars + "-"

    variables = {}
    functions = {}

    while True:
        token = lexer.get_token()

        if token == lexer.eof:
            break

        next_token = lexer.get_token()

        # NOTE(review): if the output ends right after `token`,
        # next_token is EOF (None in posix mode) and next_token[0]
        # raises TypeError — assumes well-formed `declare` output.
        if token == "declare" and next_token[0] == "-":
            # `declare -X name=value` → variable declaration.
            lexer.push_token(next_token)
            name, value = _parse_var(lexer)

            # Skip variables that bash itself defines.
            if name not in default_variables:
                variables[name] = value
        else:
            # Otherwise expect a function definition: `name () { ... }`.
            assert next_token == "("
            assert lexer.get_token() == ")"
            start, end = _parse_func(lexer)
            functions[token] = declarations[start:end]

    return variables, functions
Example #35
0
def PxarStartup(directory, verbosity):
    """Initialize and return a configured pxar API instance (Python 2).

    Reads configParameters.dat from *directory* plus the testboard, TBM,
    DAC, trim and mask parameter files it references, powers up the DTB
    and calls initDUT.

    directory -- path containing configParameters.dat and friends
    verbosity -- log level string passed to PyPxarCore
    """
    if not directory or not os.path.isdir(directory):
        print "Error: no or invalid configuration directory specified!"
        sys.exit(404)

    config = PxarConfigFile('%sconfigParameters.dat' %
                            (os.path.join(directory, "")))
    tbparameters = PxarParametersFile(
        '%s%s' % (os.path.join(directory, ""), config.get("tbParameters")))
    masks = PxarMaskFile('%s%s' %
                         (os.path.join(directory, ""), config.get("maskFile")))

    # Power settings:
    power_settings = {
        "va": config.get("va", 1.9),
        "vd": config.get("vd", 2.6),
        "ia": config.get("ia", 1.190),
        "id": config.get("id", 1.10)
    }

    # Each TBM has two cores ("a" and "b"); read one parameter file per core.
    tbmDACs = []
    for tbm in range(int(config.get("nTbms"))):
        for n in range(2):
            tbmparameters = PxarParametersFile(
                '%s%s' % (os.path.join(directory, ""),
                          config.get("tbmParameters") + "_C" + str(tbm) +
                          ("a" if n % 2 == 0 else "b") + ".dat"))
            tbmDACs.append(tbmparameters.getAll())

    print "Have DAC config for " + str(len(tbmDACs)) + " TBM cores:"
    for idx, tbmDAC in enumerate(tbmDACs):
        for key in tbmDAC:
            print "  TBM " + str(
                idx / 2) + ("a" if idx % 2 == 0 else "b"
                            ) + " dac: " + str(key) + " = " + str(tbmDAC[key])

    # init pixel list
    # Full 52x80 pixel matrix, all unmasked, trim value 15.
    pixels = list()
    for column in range(0, 52):
        for row in range(0, 80):
            p = PixelConfig(column, row, 15)
            p.mask = False
            pixels.append(p)

    rocDacs = []
    rocPixels = list()
    rocI2C = []
    # The "nrocs" entry may optionally carry an i2c address list, e.g.
    # "8 i2c: 0,1,2" — NOTE(review): addresses are read from index 2 on;
    # confirm the exact file format against PxarConfigFile.
    config_nrocs = config.get("nrocs").split()
    nrocs = int(config_nrocs[0])
    i2cs = [i for i in range(nrocs)]
    if len(config_nrocs) > 1:
        if config_nrocs[1].startswith('i2c'):
            i2cs = ' '.join(config_nrocs[2:])
            i2cs = [int(i) for i in i2cs.split(',')]
            print "Number of ROCs:", nrocs, "\b; Configured I2C's:", i2cs
    for roc in xrange(nrocs):
        # Fall back to the ROC index when no explicit i2c is configured.
        if len(i2cs) > roc:
            i2c = i2cs[roc]
        else:
            i2c = roc
        dacconfig = PxarParametersFile(
            '%s%s_C%i.dat' %
            (os.path.join(directory, ""), config.get("dacParameters"), i2c))
        trimconfig = PxarTrimFile(
            '%s%s_C%i.dat' %
            (os.path.join(directory, ""), config.get("trimParameters"), i2c),
            i2c, masks.get())
        print "We have " + str(len(
            trimconfig.getAll())) + " pixels for ROC " + str(i2c)
        rocI2C.append(i2c)
        rocDacs.append(dacconfig.getAll())
        rocPixels.append(trimconfig.getAll())

    # set pgcal according to wbc
    pgcal = int(rocDacs[0]['wbc']) + 6

    # Pattern Generator for single ROC operation:
    if int(config.get("nTbms")) == 0:
        pg_setup = (("PG_RESR", 25), ("PG_CAL", pgcal), ("PG_TRG", 16),
                    ("PG_TOK", 0))
    else:
        pg_setup = (("PG_RESR", 15), ("PG_CAL", pgcal), ("PG_TRG", 0))

    # Start an API instance from the core pxar library
    api = PyPxarCore(usbId=config.get("testboardName"), logLevel=verbosity)
    print api.getVersion()
    if not api.initTestboard(pg_setup=pg_setup,
                             power_settings=power_settings,
                             sig_delays=tbparameters.getAll()):
        print "WARNING: could not init DTB -- possible firmware mismatch."
        print "Please check if a new FW version is available"
        # NOTE(review): bare `exit` is a no-op expression — probably
        # meant exit() / sys.exit(); left unchanged.
        exit

    # hubId may be a comma-separated list; split it with shlex.
    hubid_splitter = shlex.shlex(config.get("hubId", 31))
    hubid_splitter.whitespace += ','
    hubids = list(hubid_splitter)
    api.initDUT(map(int, hubids), config.get("tbmType", "tbm08"), tbmDACs,
                config.get("rocType"), rocDacs, rocPixels, rocI2C)

    api.testAllPixels(True)
    print "Now enabled all pixels"

    print "pxar API is now started and configured."
    return api
Example #36
0
 def testPunctuationCharsReadOnly(self):
     """punctuation_chars must be readable but not rebindable."""
     chars = "/|$%^"
     lexer = shlex.shlex(punctuation_chars=chars)
     # The constructor argument is exposed unchanged...
     self.assertEqual(lexer.punctuation_chars, chars)
     # ...and assigning to the attribute afterwards must fail.
     with self.assertRaises(AttributeError):
         lexer.punctuation_chars = False
Example #37
0
def read(nml_fname, verbose=False):
    """Parse a Fortran 90 namelist file and store the contents in a ``dict``.

    >>> data_nml = f90nml.read('data.nml')"""

    # FIX: read the file inside a context manager so the handle is
    # closed even when parsing below raises (the original only closed
    # it on the success path).  shlex accepts a plain string instream,
    # so tokenization is unchanged.
    with open(nml_fname, 'r') as nml_file:
        nml_data = nml_file.read()

    f90lex = shlex.shlex(nml_data)
    f90lex.commenters = '!'
    f90lex.escapedquotes = '\'"'
    f90lex.wordchars += '.-+'  # Include floating point characters
    tokens = iter(f90lex)

    # Store groups in case-insensitive dictionary
    nmls = NmlDict()

    for t in tokens:

        # Check for classic group terminator
        if t == 'end':
            try:
                t, prior_t = next(tokens), t
            except StopIteration:
                break

        # Ignore tokens outside of namelist groups
        # NOTE(review): next() here can exhaust the stream; under
        # PEP 479 (Python 3.7+) that raises RuntimeError rather than
        # ending the loop — assumes well-formed input.
        while t != '&':
            t, prior_t = next(tokens), t

        # Current token is now '&'

        # Create the next namelist
        g_name = next(tokens)
        g_vars = NmlDict()

        v_name = None

        # Current token is either a variable name or finalizer (/, &)

        # Populate the namelist group
        while g_name:

            if not t in ('=', '%', '('):
                t, prior_t = next(tokens), t

                # Skip commas separating objects
                if t == ',':
                    t, prior_t = next(tokens), t

            # Diagnostic testing
            if verbose:
                print('  tokens: {} {}'.format(prior_t, t))

            # Set the next active variable
            if t in ('=', '(', '%'):
                v_name, v_values, t, prior_t = parse_f90var(tokens, t, prior_t)

                # Repeated assignment: merge with the earlier value(s).
                if v_name in g_vars:
                    v_prior_values = g_vars[v_name]
                    if not type(v_prior_values) is list:
                        v_prior_values = [v_prior_values]

                    v_values = merge_values(v_prior_values, v_values)

                # Unwrap empty / single-element value lists.
                if len(v_values) == 0:
                    v_values = None
                elif len(v_values) == 1:
                    v_values = v_values[0]

                g_vars[v_name] = v_values

                # Deselect variable
                v_name = None
                v_values = []

            # Finalise namelist group
            if t in ('/', '&'):

                # Append the grouplist to the namelist (including empty groups)
                if g_name in nmls:
                    g_update = nmls[g_name]

                    # Update to list of groups
                    if not type(g_update) is list:
                        g_update = [g_update]

                    g_update.append(g_vars)

                else:
                    g_update = g_vars

                nmls[g_name] = g_update

                if verbose:
                    print('{} saved with {}'.format(g_name, g_vars))

                # Reset state
                g_name, g_vars = None, None

    return nmls
Example #38
0
def main():
    """Operator-precedence parse of the fixed input "i+i-i".

    Reads productions from grammar.txt and a precedence table from
    order.csv (first row = operator headers), then runs a
    shift/reduce loop driven by the table, printing each step.
    NOTE(review): grammar.txt is never closed, and several names
    (action, var_key, start_state) may be unbound for unexpected
    inputs — documented as-is, not fixed here.
    """

    input_string = "i+i-i"
    # Token list terminated by the '$' end marker.
    input_ind = list(shlex.shlex(input_string))
    input_ind.append('$')

    master = {}
    master_list = []
    new_list = []
    non_terminals = []
    grammar = open('grammar.txt', 'r')

    # Group grammar lines: a '->' line starts a production, '%' lines
    # are continuations belonging to the current production.
    for row2 in grammar:

        if '->' in row2:

            if len(new_list) == 0:
                start_state = row2[0]
                non_terminals.append(row2[0])
                new_list = []
                new_list.append(row2.rstrip('\n'))
            else:
                master_list.append(new_list)
                del new_list
                new_list = []
                new_list.append(row2.rstrip('\n'))
                non_terminals.append(row2[0])

        elif '%' in row2:
            new_list.append(row2.rstrip('\n'))
        print(row2)

    master_list.append(new_list)

    # Flatten each production group to one string (minus '%' markers)
    # and map production-body -> left-hand-side non-terminal.
    for x in range(len(master_list)):
        for y in range(len(master_list[x])):
            master_list[x][y] = [s.replace('%', '') for s in master_list[x][y]]
            master_list[x][y] = ''.join(master_list[x][y])
            print(master_list[x][y])
            master[master_list[x][y]] = non_terminals[x]

    # Strip the "LHS->" prefix from the one key that still carries it.
    # NOTE(review): var_key/new_key keep only the LAST matching key.
    for key, value in master.items():
        if '->' in key:
            length = len(key)
            for i in range(length):
                if key[i] == '-' and key[i + 1] == ">":
                    index = i + 2
                    break
            var_key = key
            new_key = key[index:]

    print(var_key, new_key)
    var = master[var_key]
    del master[var_key]
    master[new_key] = var

    # Load the operator-precedence table; row 0 holds the operators.
    order_table = []
    with open('order.csv', 'r') as file2:
        order = csv.reader(file2)
        for row in order:
            order_table.append(row)

    operators = order_table[0]
    print(order_table)

    stack = []

    stack.append('$')

    print("Stack", "\t\t\t\t", "Input", "\t\t\t\t", "Precedence relation",
          "\t\t", "Action")

    # vlaag ("flag"): loop until input is consumed and only [$, E] remains.
    vlaag = 1
    arr1 = []
    while vlaag:
        if input_ind[0] == '$' and len(stack) == 2:
            vlaag = 0

        length = len(input_ind)

        buffer_inp = input_ind[0]
        temp1 = operators.index(str(buffer_inp))
        s = 1
        s += s
        print("stack", stack, stack[-1])
        print(len(stack))

        # Precedence is compared against the topmost TERMINAL on the
        # stack (skip a non-terminal on top).
        if stack[-1] in non_terminals:
            buffer_stack = stack[-2]
        else:
            buffer_stack = stack[-1]

        temp2 = operators.index(str(buffer_stack))
        #print buffer_inp, buffer_stack

        precedence = order_table[temp2][temp1]

        # NOTE(review): `action` stays bound to its previous value if
        # precedence is neither '<' nor '>'.
        if precedence == '<':
            action = 'shift'
        elif precedence == '>':
            action = 'reduce'

        print(stack, "\t\t", input_ind, "\t\t", precedence, "\t\t", action,
              "\n")

        if action == 'shift':
            stack.append(buffer_inp)
            input_ind.remove(buffer_inp)
            if (stack[-1:] not in operators):
                #print(stack[-1:])
                temp = ''.join(stack[1:])
                arr1.append(temp)
        elif action == 'reduce':
            # Try to match a production body against the stack top
            # (single symbol or the top three symbols).
            for key, value in master.items():
                var1 = ''.join(stack[-1:])
                var2 = ''.join(stack[-3:])
                #print(master.items())
                #print(var1,var2)
                if str(key) == str(buffer_stack):
                    stack[-1] = value
                    break
                elif key == var1 or stack[-3:] == list(var1):
                    stack[-3:] = value
                    break
                elif key == var2:
                    stack[-3:] = value
            if (stack[-1:] not in operators):

                temp = ''.join(stack[1:])
                arr1.append(temp)
        del buffer_inp, temp1, buffer_stack, temp2, precedence

        if vlaag == 0:
            print("Accepted!!")

    # Replay the recorded sentential forms for display.
    temp = ""
    l = 0
    print(arr1)
    print(input_string)
    for i in range(len(arr1)):
        if (len(arr1[i]) == 3 or len(arr1[i]) == 5 or len(arr1[i]) == 7
                or len(arr1[i]) == 9):
            if arr1[i][2] == 'i':
                temp = (arr1[i]) + input_string[l + 3:]
            elif arr1[i][2] == 'E':
                temp = (arr1[i]) + input_string[l + 3:]
                l += 2

            print(temp)

    print('E')

    return 2
def isValidSQL(input_sql):
    """Validate a SELECT / DELETE / INSERT statement (Python 2).

    Tokenizes input_sql with shlex, splits it into clause strings via
    a small state machine (state 0 = columns, 1 = tables, 2 = WHERE
    conditions, 3 = terminated by ';'), then delegates to the
    isValid* helper functions defined elsewhere in this module.
    Returns True when the statement validates, else False.
    """

    global error
    error = ""

    isValid = False
    state = 0
    warningFlag = 0
    columns_string = ""
    tables_string = ""
    conditions_string = ""
    schema_string = ""
    values_string = ""
    skipColumnCheck = False
    skipConditionsCheck = False

    lexer = shlex.shlex(input_sql, posix=True)
    #lexer.whitespace += ','
    lexer_list = list(lexer)

    #lexer_list = shlex.split(input_sql)

    print lexer_list[0].upper()
    #----- Start statement selection -----

    if lexer_list[0].upper() == "SELECT":
        checkType = "SELECT"
        # start loop
        for i in range(0, len(lexer_list)):
            #print lexer_list[i]

            #	Columns
            if state == 0:
                if lexer_list[i] == ";":
                    print "\n[ERROR isValidSQL] Unexpected query termination"
                    return False
                elif lexer_list[i].upper() == "FROM":
                    state = 1

                elif lexer_list[i].upper() != "SELECT":
                    columns_string += " "
                    columns_string += lexer_list[i]

            #	Tables
            elif state == 1:
                if lexer_list[i] == ";":
                    state = 3
                elif lexer_list[i].upper() == "WHERE":
                    state = 2
                elif lexer_list[i].upper() != "FROM":
                    tables_string += " "
                    tables_string += lexer_list[i]

            # WHERE conditions
            elif state == 2:
                if lexer_list[i] == ";":
                    state = 3
                elif lexer_list[i].upper() != "WHERE":
                    conditions_string += " "
                    conditions_string += lexer_list[i]
            elif state == 3:
                if warningFlag == 0:
                    print "\n[WARNING isValidSQL] Statements after ';' delimiter will be ignored"
                    warningFlag += 1
            else:
                print "\n[ERROR isValidSQL] Unknown state : " + str(state)
                return False

        # end loop
        # Tolerate a missing trailing ';' after the WHERE clause.
        if state == 2:
            print "\n [WARNING isValidSQL] Expected ';'"
            state = 3

    elif lexer_list[0].upper() == "DELETE":
        checkType = "DELETE"
        # start loop
        for i in range(0, len(lexer_list)):
            #print lexer_list[i]

            #	Columns
            # DELETE has no column list; just wait for FROM.
            if state == 0:
                if lexer_list[i] == ";":
                    print "\n[ERROR isValidSQL] Unexpected query termination"
                    return False
                elif lexer_list[i].upper() == "FROM":
                    state = 1
                    skipColumnCheck = True
            #	Tables
            elif state == 1:
                if lexer_list[i] == ";":
                    state = 3
                elif lexer_list[i].upper() == "WHERE":
                    state = 2
                elif lexer_list[i].upper() != "FROM":
                    tables_string += " "
                    tables_string += lexer_list[i]
                else:
                    print "\n[ERROR isValidSQL] Duplicate keyword : " + lexer_list[
                        i].upper()
                    return False
            elif state == 2:
                if lexer_list[i] == ";":
                    state = 3
                elif lexer_list[i].upper() != "WHERE":
                    conditions_string += " "
                    conditions_string += lexer_list[i]
                else:
                    print "\n[ERROR isValidSQL] Duplicate keyword : " + lexer_list[
                        i].upper()
                    return False
            elif state == 3:
                # NOTE(review): warningFlag starts at 0, so this branch
                # never prints (SELECT uses `== 0` here) — inconsistent.
                if warningFlag > 0:
                    print "\n[WARNING isValidSQL] Statements after ';' delimiter will be ignored"
                    warningFlag += 1
            else:
                print "\n[ERROR isValidSQL] Unknown state : " + str(state)
                return False
        # end loop

        if state == 2:
            print "\n [WARNING isValidSQL] Expected ';'"
            state = 3

    elif lexer_list[0].upper() == "INSERT":

        checkType = "INSERT"

        # state 1 collects the schema part, state 2 the VALUES part.
        for i in range(0, len(lexer_list)):
            #print lexer_list[i]

            #	Columns
            if state == 0:
                if lexer_list[i].upper() == "INTO":
                    state = 1
            #	Tables
            elif state == 1:
                if lexer_list[i] == ";":
                    print "\n[ERROR isValidSQL] Unexpected query termination"
                    return False
                elif lexer_list[i].upper() == "VALUES":
                    state = 2
                elif lexer_list[i].upper() != "INTO":
                    schema_string += " "
                    schema_string += lexer_list[i]
                else:
                    print "\n[ERROR isValidSQL] Duplicate keyword : " + lexer_list[
                        i].upper()
                    return False
            elif state == 2:
                # NOTE(review): this is `if`, not `elif`, so the ';'
                # token also falls through and is appended to
                # values_string — assumed tolerated downstream.
                if lexer_list[i] == ";":
                    state = 3
                if lexer_list[i].upper() != "VALUES":
                    values_string += " "
                    values_string += lexer_list[i]
                else:
                    print "\n[ERROR isValidSQL] Duplicate keyword : " + lexer_list[
                        i].upper()
                    return False
            else:
                print "\n[ERROR isValidSQL] Unknown state : " + str(state)
                return False
        # end loop

        if state == 2:
            print "\n [WARNING isValidSQL] Expected ';'"
            state = 3
    else:
        print "\n[ERROR isValidSQL] Unknown command : " + lexer_list[0].upper()
        return False
    #----- End statement selection -----

    if state != 3:
        print "\n[ERROR isValidSQL] Unexpected query structure : " + str(state)
        return False

    if checkType == "INSERT":
        #Check INSERT
        target_columns = list()
        if isValidSchemaString(schema_string, target_columns):
            if isValidValuesString(values_string, target_columns):
                return True
            else:
                #print "\n[ERROR isValidSQL] isValidValuesString failed"
                #print "\n values_string : "
                #print "\n" + values_string
                return False
        else:
            #print "\n[ERROR isValidSQL] isValidSchemaString failed"
            #print "\n schema_string : "
            #print "\n" + schema_string
            return False
    else:
        selected_tables = list()
        selected_cloumns = list()

        #Check SELECT
        #Check DELETE
        if isValidTables(tables_string, selected_tables):
            if skipColumnCheck or isValidColumns(
                    columns_string, selected_tables, selected_cloumns):
                if skipConditionsCheck or isValidConditions(
                        conditions_string, selected_tables):
                    return True
                else:
                    #print "\n[ERROR isValidSQL] isValidConditions failed"
                    #print "\n conditions_string : "
                    #print "\n" + conditions_string
                    return False
            else:
                #print "\n[ERROR isValidSQL] isValidColumns failed"
                #print "\n columns_string : "
                #print "\n" + columns_string
                return False
        else:
            #print "\n[ERROR isValidSQL ] isValidTables failed"
            #print "\n tables_string : "
            #print "\n" + tables_string
            return False
    return False
Example #40
0
def parse_if_feature_expr(s):
    """Parse a YANG if-feature expression into a nested tuple tree.

    Returns a tree of ('and'|'or'|'not', left, right) tuples (right is
    None for 'not') with identifier strings at the leaves, or None when
    the expression is malformed.  Uses an operator-precedence
    (shunting-yard style) algorithm with explicit operator/operand
    stacks; None on the operator stack marks an expression boundary.
    """
    try:
        # Encoding to ascii works for valid if-feature-exprs, since all
        # pars are YANG identifiers (or the boolean keywords).
        # The reason for this fix is that in Python < 2.7.3, shlex would return
        # erroneous tokens if a unicode string was passed.
        # Also, shlex uses cStringIO internally which doesn't handle unicode
        # characters outside the ascii range anyway.
        if sys.version < '3':
            sx = shlex.shlex(s.encode("ascii"))
        else:
            sx = shlex.shlex(s)
    except UnicodeEncodeError:
        return None
    sx.wordchars += ":-"  # need to handle prefixes and '-' in the name
    operators = [None]
    operands = []
    precedence = {'not': 3, 'and': 2, 'or': 1, None: 0}

    def x():
        # Parse a full expression: operand { (and|or) operand }.
        y()
        tok = sx.get_token()
        while tok in ('and', 'or'):
            push_operator(tok)
            y()
            tok = sx.get_token()
        sx.push_token(tok)
        # Drain operators down to the current boundary marker.
        while operators[-1] != None:
            pop_operator()

    def y():
        # Parse a single operand: 'not' expr, parenthesized expr, or id.
        tok = sx.get_token()
        if tok == 'not':
            push_operator(tok)
            x()
        elif tok == '(':
            operators.append(None)
            x()
            tok = sx.get_token()
            if tok != ')':
                raise ValueError
            operators.pop()
        elif is_identifier(tok):
            operands.append(tok)
        else:
            raise ValueError

    def push_operator(op):
        # Reduce any higher-precedence operators before pushing.
        while op_gt(operators[-1], op):
            pop_operator()
        operators.append(op)

    def pop_operator():
        # Fold the top operator with its operand(s) into a tree node;
        # unary 'not' uses None as its second child.
        op = operators.pop()
        if op == 'not':
            operands.append((op, operands.pop(), None))
        else:
            operands.append((op, operands.pop(), operands.pop()))

    def op_gt(op1, op2):
        return precedence[op1] > precedence[op2]

    def is_identifier(tok):
        return re_node_id.search(tok) is not None

    try:
        x()
        # Reject trailing garbage after a complete expression.
        if sx.get_token() != '':
            raise ValueError
        return operands[-1]
    except ValueError:
        return None
Example #41
0
# NO EVENT SHALL DOUG HELLMANN BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""Parsing strings with shlex.

"""

__version__ = "$Id$"
#end_pymotw_header

import shlex
import sys

# Python 2 demo script: tokenize the contents of a file with shlex,
# additionally treating '.' and ',' as token separators.
if len(sys.argv) != 2:
    print 'Please specify one filename on the command line.'
    sys.exit(1)

filename = sys.argv[1]
# file() is the Python 2 builtin alias for open(); 'rt' = text mode.
body = file(filename, 'rt').read()
print 'ORIGINAL:', repr(body)
print

print 'TOKENS:'
lexer = shlex.shlex(body)
# Split on periods and commas as well as whitespace.
lexer.whitespace += '.,'
for token in lexer:
    print repr(token)
Example #42
0
def adjacency_weight(adj, x, y):
    """Return the weight of the edge from node *x* to node *y* in matrix *adj*."""
    row = adj[x]
    return row[y]


def path_weight(p, adj):
    """Total weight of walking the node sequence *p* over adjacency matrix *adj*."""
    # Sum the edge weights of each consecutive pair along the path.
    return sum(adj[src][dst] for src, dst in zip(p, p[1:]))






# Python 2 script preamble (sys.maxint): read a graph description from
# stdin as whitespace-separated tokens.
parser = shlex.shlex(sys.stdin)
parser.whitespace_split = True

# First two tokens: vertex count and edge count.
vertices = int(parser.get_token())
edges = int(parser.get_token())
# (V+1)x(V+1) matrix, 1-based node indices; sys.maxint = "no edge".
adjacency = [ [ sys.maxint for i in range(vertices+1) ] for j in range(vertices+1) ]
graph = range(1, vertices+1)

# always calculate the path from the first node to the last node and back
start = 1
destination = vertices
total_weight = 0

# print first line values (DEBUGGG)
#print >> sys.stderr, "Vertices:", vertices, "Edges:", edges
Example #43
0
def getList(st):
    """Split *st* on commas and whitespace, honoring shell-style quoting."""
    lexer = shlex.shlex(st, posix=True)
    # Commas count as separators in addition to normal whitespace.
    lexer.whitespace += ','
    lexer.whitespace_split = True
    return [token for token in lexer]
Example #44
0
 def testPunctuationInWordChars(self):
     """Test that any punctuation chars are removed from wordchars"""
     lexer = shlex.shlex('a_b__c', punctuation_chars='_')
     # '_' was promoted to a punctuation char, so it must no longer be
     # part of wordchars...
     self.assertNotIn('_', lexer.wordchars)
     # ...and runs of '_' now lex as separate punctuation tokens.
     self.assertEqual(list(lexer), ['a', '_', 'b', '__', 'c'])
Example #45
0
#!/usr/bin/env
# coding:utf-8
"""
Created on 17/7/18 上午8:42

base Info
"""
__author__ = 'xiaochenwang94'
__version__ = '1.0'

import shlex

# Demo: split a comma-separated string while keeping single-quoted
# fields intact.  (Renamed the lexer variable — it previously shadowed
# the builtin ``str``.)
lexer = shlex.shlex("ab,'987,23462,sdfh',daslfjl:iosjfo", posix=True)
# Only commas delimit tokens (replaces the default whitespace set).
lexer.whitespace = ','
lexer.whitespace_split = True
b = list(lexer)
print(b)
 def __init__(
     self,
     file,
     # NOTE(review): mutable default arguments (lists) are shared across
     # calls — safe only if never mutated; left unchanged here.
     parameter_files=[
         "*.logfile.000000.out", "*logfile.*", "input.nml", "*.nml"
     ],
     exclude=[],
     nml_exclude=["tracer_diagnostics_nml"],
     ignore_files=[],
     uppercase_only=False,
 ):
     """Read Fortran namelist parameters from a model log/nml file.

     Locates a parameter file via openParameterFile, tokenizes it with
     shlex ('!' comments), and fills self.dict with
     "block%...%name" -> "value[,value...]" entries, skipping names
     matching *exclude* and namelist blocks matching *nml_exclude*.
     """
     self.dict = collections.OrderedDict()
     open_file, filename, ctime = openParameterFile(
         file, parameter_files=parameter_files, ignore_files=ignore_files)
     self.label = filename
     self.ctime = ctime
     # Compile glob patterns into one alternation regex; r"$." never
     # matches anything (empty exclude list).
     excludes = r"|".join([fnmatch.translate(x) for x in exclude]) or r"$."
     nml_excludes = r"|".join([fnmatch.translate(x)
                               for x in nml_exclude]) or r"$."
     open_file = open_file.read()
     if not isinstance(open_file, str):
         open_file = open_file.decode("utf8")
     lex = shlex.shlex(open_file)
     lex.commenters = "!"
     # Keep number/string punctuation inside single tokens.
     lex.wordchars += ".+-&\"/'"
     tokens = iter(lex)
     vals = [None]
     block = []       # stack of enclosing namelist block names
     append = False   # True right after a ',' (value list continues)
     in_namelist_block = False
     for t in tokens:
         # Cleanup the token
         # NOTE(review): "\n\s*" relies on an unescaped \s in a plain
         # string (DeprecationWarning on newer Pythons); behavior kept.
         if "\n" in t:
             t = re.sub("\n\s*", "", t)
         # Normalize double quotes to single quotes.
         if t.startswith('"'):
             t = "'" + t[1:]
         if t.endswith('"'):
             t = t[:-1] + "'"
         # "&name" opens a namelist block (unless excluded).
         if (len(t) > 1 and t.startswith("&")
                 and not re.match(nml_excludes, t[1:].lower())):
             if uppercase_only:  # Only examine uppercase namelist blocks
                 if t[1:].upper() == t[1:]:
                     block.append(t[1:].upper())
                     in_namelist_block = True
             else:
                 block.append(t[1:].lower())
                 in_namelist_block = True
         elif not in_namelist_block:
             continue
         elif t == "/":
             # "/" closes the current namelist block.
             del block[-1]
             in_namelist_block = False
         elif t == "=":
             # Start collecting values for the most recent key.
             vals = []
             append = False
         elif ((t[0].isalpha() or t[0] == "_") and len(vals) > 0
               and (not t in ("F", "T"))):
             # A bare identifier (not a logical literal) is the next key,
             # qualified by the enclosing block path with '%'.
             if len(block):
                 key = "%".join(block) + "%" + t.lower()
             else:
                 key = t.lower()
         elif t == ",":
             append = True
         elif append:
             # Continuation value after a comma: extend and re-store.
             vals.append(t)
             # if not key in exclude: self.dict[key] = ','.join(vals)
             if not re.match(excludes, key):
                 self.dict[key] = ",".join(vals)
             append = False
         else:
             vals.append(t)
             # if not key in exclude: self.dict[key] = ','.join(vals)
             if not re.match(excludes, key):
                 self.dict[key] = ",".join(vals)
Example #47
0
def parse_expression(expr, binary):
    """Parse an expression and converts it to some kind of SQL WHERE clause."""

    global has_software
    global has_network

    def add_softwares(field, binary):
        """Translate query field for softwares table."""

        global has_software
        has_software = True
        return ' %ssw.%s' % (binary, field)

    def add_networkports(field, binary):
        """Translate query field for networkports table."""

        global has_network
        has_network = True
        return ' %snp.%s' % (binary, field)

    fields = {
        'host': lambda: ' %sc.name' % binary,  # Accept both host and
        'hostname': lambda: ' %sc.name' % binary,  # hostname
        'osname': lambda: ' %sos.name' % binary,
        'osver': lambda: ' %sosv.name' % binary,
        'site': lambda: ' %sl.name' % binary,
        'domain': lambda: ' %sd.name' % binary,
        'model': lambda: ' %scm.name' % binary,
        'type': lambda: ' %sct.name' % binary,
        'vendor': lambda: ' %sm.name' % binary,
        'status': lambda: ' %ss.name' % binary,
        'entity': lambda: ' %se.name' % binary,
        'user': lambda: ' %su.name' % binary,
        'group': lambda: ' %sg.name' % binary,
        'techuser': lambda: ' %stu.name' % binary,
        'techgroup': lambda: ' %stg.name' % binary,
        'software': lambda: add_softwares('name', binary),
        'mac': lambda: add_networkports('mac', binary),
        'ip': lambda: add_networkports('ip', binary),
        'netmask': lambda: add_networkports('netmask', binary),
        'subnet': lambda: add_networkports('subnet', binary),
        'gateway': lambda: add_networkports('gateway', binary),
    }

    FIELD = 0
    OP = 1
    LITERAL = 2
    CONDOP = 3

    res = ''
    token = FIELD
    for i in list(shlex.shlex(expr)):
        if token == FIELD:
            try:
                # Try to translate the field in the expression by the DB field
                i = fields[i]()
                token = OP
            except:
                # If translateion fails, never mind... leave field as expressed
                # sys.exc_clear()   # this is gone in Python 3
                pass
        elif token == OP:
            if i != 'not':
                token = LITERAL
        elif token == LITERAL:
            # add quotes to the string if not there yet
            if not (i.startswith('"') or i.startswith("'")):
                i = "'%s" % i
            if not (i.endswith('"') or i.endswith("'")):
                i = "%s'" % i
            token = CONDOP
        elif token == CONDOP:
            token = FIELD

        res += " %s" % i

    return "(%s )" % res
Example #48
0
def DoxyfileParse(file_contents):
    """
    Parse a Doxygen source file and return a dictionary of all the values.
    Values will be strings and lists of strings.

    :param file_contents: raw bytes of the Doxyfile (decoded before lexing)
    :returns: dict mapping option names to a string (single value) or a
        list of strings; INPUT / FILE_PATTERNS / EXCLUDE_PATTERNS are
        always kept as lists; empty options are dropped
    """
    data = {}

    import shlex
    lex = shlex.shlex(instream=file_contents.decode(), posix=True)
    lex.wordchars += "*+./-:"
    # Newlines terminate a "KEY = value..." record, so remove them from the
    # whitespace set and handle them as explicit tokens below.
    lex.whitespace = lex.whitespace.replace("\n", "")
    lex.escape = ""

    token = lex.get_token()
    key = token  # the first token should be a key
    last_token = ""
    # Start in "expect a key" state: without this, a file whose very first
    # token is a key (no leading blank/comment line) crashed in
    # append_data() with a KeyError on the empty dict.
    key_token = True
    new_data = True

    def append_data(data, key, new_data, token):
        # Start a new value, or extend the previous one (used to glue
        # backslash line-continuations back together).
        if new_data or len(data[key]) == 0:
            data[key].append(token)
        else:
            data[key][-1] += token

    while token:
        if token in ['\n']:
            # A non-continued newline means the next token is a key.
            if last_token not in ['\\']:
                key_token = True
        elif token in ['\\']:
            # Continuation marker: swallowed here, resolved after get_token.
            pass
        elif key_token:
            key = token
            key_token = False
        else:
            # NOTE(review): '+' is in wordchars, so the lexer may split
            # '+=' into '+' and '='; confirm the '+=' branch is reachable
            # against real Doxyfiles.
            if token == "+=":
                if key not in data:
                    data[key] = list()
            elif token == "=":
                data[key] = list()
            else:
                append_data(data, key, new_data, token)
                new_data = True

        last_token = token
        token = lex.get_token()

        if last_token == '\\' and token != '\n':
            # Backslash not followed by a newline was part of the value:
            # glue it onto the current entry.
            new_data = False
            append_data(data, key, new_data, '\\')

    # compress lists of len 1 into single strings
    to_pop = []
    for (k, v) in data.items():
        if len(v) == 0:
            # Can't pop while iterating; remember empty entries for later.
            to_pop.append(k)

        # items in the following list will be kept as lists and not converted to strings
        if k in ["INPUT", "FILE_PATTERNS", "EXCLUDE_PATTERNS"]:
            continue

        if len(v) == 1:
            data[k] = v[0]

    for k in to_pop:
        data.pop(k)

    return data
Example #49
0
def parse_flags(self, line, uselib_store, env=None, force_static=False):
    """
    Parse the flags from the input lines, and add them to the relevant use variables::

        def configure(conf):
            conf.parse_flags('-O3', 'FOO')
            # conf.env.CXXFLAGS_FOO = ['-O3']
            # conf.env.CFLAGS_FOO = ['-O3']

    :param line: flags
    :type line: string
    :param uselib_store: where to add the flags
    :type uselib_store: string
    :param env: config set or conf.env by default
    :type env: :py:class:`waflib.ConfigSet.ConfigSet`
    :param force_static: store -l libraries as static (STLIB_*) instead of shared
    :type force_static: bool
    """

    assert isinstance(line, str)

    env = env or self.env

    # append_unique is not always possible
    # for example, apple flags may require both -arch i386 and -arch ppc

    app = env.append_value
    appu = env.append_unique
    # shlex.split() mishandles win32 paths (issue #811), so configure a
    # non-posix lexer explicitly instead.
    lex = shlex.shlex(line, posix=False)
    lex.whitespace_split = True
    lex.commenters = ''
    lst = list(lex)

    uselib = uselib_store
    while lst:
        x = lst.pop(0)
        st = x[:2]  # two-character option prefix, e.g. '-I'
        ot = x[2:]  # remainder of the token (inline argument, may be empty)

        if st == '-I' or st == '/I':
            if not ot:
                ot = lst.pop(0)
            appu('INCLUDES_' + uselib, [ot])
        elif x == '-include':
            # BUG FIX: this used to compare st (always at most 2 chars)
            # against '-include', which could never match; compare the
            # full token so '-include <header>' is actually handled.
            tmp = [x, lst.pop(0)]
            app('CFLAGS', tmp)
            app('CXXFLAGS', tmp)
        elif st == '-D' or (env.CXX_NAME == 'msvc'
                            and st == '/D'):  # not perfect but..
            if not ot:
                ot = lst.pop(0)
            app('DEFINES_' + uselib, [ot])
        elif st == '-l':
            if not ot:
                ot = lst.pop(0)
            prefix = 'STLIB_' if force_static else 'LIB_'
            appu(prefix + uselib, [ot])
        elif st == '-L':
            if not ot:
                ot = lst.pop(0)
            appu('LIBPATH_' + uselib, [ot])
        elif x.startswith('/LIBPATH:'):
            appu('LIBPATH_' + uselib, [x.replace('/LIBPATH:', '')])
        elif x == '-pthread' or x.startswith('+') or x.startswith('-std'):
            # Flags that affect compilation and linking alike.
            app('CFLAGS_' + uselib, [x])
            app('CXXFLAGS_' + uselib, [x])
            app('LINKFLAGS_' + uselib, [x])
        elif x == '-framework':
            appu('FRAMEWORK_' + uselib, [lst.pop(0)])
        elif x.startswith('-F'):
            appu('FRAMEWORKPATH_' + uselib, [x[2:]])
        elif x.startswith('-Wl'):
            app('LINKFLAGS_' + uselib, [x])
        elif x.startswith(('-m', '-f', '-dynamic')):
            app('CFLAGS_' + uselib, [x])
            app('CXXFLAGS_' + uselib, [x])
        elif x.startswith('-bundle'):
            app('LINKFLAGS_' + uselib, [x])
        elif x.startswith('-undefined'):
            # Takes its argument as a separate token.
            arg = lst.pop(0)
            app('LINKFLAGS_' + uselib, [x, arg])
        elif x.startswith(('-arch', '-isysroot')):
            tmp = [x, lst.pop(0)]
            app('CFLAGS_' + uselib, tmp)
            app('CXXFLAGS_' + uselib, tmp)
            app('LINKFLAGS_' + uselib, tmp)
        elif x.endswith(('.a', '.so', '.dylib', '.lib')):
            appu('LINKFLAGS_' + uselib, [x])  # not cool, #762
Example #50
0
    def visit_attribute(self, node):
        """check that the accessed attribute exists

        to avoid too much false positives for now, we'll consider the code as
        correct if a single of the inferred nodes has the accessed attribute.

        function/method, super call and metaclasses are ignored
        """
        # generated_members may contain regular expressions
        # (surrounded by quote `"` and followed by a comma `,`)
        # REQUEST,aq_parent,"[a-zA-Z]+_set{1,2}"' =>
        # ('REQUEST', 'aq_parent', '[a-zA-Z]+_set{1,2}')
        if isinstance(self.config.generated_members, str):
            # Tokenize the raw option string once and cache the parsed
            # tuple back into the config, so this only happens on first use.
            gen = shlex.shlex(self.config.generated_members)
            gen.whitespace += ','
            gen.wordchars += '[]-+'
            self.config.generated_members = tuple(
                tok.strip('"') for tok in gen)
        for pattern in self.config.generated_members:
            # attribute is marked as generated, stop here
            if re.match(pattern, node.attrname):
                return
        try:
            infered = list(node.expr.infer())
        except exceptions.InferenceError:
            # Cannot tell what the expression evaluates to: stay silent.
            return
        # list of (node, nodename) which are missing the attribute
        missingattr = set()
        inference_failure = False
        for owner in infered:
            # skip yes object
            if owner is astroid.YES:
                inference_failure = True
                continue

            name = getattr(owner, 'name', None)
            if _is_owner_ignored(owner, name, self.config.ignored_classes,
                                 self.config.ignored_modules):
                continue

            try:
                # The attribute counts as present only if at least one of
                # its definitions is not a mere augmented assignment
                # (e.g. "x.attr += 1" alone does not define attr).
                if not [
                        n for n in owner.getattr(node.attrname)
                        if not isinstance(n.statement(), astroid.AugAssign)
                ]:
                    missingattr.add((owner, name))
                    continue
            except AttributeError:
                # XXX method / function
                continue
            except exceptions.NotFoundError:
                # This can't be moved before the actual .getattr call,
                # because there can be more values inferred and we are
                # stopping after the first one which has the attribute in question.
                # The problem is that if the first one has the attribute,
                # but we continue to the next values which doesn't have the
                # attribute, then we'll have a false positive.
                # So call this only after the call has been made.
                if not _emit_no_member(node, owner, name,
                                       self.config.ignore_mixin_members):
                    continue
                missingattr.add((owner, name))
                continue
            # stop on the first found
            break
        # The for/else fires only when the loop was NOT broken out of,
        # i.e. no inferred owner was found to have the attribute.
        else:
            # we have not found any node with the attributes, display the
            # message for infered nodes
            done = set()
            for owner, name in missingattr:
                # Report each concrete class only once, even when several
                # instances of it were inferred.
                if isinstance(owner, astroid.Instance):
                    actual = owner._proxied
                else:
                    actual = owner
                if actual in done:
                    continue
                done.add(actual)
                confidence = INFERENCE if not inference_failure else INFERENCE_FAILURE
                self.add_message('no-member',
                                 node=node,
                                 args=(owner.display_type(), name,
                                       node.attrname),
                                 confidence=confidence)
Example #51
0
def parse_flags(self,
                line,
                uselib_store,
                env=None,
                force_static=False,
                posix=None):
    """Parse compiler/linker flags and distribute them into the *_<uselib>
    variables of the config set.

    :param line: flags to parse
    :type line: string
    :param uselib_store: suffix for the destination variables (e.g. 'FOO')
    :type uselib_store: string
    :param env: config set, conf.env by default
    :param force_static: store -l libraries as static ones (STLIB_*)
    :param posix: lexer mode; auto-detected from backslash usage when None
    """
    assert isinstance(line, str)
    env = env or self.env
    if posix is None:
        # Heuristic: use posix lexing unless the line contains lone
        # backslashes (win32 paths) that posix mode would consume.
        posix = True
        if '\\' in line:
            posix = ('\\ ' in line) or ('\\\\' in line)
    lex = shlex.shlex(line, posix=posix)
    lex.whitespace_split = True
    lex.commenters = ''
    lst = list(lex)
    app = env.append_value
    appu = env.append_unique
    uselib = uselib_store
    static = False  # toggled by -Bstatic / -Bdynamic while scanning
    while lst:
        x = lst.pop(0)
        st = x[:2]  # two-character option prefix, e.g. '-I'
        ot = x[2:]  # remainder of the token (inline argument, may be empty)
        if st == '-I' or st == '/I':
            if not ot:
                ot = lst.pop(0)
            appu('INCLUDES_' + uselib, [ot])
        elif st == '-i':
            # '-include <header>' and friends take a separate argument.
            tmp = [x, lst.pop(0)]
            app('CFLAGS', tmp)
            app('CXXFLAGS', tmp)
        elif st == '-D' or (env.CXX_NAME == 'msvc' and st == '/D'):
            if not ot:
                ot = lst.pop(0)
            app('DEFINES_' + uselib, [ot])
        elif st == '-l':
            if not ot:
                ot = lst.pop(0)
            prefix = 'STLIB_' if (force_static or static) else 'LIB_'
            appu(prefix + uselib, [ot])
        elif st == '-L':
            if not ot:
                ot = lst.pop(0)
            prefix = 'STLIBPATH_' if (force_static or static) else 'LIBPATH_'
            appu(prefix + uselib, [ot])
        elif x.startswith('/LIBPATH:'):
            prefix = 'STLIBPATH_' if (force_static or static) else 'LIBPATH_'
            appu(prefix + uselib, [x.replace('/LIBPATH:', '')])
        elif x == '-pthread' or x.startswith('+') or x.startswith('-std'):
            # Flags that affect compilation and linking alike.
            app('CFLAGS_' + uselib, [x])
            app('CXXFLAGS_' + uselib, [x])
            app('LINKFLAGS_' + uselib, [x])
        elif x == '-framework':
            appu('FRAMEWORK_' + uselib, [lst.pop(0)])
        elif x.startswith('-F'):
            appu('FRAMEWORKPATH_' + uselib, [x[2:]])
        elif x == '-Wl,-rpath' or x == '-Wl,-R':
            # BUG FIX: lstrip('-Wl,') strips any of the characters
            # '-', 'W', 'l', ',' from the left (mangling e.g. 'lib/x'
            # into 'ib/x'); remove only the '-Wl,' *prefix*.
            rpath = lst.pop(0)
            if rpath.startswith('-Wl,'):
                rpath = rpath[4:]
            app('RPATH_' + uselib, rpath)
        elif x.startswith('-Wl,-R,'):
            app('RPATH_' + uselib, x[7:])
        elif x.startswith('-Wl,-R'):
            app('RPATH_' + uselib, x[6:])
        elif x.startswith('-Wl,-rpath,'):
            app('RPATH_' + uselib, x[11:])
        elif x == '-Wl,-Bstatic' or x == '-Bstatic':
            static = True
        elif x == '-Wl,-Bdynamic' or x == '-Bdynamic':
            static = False
        elif x.startswith('-Wl'):
            app('LINKFLAGS_' + uselib, [x])
        elif x.startswith(('-m', '-f', '-dynamic')):
            app('CFLAGS_' + uselib, [x])
            app('CXXFLAGS_' + uselib, [x])
        elif x.startswith('-bundle'):
            app('LINKFLAGS_' + uselib, [x])
        elif x.startswith(('-undefined', '-Xlinker')):
            # These take their argument as a separate token.
            arg = lst.pop(0)
            app('LINKFLAGS_' + uselib, [x, arg])
        elif x.startswith(('-arch', '-isysroot')):
            tmp = [x, lst.pop(0)]
            app('CFLAGS_' + uselib, tmp)
            app('CXXFLAGS_' + uselib, tmp)
            app('LINKFLAGS_' + uselib, tmp)
        elif x.endswith(('.a', '.so', '.dylib', '.lib')):
            appu('LINKFLAGS_' + uselib, [x])
Example #52
0
    def lineReceived(self, line: str) -> None:
        """Tokenize one line of shell input and queue the parsed commands.

        Splits *line* with shlex, handling command separators (``;``,
        ``&&``, ``||``), environment-variable expansion (``$VAR`` and
        ``${VAR}``) and command substitution (``$(...)`` / backticks),
        then runs the pending commands or re-displays the prompt.
        """
        log.msg(eventid="cowrie.command.input",
                input=line,
                format="CMD: %(input)s")
        self.lexer = shlex.shlex(instream=line,
                                 punctuation_chars=True,
                                 posix=True)
        # Add these special characters that are not in the default lexer
        self.lexer.wordchars += "@%{}=$:+^,()`"

        tokens: list[str] = []

        while True:
            try:
                tok: str = self.lexer.get_token()
                # log.msg("tok: %s" % (repr(tok)))

                if tok == self.lexer.eof:
                    if tokens:
                        self.cmdpending.append(tokens)
                    break

                # For now, treat && and || same as ;, just execute without checking return code
                if tok == "&&" or tok == "||":
                    if tokens:
                        self.cmdpending.append(tokens)
                        tokens = []
                        continue
                    else:
                        self.protocol.terminal.write(
                            f"-bash: syntax error near unexpected token `{tok}'\n"
                            .encode())
                        break
                elif tok == ";":
                    if tokens:
                        self.cmdpending.append(tokens)
                        tokens = []
                        continue
                    else:
                        self.protocol.terminal.write(
                            f"-bash: syntax error near unexpected token `{tok}'\n"
                            .encode())
                        break
                elif tok == "$?":
                    # Exit status of the previous command; always report 0.
                    tok = "0"
                elif tok[0] == "(":
                    cmd = self.do_command_substitution(tok)
                    tokens = cmd.split()
                    continue
                elif "$(" in tok or "`" in tok:
                    tok = self.do_command_substitution(tok)
                elif tok.startswith("${"):
                    # BUG FIX: the ${VAR} and $VAR regexes were swapped, so
                    # neither branch could ever match its own token form and
                    # environment expansion silently never happened.
                    envRex = re.compile(r"^\$\{([_a-zA-Z0-9]+)\}$")
                    envSearch = envRex.search(tok)
                    if envSearch is not None:
                        envMatch = envSearch.group(1)
                        if envMatch in list(self.environ.keys()):
                            tok = self.environ[envMatch]
                        else:
                            # Unknown variable expands to nothing: drop token.
                            continue
                elif tok.startswith("$"):
                    envRex = re.compile(r"^\$([_a-zA-Z0-9]+)$")
                    envSearch = envRex.search(tok)
                    if envSearch is not None:
                        envMatch = envSearch.group(1)
                        if envMatch in list(self.environ.keys()):
                            tok = self.environ[envMatch]
                        else:
                            # Unknown variable expands to nothing: drop token.
                            continue

                tokens.append(tok)
            except Exception as e:
                self.protocol.terminal.write(
                    b"-bash: syntax error: unexpected end of file\n")
                # Could run runCommand here, but i'll just clear the list instead
                log.msg(f"exception: {e}")
                self.cmdpending = []
                self.showPrompt()
                return

        if self.cmdpending:
            self.runCommand()
        else:
            self.showPrompt()
Example #53
0
    def _parse(self, file, fp, default_netrc):
        """Parse a netrc-format stream *fp* into self.hosts / self.macros.

        :param file: file name, used only for error messages
        :param fp: open file object positioned at the start
        :param default_netrc: True when parsing the default ~/.netrc, which
            enables the POSIX ownership/permission checks on entries that
            carry a password
        :raises NetrcParseError: on malformed input or insecure file modes
        """
        lexer = shlex.shlex(fp)
        # Netrc values may contain nearly any punctuation; treat it all as
        # word characters so tokens are not split apart.
        lexer.wordchars += r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
        # '#' must not start a lexer-level comment; it is handled manually
        # below so the rest of the physical line can be skipped precisely.
        lexer.commenters = lexer.commenters.replace('#', '')
        while True:
            # Look for a machine, default, or macdef top-level keyword
            toplevel = tt = lexer.get_token()
            if not tt:
                break
            elif tt[0] == '#':
                # seek to beginning of comment, in case reading the token put
                # us on a new line, and then skip the rest of the line.
                pos = len(tt) + 1
                lexer.instream.seek(-pos, 1)
                lexer.instream.readline()
                continue
            elif tt == 'machine':
                entryname = lexer.get_token()
            elif tt == 'default':
                entryname = 'default'
            elif tt == 'macdef':                # Just skip to end of macdefs
                entryname = lexer.get_token()
                self.macros[entryname] = []
                # Newlines delimit the macro body, so stop treating them as
                # token separators until the terminating blank line.
                lexer.whitespace = ' \t'
                while True:
                    line = lexer.instream.readline()
                    if not line or line == '\012':
                        lexer.whitespace = ' \t\r\n'
                        break
                    self.macros[entryname].append(line)
                continue
            else:
                raise NetrcParseError(
                    "bad toplevel token %r" % tt, file, lexer.lineno)

            # We're looking at start of an entry for a named machine or default.
            login = ''
            account = password = None
            self.hosts[entryname] = {}
            while True:
                tt = lexer.get_token()
                if (tt.startswith('#') or
                    tt in {'', 'machine', 'default', 'macdef'}):
                    # End of this entry: EOF, a comment, or the next
                    # top-level keyword.  A password is required to record it.
                    if password:
                        self.hosts[entryname] = (login, account, password)
                        lexer.push_token(tt)
                        break
                    else:
                        raise NetrcParseError(
                            "malformed %s entry %s terminated by %s"
                            % (toplevel, entryname, repr(tt)),
                            file, lexer.lineno)
                elif tt == 'login' or tt == 'user':
                    login = lexer.get_token()
                elif tt == 'account':
                    account = lexer.get_token()
                elif tt == 'password':
                    # Reading a password from the default ~/.netrc on POSIX
                    # requires the file to be private to the current user.
                    if os.name == 'posix' and default_netrc:
                        prop = os.fstat(fp.fileno())
                        if prop.st_uid != os.getuid():
                            try:
                                fowner = pwd.getpwuid(prop.st_uid)[0]
                            except KeyError:
                                fowner = 'uid %s' % prop.st_uid
                            try:
                                user = pwd.getpwuid(os.getuid())[0]
                            except KeyError:
                                # BUG FIX: this format string had been
                                # mangled to '******' (no placeholder),
                                # which raises TypeError when the uid has
                                # no passwd entry; restore 'uid %s'.
                                user = 'uid %s' % os.getuid()
                            raise NetrcParseError(
                                ("~/.netrc file owner (%s) does not match"
                                 " current user (%s)") % (fowner, user),
                                file, lexer.lineno)
                        if (prop.st_mode & (stat.S_IRWXG | stat.S_IRWXO)):
                            raise NetrcParseError(
                               "~/.netrc access too permissive: access"
                               " permissions must restrict access to only"
                               " the owner", file, lexer.lineno)
                    password = lexer.get_token()
                else:
                    raise NetrcParseError("bad follower token %r" % tt,
                                          file, lexer.lineno)
Example #54
0
    def __lineParseMCP(self, line):
        """Process an out-of-band message.

        Dispatches on the message word: ':' ends a multi-line stanza,
        '*' continues one, anything else is a simple message whose
        "key: value" pairs are parsed here and either dispatched
        immediately or, when multi-line values are announced, stored in
        self.inProgress until the stanza completes.
        """

        # Drop the "#$#" out-of-band prefix; the rest is the MCP payload.
        line = line[len(MCP.MCP_HEADER):]

        try:
            (message, line) = re.split(r'\s+', line, 1)
        except ValueError:
            # No whitespace at all: the whole line is the message name.
            (message, line) = (line, '')

        if message == ':':  # end of multi-line stanza
            self.__multiKeyEnd(line)

        elif message == '*':  # continuation of multi-line stanza
            self.__multiKeyContinue(line)

        else:  # simple message
            # "#$#message authkey [k: v [...]]"
            inProgress = MCP.__InProgress(message)
            multiline = False

            if self.version:
                # After version negotiation every message must echo the
                # session auth key; silently drop mismatches.
                try:
                    (authKey, line) = re.split(r'\s+', line, 1)
                except ValueError:
                    (authKey, line) = (line, '')

                if authKey != self.authKey:
                    logger.warning("ignoring message with foreign key '%s'",
                                   authKey)
                    return

            # Values may be double-quoted; split the remainder into
            # whitespace-separated tokens with quote handling.
            lexer = shlex.shlex(line, posix=True)
            lexer.commenters = ''
            lexer.quotes = '"'
            lexer.whitespace_split = True
            try:
                for key in lexer:
                    # keys are case-insensitive, normalize here
                    key = key.lower()

                    # Every key token must end with ':'; strip it off.
                    if key[-1] != ':':
                        logger.warning("message '%s' could not parse key '%s'",
                                       message, key)
                        return
                    key = key[:-1]

                    if key[0] not in string.ascii_lowercase:
                        logger.warning(
                            "message '%s' ignored due to invalid key '%s'",
                            message, key)
                        return
                    if not set(key).issubset(MCP.KEY_SET):
                        logger.warning(
                            "message '%s' ignored due to invalid key '%s'",
                            message, key)
                        return

                    # Each key must be followed by exactly one value token.
                    try:
                        value = next(lexer)
                    except StopIteration:
                        logger.warning(
                            "message '%s' has key '%s' without value", message,
                            key)
                        return

                    # NOTE(review): ':' was already stripped above, so this
                    # tests the last character of the bare key (e.g. 'foo*'
                    # from wire form 'foo*:').  For this branch to be
                    # reachable, '*' must pass the KEY_SET check above —
                    # confirm against MCP.KEY_SET.
                    if key[-1] == '*':
                        key = key[:-1]
                        if key in inProgress.multiData or key in inProgress.data:
                            logger.warning(
                                "message '%s' ignoring duplicate key '%s'",
                                message, key)
                            continue
                        inProgress.multiData[key] = []
                        multiline = True
                    else:
                        if key in inProgress.data or key in inProgress.multiData:
                            logger.warning(
                                "message '%s' ignoring duplicate key '%s'",
                                message, key)
                            continue
                        inProgress.data[key] = value

            except ValueError:
                # shlex raises ValueError on e.g. unterminated quotes.
                logger.warning("message '%s' has unparsable data", message)
                return

            if multiline:
                # Multi-line stanzas are keyed by their _data-tag and
                # finished later by the ':' / '*' branches above.
                if '_data-tag' not in inProgress.data:
                    logger.warning(
                        "ignoring message with multi-line variables but no _data-tag"
                    )
                    return
                self.inProgress[inProgress.data['_data-tag']] = inProgress
                self.messageUpdate(inProgress.data['_data-tag'], None)
            else:
                self.__dispatchMessage(inProgress.message,
                                       inProgress.allData())
 def __init__(
     self,
     file,
     parameter_files=None,
     exclude=None,
     ignore_files=None,
     model_name=None,
 ):
     """Parse a MOM parameter file into an ordered dict of key/value strings.

     :param file: path handed to openParameterFile()
     :param parameter_files: glob patterns of candidate parameter files;
         defaults to the standard MOM_parameter_doc/MOM_input names
     :param exclude: keys to omit from the resulting dict
     :param ignore_files: file names skipped by openParameterFile()
     :param model_name: optional prefix prepended to every key as
         "<model_name>%<key>"
     """
     # Avoid the mutable-default-argument pitfall: build fresh defaults
     # per call instead of sharing one list across all instances.
     if parameter_files is None:
         parameter_files = [
             "*MOM_parameter_doc.all",
             "*MOM_parameter_doc.short",
             "MOM_input",
         ]
     if exclude is None:
         exclude = []
     if ignore_files is None:
         ignore_files = []
     self.dict = collections.OrderedDict()
     open_file, filename, ctime = openParameterFile(
         file, parameter_files=parameter_files, ignore_files=ignore_files)
     self.label = filename
     self.ctime = ctime
     open_file = open_file.read()
     if not isinstance(open_file, str):
         open_file = open_file.decode("utf8")
     lex = shlex.shlex(open_file)
     lex.commenters = "!"
     # Keep '%' (namelist block separator) and numeric characters inside
     # single tokens.
     lex.wordchars += ".+-%"
     tokens = iter(lex)
     vals = []
     block = []
     lhs = True  # True while scanning the key side of "key = value"
     append = False  # True right after a ',' (multi-value assignment)
     for t in tokens:
         t = str(t)
         # if t.startswith('"'): t = "'"+t[1:] # Avoids escaping double quotes in JSON
         # if t.endswith('"'): t = t[:-1]+"'" # Avoids escaping double quotes in JSON
         if t.endswith("%"):
             # Entering a derived-type block: remember the prefix.
             block.append(t)
             lhs = True
         elif t.startswith("%"):
             # Leaving the current derived-type block.
             del block[-1]
             lhs = True
         elif (t == "=") and lhs:
             vals = []
             lhs = False
         elif (t == "=") and not lhs:
             raise Exception("Not lhs")
         elif t == ",":
             append = True
         elif append:
             # Continuation of a comma-separated value list.
             vals.append(t)
             if key not in exclude:
                 self.dict[key] = ",".join(vals)
             append = False
         elif lhs:
             # Token on the key side: build the (possibly block-prefixed)
             # key name.
             if len(block):
                 key = "".join(block) + t
             else:
                 key = t
             if model_name is not None:
                 key = model_name + "%" + key
         else:
             # First token of the value side.
             vals.append(t)
             if key not in exclude:
                 self.dict[key] = ",".join(vals)
             lhs = True
Example #56
0
    def __get_value(self, line, key):
        """
        Get the 'value' of a kv pair for the key given, from the line given

        :param line: the line to search in
        :type line: String
        :param key: the key for the value
        :type key: String

        :returns: String containing the value or None
        """
        # Check if the line is of type:
        #     <attribute name> = <value>
        # str.split() always returns a list (never None); guard on length
        # so empty lines or a bare key cannot raise IndexError below.
        line_list_spaces = line.split()
        if len(line_list_spaces) > 1:
            first_word = line_list_spaces[0]
            if key == first_word:
                # Check that this word is followed by an '=' sign
                equals_sign = line_list_spaces[1]
                if equals_sign == "=":
                    # Ok, we are going to assume that this is enough to
                    # determine that this is the correct type
                    # return everything after the '=" as value
                    val_index = line.index("=") + 1
                    value = line[val_index:].strip()
                    return value

        # Check that a valid instance of this key exists in the string
        kv = self.__verify_key(line, key)
        if kv is None:
            return None
        key_index, val_char = kv

        # Assumption: the character before the key is the delimiter
        # for the k-v pair
        delimiter = line[key_index - 1]
        if delimiter is None:
            # NOTE(review): indexing a str never yields None, so this is a
            # dead branch; kept as a harmless safety net.
            # Hard luck, now there's no way to know, let's just assume
            # that space is the delimiter and hope for the best
            delimiter = " "

        # Determine the value's start index
        index_after_key = key_index + len(key)
        value_index = line[index_after_key:].find(val_char) + index_after_key

        # Get the value
        lexer = shlex.shlex(line[value_index:], posix=True)
        lexer.whitespace = delimiter
        lexer.whitespace_split = True
        try:
            value = lexer.get_token()
        except ValueError:
            # Sometimes, the data can be incoherent with things like
            # Unclosed quotes, which makes get_token() throw an exception
            # Just return None
            return None

        # Strip the value of any trailing whitespaces (like newlines)
        value = value.rstrip()

        return value
Example #57
0
def _extract_sm_sample_meta(file_or_handle):
    """Extract the !Sample_* metadata lines from a GEO series-matrix file.

    :param file_or_handle: path to a .gz/.bz2/plain-text file, or an
        already-open file handle
    :returns: tab-split matrix (list of lists of str), one row per
        !Sample_* header line
    """
    # Return a tab-delimited matrix of the results.
    import gzip
    import shlex
    import filelib

    # Sometimes there are \r embedded within quotes.  Split lines
    # based on \n.
    if isinstance(file_or_handle, str):
        filename = file_or_handle
        if filename.endswith(".gz"):
            handle = gzip.open(filename, 'rb')
        elif filename.endswith(".bz2"):
            cmd = "bzcat '%s'" % filename
            w, r, e = filelib._my_popen(cmd)
            w.close()
            e.close()
            handle = r
        else:
            # Assume text file.
            handle = open(filename)
    else:
        handle = file_or_handle
    contents = handle.read()
    # The gzip/bzcat paths return bytes on Python 3, which would make
    # split("\n") fail; normalize to str first.
    if isinstance(contents, bytes):
        contents = contents.decode("utf-8", "replace")
    lines = contents.split("\n")

    # Sometimes if a line is too long, it will be continued on the
    # next line, e.g.
    # !Sample_extract_protocol_ch1  <TEXT>  <TEXT>  <TEXT>
    # !Sample_extract_protocol_ch1  <CONT>  <CONT>  <CONT>
    # !Sample_extract_protocol_ch1          <CONT>  <CONT>
    # !Sample_extract_protocol_ch1          <CONT>  <CONT>
    #
    # Notes:
    # o It seems like the !Sample_<name> is consistent.
    # o Some of the columns can be blank if there is no continuation.
    # o Sometimes the close quotes are missing from <TEXT> or <CONT>.
    matrix = []
    num_cols = None
    for line in lines:
        line = line.rstrip("\n")

        if not line.startswith("!Sample"):
            continue

        # Split by \t.
        # Use shlex because sometimes there are tabs within the
        # quotes.  However, this will fail if the close quote is
        # missing.  If this is the case, parse without shlex and hope
        # there aren't any internal tabs.
        if not line.endswith('"'):
            cols = line.split("\t")
        else:
            lexer = shlex.shlex(line)
            lexer.whitespace = "\t"
            lexer.whitespace_split = True
            cols = list(lexer)
        # Sometimes shlex will mess up if there are quotes within
        # strings.  If this happens, fall back on just splitting on
        # tabs.
        if num_cols and len(cols) != num_cols:
            cols = line.split("\t")
        if line.startswith("!Sample_title"):
            # A new sample table starts: re-learn the column count.
            num_cols = None
        if num_cols is None:
            num_cols = len(cols)
        assert len(cols) == num_cols, "%s: has %d expect %d" % (
            cols[0], len(cols), num_cols)
        cols = [_remove_quotes(x) for x in cols]
        cols = [x.replace("\t", " ").strip() for x in cols]
        cols = [x.replace("\r", " ").strip() for x in cols]
        matrix.append(cols)
    return matrix
Example #58
0
 def do_test(self, arg):
     """Tokenize *arg* with shlex and print each token (debug helper).

     :param arg: the string to tokenize
     :returns: None
     """
     lex = shlex.shlex(arg)
     for tok in lex:
         # print(x) replaces the Python-2-only "print x" statement; the
         # call form behaves identically on both Python 2 and 3 here.
         print(tok)
def isValidTables(tables_string, selected_tables):
    """Validate a comma-separated "<table> [AS <alias>], ..." list.

    Tokenizes *tables_string* with a posix-mode shlex lexer and walks a
    four-state machine over the tokens.  Each recognized table's tuple from
    loadTables("ALL_TABLES") is appended to *selected_tables* (mutated in
    place); when a valid alias follows AS, it replaces the first element of
    the tuple just appended.

    Returns True when the whole string is well formed, False otherwise
    (an error message is printed for the first offending token).
    """
    # Load the known table definitions: tuples whose first element is the
    # table name.
    table_list = loadTables("ALL_TABLES")

    #	States:
    #	0	expect a known table name
    #	1	got a table: expect ',' (next table) or the AS keyword
    #	2	got AS: expect a valid alias
    #	3	got an alias: expect ',' ( -> 0)
    state = 0

    # posix-mode shlex with default settings emits ',' and identifiers as
    # separate tokens, which is exactly what the state machine consumes.
    lexer = shlex.shlex(tables_string, posix=True)

    for token in lexer:

        if state == 0:
            table_alias = token.upper()
            match_idx = next((idx for idx, entry in enumerate(table_list)
                              if entry[0].upper() == table_alias), -1)

            if match_idx == -1:
                print "\n[ERROR] Expected a valid target table. Read the goddamn schema man, what the hell is " + str(
                    token.upper()) + "?"
                return False
            selected_tables.append(table_list[match_idx])
            state = 1

        #Check for comma or AS
        elif state == 1:
            if token == ",":
                state = 0
            elif token.upper() == "AS":
                state = 2
            else:
                print "\n[ERROR] Expected ',' or AS keyword, but instead we got this shit : " + str(
                    token.upper())
                return False

        #Check for alias validity
        elif state == 2:
            candidate_alias = token.upper()
            #check if alias is an existing alias or table name
            if not isValidAlias(candidate_alias, selected_tables):
                return False
            # Rebuild the last appended tuple with the alias as its
            # display name; the remaining fields are kept as-is.
            last = selected_tables[-1]
            selected_tables[-1] = (candidate_alias, last[1], last[2])
            state = 3

        elif state == 3:
            if token == ",":
                state = 0
            else:
                print "\n[ERROR] Expected ','"
                return False

        else:
            print "\n[ERROR] Unknown state : " + str(state)
            return False

    # Valid only when the input ended right after a table (1) or an
    # alias (3) — never mid-clause or on a trailing comma.
    return state in (1, 3)
    def _parse_os_release_content(lines):
        """
        Parse the lines of an os-release file.

        Parameters:

        * lines: Iterable through the lines in the os-release file.
                 Each line must be a unicode string or a UTF-8 encoded byte
                 string.

        Returns:
            A dictionary containing all information items.
        """
        props = {}
        lexer = shlex.shlex(lines, posix=True)
        lexer.whitespace_split = True

        # The shlex module defines its `wordchars` variable using literals,
        # making it dependent on the encoding of the Python source file.
        # In Python 2.6 and 2.7, the shlex source file is encoded in
        # 'iso-8859-1', and the `wordchars` variable is defined as a byte
        # string. This causes a UnicodeDecodeError to be raised when the
        # parsed content is a unicode object. The following fix resolves that
        # (... but it should be fixed in shlex...):
        if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
            lexer.wordchars = lexer.wordchars.decode('iso-8859-1')

        tokens = list(lexer)
        for token in tokens:
            # At this point, all shell-like parsing has been done (i.e.
            # comments processed, quotes and backslash escape sequences
            # processed, multi-line values assembled, trailing newlines
            # stripped, etc.), so the tokens are now either:
            # * variable assignments: var=value
            # * commands or their arguments (not allowed in os-release)
            if '=' in token:
                k, v = token.split('=', 1)
                if isinstance(v, bytes):
                    v = v.decode('utf-8')
                props[k.lower()] = v
            else:
                # Ignore any tokens that are not variable assignments
                pass

        if 'version_codename' in props:
            # os-release added a version_codename field.  Use that in
            # preference to anything else Note that some distros purposefully
            # do not have code names.  They should be setting
            # version_codename=""
            props['codename'] = props['version_codename']
        elif 'ubuntu_codename' in props:
            # Same as above but a non-standard field name used on older Ubuntus
            props['codename'] = props['ubuntu_codename']
        elif 'version' in props:
            # If there is no version_codename, parse it from the version
            codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
            if codename:
                codename = codename.group()
                codename = codename.strip('()')
                codename = codename.strip(',')
                codename = codename.strip()
                # codename appears within paranthese.
                props['codename'] = codename

        return props