Example 1
    def __init__(self, name, description="", *values):
        self.__name = utils.unicodify(name)
        self.__description = utils.unicodify(description)
        self.resolved_num = 0
        self.__values = list()
        self.__non_freeze = False
        self.__values_are_frozen = False
        self.__freeze_values_on_first_resolve = False
        ConfigVar.extend(self, values)  # explicit call so ConstConfigVar can be initialized
Example 2
def read_file_or_url(in_file_or_url, path_searcher=None, encoding='utf-8', save_to_path=None, checksum=None):
    need_to_download = not utils.check_file_checksum(save_to_path, checksum)
    if not need_to_download:
        # if save_to_path contains the correct data just read it by recursively
        # calling read_file_or_url
        return read_file_or_url(save_to_path, encoding=encoding)
    match = protocol_header_re.match(in_file_or_url)
    if not match:  # it's a local file
        local_file_path = in_file_or_url
        if path_searcher is not None:
            local_file_path = path_searcher.find_file(local_file_path)
        if local_file_path:
            if 'Win' in utils.get_current_os_names():
                local_file_path = os.path.abspath(local_file_path)
            else:
                local_file_path = os.path.realpath(local_file_path)
        else:
            raise FileNotFoundError("Could not locate local file", in_file_or_url)
        if encoding is None:
            with open(local_file_path, "rb") as fd:
                buffer = fd.read()
        else:
            with open(local_file_path, "r", encoding=encoding) as fd:
                buffer = fd.read()
    else:
        session = pyinstl.connectionBase.connection_factory().get_session(in_file_or_url)
        response = session.get(in_file_or_url, timeout=(33.05, 180.05))
        response.raise_for_status()
        buffer = response.text
    buffer = utils.unicodify(buffer) # make sure text is unicode
    if save_to_path and in_file_or_url != save_to_path:
        with open(save_to_path, "w") as wfd:
            wfd.write(buffer)
    return buffer
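The function above branches on protocol_header_re, which is not defined in these snippets. A minimal stand-in, offered purely as an assumption about its intent (treat anything starting with a URL scheme as remote), could look like this:

import re

# Assumed definition, not the project's actual one: match a URL scheme prefix
# such as "https://" or "ftp://".
protocol_header_re = re.compile(r"^[a-zA-Z][a-zA-Z0-9+.\-]*://")

print(bool(protocol_header_re.match("https://example.com/file.txt")))  # True  -> download
print(bool(protocol_header_re.match("/usr/local/file.txt")))           # False -> local file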
Example 3
def read_from_file_or_url(in_url,
                          config_vars,
                          translate_url_callback=None,
                          expected_checksum=None,
                          encoding='utf-8'):
    """ Read a file from local disk or url. Check checksum if given.
        If test against either sig or checksum fails - raise IOError.
        Return: file contents.
    """
    with open_for_read_file_or_url(in_url,
                                   config_vars,
                                   translate_url_callback,
                                   encoding=encoding) as open_file:
        contents_buffer = open_file.fd.read()
        if encoding is not None:  # when reading from url we're not sure what the encoding is
            contents_buffer = utils.unicodify(contents_buffer,
                                              encoding=encoding)
        # check the checksum only if one was given
        if expected_checksum is not None:
            if len(contents_buffer) == 0:
                raise IOError(
                    f"Empty contents returned from {in_url} ; expected checksum: {expected_checksum} ; encoding: {encoding}"
                )
            if encoding is not None:
                raise IOError(
                    f"Checksum check requested for {in_url} but encoding is not None, encoding: {encoding} ; expected checksum: {expected_checksum}"
                )
            buffer_ok = utils.check_buffer_checksum(contents_buffer,
                                                    expected_checksum)
            if not buffer_ok:
                actual_checksum = utils.get_buffer_checksum(contents_buffer)
                raise IOError(
                    f"Checksum mismatch {in_url} expected checksum:  {expected_checksum} actual checksum: {actual_checksum} encoding: {encoding}"
                )
    return contents_buffer
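Note that read_from_file_or_url above only accepts a checksum check on raw bytes: passing expected_checksum together with a non-None encoding raises IOError. A hypothetical call illustrating that constraint (the import path, URL and checksum value are assumptions, not taken from the source):

# hypothetical import path and values, for illustration only
from utils import read_from_file_or_url

raw_bytes = read_from_file_or_url(
    "https://example.com/index.yaml",  # hypothetical URL
    config_vars={},                    # whatever mapping the caller normally passes through
    expected_checksum="0123abcd...",   # hypothetical checksum string
    encoding=None,                     # required: a checksum plus a non-None encoding raises IOError
)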
Example 4
    def pluralize(self, word):
        '''
        Pluralizes Spanish nouns.
        Input string can be Unicode (e.g. u"palabra"), or a str encoded in UTF-8 or Latin-1.
        Output string will be encoded the same way as the input.
        '''

        word, origType = utils.unicodify(word)  # all internal calculations are done in Unicode

        rules = [
            [u'(?i)([aeiou])x$', u'\\1x'],
            # This could fail if the word is oxytone.
            [u'(?i)([áéíóú])([ns])$', u'|1\\2es'],
            [u'(?i)(^[bcdfghjklmnñpqrstvwxyz]*)an$', u'\\1anes'],  # clan->clanes
            [u'(?i)([áéíóú])s$', u'|1ses'],
            [u'(?i)(^[bcdfghjklmnñpqrstvwxyz]*)([aeiou])([ns])$', u'\\1\\2\\3es'],  # tren->trenes
            [u'(?i)([aeiouáéó])$', u'\\1s'],  # casa->casas, padre->padres, papá->papás
            [u'(?i)([aeiou])s$', u'\\1s'],    # atlas->atlas, virus->virus, etc.
            [u'(?i)([éí])(s)$', u'|1\\2es'],  # inglés->ingleses
            [u'(?i)z$', u'ces'],              # luz->luces
            [u'(?i)([íú])$', u'\\1es'],       # ceutí->ceutíes, tabú->tabúes
            [u'(?i)(ng|[wckgtp])$', u'\\1s'], # Anglicisms such as puenting, frac, crack, show (in which cases could this fail?)
            [u'(?i)$', u'es']  # ELSE +es (e.g. árbol->árboles)
        ]

        lower_cased_word = word.lower()

        for uncountable_word in self.non_changing_words:
            if lower_cased_word[-1 * len(uncountable_word):] == uncountable_word:
                return utils.deunicodify(word, origType)

        for irregular_singular, irregular_plural in self.irregular_words.iteritems():
            match = re.search(u'(?i)(^' + irregular_singular + u')$', word, re.IGNORECASE)
            if match:
                result = re.sub(u'(?i)' + irregular_singular + u'$', match.expand(u'\\1')[0] + irregular_plural[1:], word)
                return utils.deunicodify(result, origType)

        for rule in rules:
            match = re.search(rule[0], word, re.IGNORECASE)
            if match:
                groups = match.groups()
                replacement = rule[1]
                if re.match(u'\|', replacement):
                    for k in range(1, len(groups)):
                        replacement = replacement.replace(u'|' + unicode(
                            k), self.string_replace(groups[k - 1], u'ÁÉÍÓÚáéíóú', u'AEIOUaeiou'))

                result = re.sub(rule[0], replacement, word)
                # This puts an accent on nouns that become proparoxytone when
                # pluralized, such as esmóquines, jóvenes...
                match = re.search(u'(?i)([aeiou]).{1,3}([aeiou])nes$', result)

                if match and len(match.groups()) > 1 and not re.search(u'(?i)[áéíóú]', word):
                    result = result.replace(match.group(0), self.string_replace(
                        match.group(1), u'AEIOUaeiou', u'ÁÉÍÓÚáéíóú') + match.group(0)[1:])

                return utils.deunicodify(result, origType)

        return utils.deunicodify(word, origType)
Example 5
    def singularize(self, word):
        '''
        Singularizes Spanish nouns.
        Input string can be Unicode (e.g. u"palabras"), or a str encoded in UTF-8 or Latin-1.
        Output string will be encoded the same way as the input.
        '''

        word, origType = utils.unicodify(word)  # all internal calculations are done in Unicode

        rules = [
            [ur'(?i)^([bcdfghjklmnñpqrstvwxyz]*)([aeiou])([ns])es$', u'\\1\\2\\3'],
            [ur'(?i)([aeiou])([ns])es$', u'~1\\2'],
            [ur'(?i)shes$', u'sh'],             # flashes->flash
            [ur'(?i)oides$', u'oide'],          # androides->androide
            [ur'(?i)(sis|tis|xis)$', u'\\1'],   # crisis, apendicitis, praxis
            [ur'(?i)(é)s$', u'\\1'],            # bebés->bebé
            [ur'(?i)(ces)$', u'z'],             # luces->luz
            [ur'(?i)([^e])s$', u'\\1'],         # casas->casa
            [ur'(?i)([bcdfghjklmnñprstvwxyz]{2,}e)s$', u'\\1'],  # cofres->cofre
            [ur'(?i)([ghñptv]e)s$', u'\\1'],    # llaves->llave, radiocasetes->radiocasete
            [ur'(?i)jes$', u'je'],              # ejes->eje
            [ur'(?i)ques$', u'que'],            # tanques->tanque
            [ur'(?i)es$', u'']                  # ELSE remove _es_  monitores->monitor
        ]

        lower_cased_word = word.lower()

        for uncountable_word in self.non_changing_words:
            if lower_cased_word[-1 * len(uncountable_word):] == uncountable_word:
                return utils.deunicodify(word, origType)

        for irregular_singular, irregular_plural in self.irregular_words.iteritems():
            match = re.search(u'(^' + irregular_plural + u')$', word, re.IGNORECASE)
            if match:
                result = re.sub(u'(?i)' + irregular_plural + u'$', match.expand(u'\\1')[0] + irregular_singular[1:], word)
                return utils.deunicodify(result, origType)

        for rule in rules:
            match = re.search(rule[0], word, re.IGNORECASE)
            if match:
                groups = match.groups()
                replacement = rule[1]
                if re.match(u'~', replacement):
                    for k in range(1, len(groups)):
                        replacement = replacement.replace(u'~' + unicode(
                            k), self.string_replace(groups[k - 1], u'AEIOUaeiou', u'ÁÉÍÓÚáéíóú'))

                result = re.sub(rule[0], replacement, word)
                # This is a possible workaround for the double-accent
                # problem. A bit hacky, but it works
                match = re.search(u'(?i)([áéíóú]).*([áéíóú])', result)

                if match and len(match.groups()) > 1 and not re.search(u'(?i)[áéíóú]', word):
                    result = self.string_replace(
                        result, u'ÁÉÍÓÚáéíóú', u'AEIOUaeiou')

                return utils.deunicodify(result, origType)

        return utils.deunicodify(word, origType)
Example 6
    def find_cmd_tool(self, tool_to_find_var_name):
        """ locate the path to a cmd.exe tool on windows, if found put the full path in variable
        :param tool_to_find_var_name: variable name of tool or full path to tool
        :return: the path to the tool
        """
        tool_path = None
        if tool_to_find_var_name in var_stack:
            original_tool_value = var_stack.ResolveVarToStr(tool_to_find_var_name)
            # first try the variable, could be that the tool was already found
            if os.path.isfile(original_tool_value):
                tool_path = original_tool_value

            if tool_path is None:
                # next try to ask the system using the where command
                try:
                    where_tool_path = subprocess.check_output("where " + original_tool_value).strip()
                    where_tool_path = utils.unicodify(where_tool_path)
                    if os.path.isfile(where_tool_path):
                        tool_path = where_tool_path
                        var_stack.set_var(tool_to_find_var_name, "find_cmd_tool").append(tool_path)
                except Exception:
                    pass # never mind, we'll try on our own

            if tool_path is None:
                win_paths = utils.unique_list()
                # try to find the tool in the PATH variable
                if "PATH" in os.environ:
                    # remove newline characters that might lurk in the path (see tech support case 143589)
                    adjusted_path = re.sub('[\r\n]',"?",utils.unicodify(os.environ["PATH"]))
                    win_paths.extend(adjusted_path.split(";"))
                else:
                    print("PATH was not found in environment variables")
                # also add some known locations in case the user's PATH variable was altered
                if "SystemRoot" in os.environ:
                    system_root = utils.unicodify(os.environ["SystemRoot"])
                    known_locations = (os.path.join(system_root, "System32"),
                                       os.path.join(system_root, "SysWOW64"))
                    win_paths.extend(known_locations)
                for win_path in win_paths:
                    tool_path = os.path.join(win_path, original_tool_value)
                    if os.path.isfile(tool_path):
                        var_stack.set_var(tool_to_find_var_name, "find_cmd_tool").append(tool_path)
                        break
                else: # break was not called, tool was not found
                    tool_path = None
        return tool_path
Example 7
    def read_yaml_file(self, file_path, *args, **kwargs):
        try:
            with utils.open_for_read_file_or_url(file_path, self.url_translator, self.path_searcher) as file_fd:
                buffer = file_fd.read()
                buffer = utils.unicodify(buffer)  # make sure text is unicode
                buffer = io.StringIO(buffer)      # turn text to a stream
                buffer.name = file_path           # this will help identify the file for debugging and messages
                kwargs['path-to-file'] = file_path
                self.read_yaml_from_stream(buffer, *args, **kwargs)
        except (FileNotFoundError, urllib.error.URLError) as ex:
            ignore = kwargs.get('ignore_if_not_exist', False)
            if ignore:
                print("'ignore_if_not_exist' specified, ignoring FileNotFoundError for", file_path)
            else:
                print("Exception reading file:", file_path, ex)
                raise
        except Exception as ex:
            print("Exception reading file:", file_path, ex)
            raise
Example 8
def read_file_or_url(in_file_or_url,
                     config_vars,
                     path_searcher=None,
                     encoding='utf-8',
                     save_to_path=None,
                     checksum=None,
                     connection_obj=None):
    need_to_download = not utils.check_file_checksum(save_to_path, checksum)
    if not need_to_download:
        # if save_to_path contains the correct data just read it by recursively
        # calling read_file_or_url
        return read_file_or_url(save_to_path, config_vars, encoding=encoding)
    match = protocol_header_re.match(os.fspath(in_file_or_url))
    actual_file_path = in_file_or_url
    if not match:  # it's a local file
        if path_searcher is not None:
            actual_file_path = path_searcher.find_file(actual_file_path)
        if actual_file_path:
            if 'Win' in utils.get_current_os_names():
                actual_file_path = os.path.abspath(actual_file_path)
            else:
                actual_file_path = os.path.realpath(actual_file_path)
        else:
            raise FileNotFoundError(
                f"Could not locate local file {in_file_or_url}")
        if encoding is None:
            read_mod = "rb"
        else:
            read_mod = "r"
        with open(actual_file_path, read_mod, encoding=encoding) as rdf:
            buffer = rdf.read()
    else:
        assert connection_obj, "no connection_obj given"
        session = connection_obj.get_session(in_file_or_url)
        response = session.get(in_file_or_url, timeout=(33.05, 180.05))
        response.raise_for_status()
        buffer = response.text
    buffer = utils.unicodify(buffer)  # make sure text is unicode
    if save_to_path and in_file_or_url != save_to_path:
        with open(save_to_path, "w") as wfd:
            utils.chown_chmod_on_fd(wfd)
            wfd.write(buffer)
    return buffer, actual_file_path
Example 9
def read_from_file_or_url(in_url, translate_url_callback=None, expected_checksum=None, encoding='utf-8'):
    """ Read a file from local disk or url. Check checksum if given.
        If test against either sig or checksum fails - raise IOError.
        Return: file contents.
    """
    with open_for_read_file_or_url(in_url, translate_url_callback, encoding=encoding) as open_file:
        contents_buffer = open_file.fd.read()
        if encoding is not None:  # when reading from url we're not sure what the encoding is
            contents_buffer = utils.unicodify(contents_buffer, encoding=encoding)
        # check sig or checksum only if they were given
        if expected_checksum is not None:
            if len(contents_buffer) == 0:
                raise IOError("Empty contents returned from", in_url, "; expected checksum: ", expected_checksum, "; encoding:", encoding)
            if encoding is not None:
                raise IOError("Checksum check requested for", in_url, "but encoding is not None, encoding:", encoding, "; expected checksum: ", expected_checksum)
            buffer_ok = utils.check_buffer_checksum(contents_buffer, expected_checksum)
            if not buffer_ok:
                actual_checksum = utils.get_buffer_checksum(contents_buffer)
                raise IOError("Checksum mismatch", in_url, "expected checksum: ", expected_checksum,
                              "actual checksum:", actual_checksum, "encoding:", encoding)
    return contents_buffer
Example 10
def get_main_drive_name():
    retVal = None
    try:
        if sys.platform == 'darwin':
            for volume in os.scandir("/Volumes"):
                if volume.is_symlink():
                    resolved_volume_path = Path("/Volumes",
                                                volume.name).resolve()
                    if str(resolved_volume_path) == "/":
                        retVal = volume.name
                        break
            else:
                apple_script = """osascript -e 'return POSIX file (POSIX path of "/") as Unicode text' """
                completed_process = subprocess.run(apple_script,
                                                   stdout=subprocess.PIPE,
                                                   shell=True)
                retVal = utils.unicodify(completed_process.stdout)
                retVal = retVal.strip("\n:")
        elif sys.platform == 'win32':
            import win32api
            retVal = win32api.GetVolumeInformation("C:\\")[0]
    except:
        pass
    return retVal
Example 11
def addItem(pl,
            add_path,
            replace_label=None,
            position=None,
            before_item=None,
            after_item=None,
            section='persistent-apps',
            display_as=1,
            show_as=1,
            arrangement=2,
            tile_type='file-tile',
            label_name=None):
    """adds an item to an existing dock plist object"""
    if display_as is None:
        display_as = 1
    if show_as is None:
        show_as = 0
    if arrangement is None:
        arrangement = 2

    # fix problems with unicode file names
    enc = (sys.stdin.encoding if sys.stdin.encoding else 'utf-8')
    add_path = utils.unicodify(add_path, enc)

    # set a dock label if one isn't provided
    if label_name is None:
        if tile_type == 'url-tile':
            label_name = add_path
            section = 'persistent-others'
        else:
            base_name = re.sub('/$', '', add_path).split('/')[-1]
            label_name = re.sub('.app$', '', base_name)

    # only add if item label isn't already there

    if replace_label != label_name:
        for existing_dock_item in (pl[section]):
            for label_key in ['file-label', 'label']:
                if label_key in existing_dock_item['tile-data']:
                    if existing_dock_item['tile-data'][
                            label_key] == label_name:
                        print(
                            "%s already exists in dock. Use --replacing '%s' to update an existing item"
                            % (label_name, label_name))
                        return False

    if replace_label is not None:
        for item_offset in range(len(pl[section])):
            tile_replace_candidate = pl[section][item_offset]['tile-data']
            if tile_replace_candidate[label_key_for_tile(
                    tile_replace_candidate)] == replace_label:
                verboseOutput('found', replace_label)
                del pl[section][item_offset]
                position = item_offset + 1
                break

    new_guid = generate_guid()
    if tile_type == 'file-tile':
        new_item = {
            'GUID': new_guid,
            'tile-data': {
                'file-data': {
                    '_CFURLString': add_path,
                    '_CFURLStringType': 0
                },
                'file-label': label_name,
                'file-type': 32
            },
            'tile-type': tile_type
        }
    elif tile_type == 'directory-tile':
        if subprocess.Popen(
            ['/usr/bin/sw_vers', '-productVersion'], stdout=subprocess.PIPE
        ).stdout.read().rstrip().split(
                '.'
        )[1] == '4':  # gets the decimal after 10 in sw_vers; 10.4 does not use 10.5 options for stacks
            new_item = {
                'GUID': new_guid,
                'tile-data': {
                    'directory': 1,
                    'file-data': {
                        '_CFURLString': add_path,
                        '_CFURLStringType': 0
                    },
                    'file-label': label_name,
                    'file-type': 2
                },
                'tile-type': tile_type
            }
        else:
            new_item = {
                'GUID': new_guid,
                'tile-data': {
                    'arrangement': arrangement,
                    'directory': 1,
                    'display_as': display_as,
                    'file-data': {
                        '_CFURLString': add_path,
                        '_CFURLStringType': 0
                    },
                    'file-label': label_name,
                    'file-type': 2,
                    'show_as': show_as
                },
                'tile-type': tile_type
            }

    elif tile_type == 'url-tile':
        new_item = {
            'GUID': new_guid,
            'tile-data': {
                'label': label_name,
                'url': {
                    '_CFURLString': add_path,
                    '_CFURLStringType': 15
                }
            },
            'tile-type': tile_type
        }
    else:
        print('unknown type:', tile_type)
        return False

    verboseOutput('adding', new_item)

    if position is not None:
        if position in ['beginning', 'begin', 'first']:
            pl[section].insert(0, new_item)
            return True
        elif position in ['end', 'last']:
            pl[section].append(new_item)
            return True
        elif position in ['middle', 'center']:
            midpoint = int(len(pl[section]) / 2)
            pl[section].insert(midpoint, new_item)
            return True
        else:
            try:
                int(position)
            except Exception:
                print('Invalid position', position)
                return False
            if int(position) == 0:
                pl[section].insert(int(position), new_item)
            elif int(position) > 0:
                pl[section].insert(int(position) - 1, new_item)
            else:
                pl[section].insert(
                    int(position) + len(pl[section]) + 1, new_item)
            return True
    elif after_item is not None or before_item is not None:
        for item_offset in range(len(pl[section])):
            try:
                if after_item is not None:
                    if pl[section][item_offset]['tile-data'][
                            'file-label'] == after_item:
                        pl[section].insert(item_offset + 1, new_item)
                        return True
                if before_item is not None:
                    if pl[section][item_offset]['tile-data'][
                            'file-label'] == before_item:
                        pl[section].insert(item_offset, new_item)
                        return True
            except KeyError:
                pass
    pl[section].append(new_item)
    verboseOutput('item added at end')
    return True
Example 12
    def pluralize(self, word):
        '''
        Pluralizes Spanish nouns.
        Input string can be Unicode (e.g. u"palabra"), or a str encoded in UTF-8 or Latin-1.
        Output string will be encoded the same way as the input.
        '''

        word, origType = utils.unicodify(
            word)  # all internal calculations are done in Unicode

        rules = [
            [u'(?i)([aeiou])x$', u'\\1x'],
            # This could fail if the word is oxytone.
            [u'(?i)([áéíóú])([ns])$', u'|1\\2es'],
            [u'(?i)(^[bcdfghjklmnñpqrstvwxyz]*)an$',
             u'\\1anes'],  # clan->clanes
            [u'(?i)([áéíóú])s$', u'|1ses'],
            [
                u'(?i)(^[bcdfghjklmnñpqrstvwxyz]*)([aeiou])([ns])$',
                u'\\1\\2\\3es'
            ],  # tren->trenes
            [u'(?i)([aeiouáéó])$',
             u'\\1s'],  # casa->casas, padre->padres, papá->papás
            [u'(?i)([aeiou])s$', u'\\1s'],  # atlas->atlas, virus->virus, etc.
            [u'(?i)([éí])(s)$', u'|1\\2es'],  # inglés->ingleses
            [u'(?i)z$', u'ces'],  # luz->luces
            [u'(?i)([íú])$', u'\\1es'],  # ceutí->ceutíes, tabú->tabúes
            [
                u'(?i)(ng|[wckgtp])$', u'\\1s'
            ],  # Anglicisms such as puenting, frac, crack, show (in which cases could this fail?)
            [u'(?i)$', u'es']  # ELSE +es (e.g. árbol->árboles)
        ]

        lower_cased_word = word.lower()

        for uncountable_word in self.non_changing_words:
            if lower_cased_word[-1 *
                                len(uncountable_word):] == uncountable_word:
                return utils.deunicodify(word, origType)

        for irregular_singular, irregular_plural in self.irregular_words.iteritems(
        ):
            match = re.search(u'(?i)(^' + irregular_singular + u')$', word,
                              re.IGNORECASE)
            if match:
                result = re.sub(u'(?i)' + irregular_singular + u'$',
                                match.expand(u'\\1')[0] + irregular_plural[1:],
                                word)
                return utils.deunicodify(result, origType)

        for rule in rules:
            match = re.search(rule[0], word, re.IGNORECASE)
            if match:
                groups = match.groups()
                replacement = rule[1]
                if re.match(u'\|', replacement):
                    for k in range(1, len(groups)):
                        replacement = replacement.replace(
                            u'|' + unicode(k),
                            self.string_replace(groups[k - 1], u'ÁÉÍÓÚáéíóú',
                                                u'AEIOUaeiou'))

                result = re.sub(rule[0], replacement, word)
                # This puts an accent on nouns that become proparoxytone when
                # pluralized, such as esmóquines, jóvenes...
                match = re.search(u'(?i)([aeiou]).{1,3}([aeiou])nes$', result)

                if match and len(match.groups()) > 1 and not re.search(
                        u'(?i)[áéíóú]', word):
                    result = result.replace(
                        match.group(0),
                        self.string_replace(match.group(1), u'AEIOUaeiou',
                                            u'ÁÉÍÓÚáéíóú') +
                        match.group(0)[1:])

                return utils.deunicodify(result, origType)

        return utils.deunicodify(word, origType)
Example 13
    def singularize(self, word):
        '''
        Singularizes Spanish nouns.
        Input string can be Unicode (e.g. u"palabras"), or a str encoded in UTF-8 or Latin-1.
        Output string will be encoded the same way as the input.
        '''

        word, origType = utils.unicodify(
            word)  # all internal calculations are done in Unicode

        rules = [
            [
                ur'(?i)^([bcdfghjklmnñpqrstvwxyz]*)([aeiou])([ns])es$',
                u'\\1\\2\\3'
            ],
            [ur'(?i)([aeiou])([ns])es$', u'~1\\2'],
            [ur'(?i)shes$', u'sh'],  # flashes->flash
            [ur'(?i)oides$', u'oide'],  # androides->androide
            [ur'(?i)(sis|tis|xis)$', u'\\1'],  # crisis, apendicitis, praxis
            [ur'(?i)(é)s$', u'\\1'],  # bebés->bebé
            [ur'(?i)(ces)$', u'z'],  # luces->luz
            [ur'(?i)([^e])s$', u'\\1'],  # casas->casa
            [ur'(?i)([bcdfghjklmnñprstvwxyz]{2,}e)s$',
             u'\\1'],  # cofres->cofre
            [ur'(?i)([ghñptv]e)s$',
             u'\\1'],  # llaves->llave, radiocasetes->radiocasete
            [ur'(?i)jes$', u'je'],  # ejes->eje
            [ur'(?i)ques$', u'que'],  # tanques->tanque
            [ur'(?i)es$', u'']  # ELSE remove _es_  monitores->monitor
        ]

        lower_cased_word = word.lower()

        for uncountable_word in self.non_changing_words:
            if lower_cased_word[-1 *
                                len(uncountable_word):] == uncountable_word:
                return utils.deunicodify(word, origType)

        for irregular_singular, irregular_plural in self.irregular_words.iteritems(
        ):
            match = re.search(u'(^' + irregular_plural + u')$', word,
                              re.IGNORECASE)
            if match:
                result = re.sub(
                    u'(?i)' + irregular_plural + u'$',
                    match.expand(u'\\1')[0] + irregular_singular[1:], word)
                return utils.deunicodify(result, origType)

        for rule in rules:
            match = re.search(rule[0], word, re.IGNORECASE)
            if match:
                groups = match.groups()
                replacement = rule[1]
                if re.match(u'~', replacement):
                    for k in range(1, len(groups)):
                        replacement = replacement.replace(
                            u'~' + unicode(k),
                            self.string_replace(groups[k - 1], u'AEIOUaeiou',
                                                u'ÁÉÍÓÚáéíóú'))

                result = re.sub(rule[0], replacement, word)
                # This is a possible workaround for the double-accent
                # problem. A bit hacky, but it works
                match = re.search(u'(?i)([áéíóú]).*([áéíóú])', result)

                if match and len(match.groups()) > 1 and not re.search(
                        u'(?i)[áéíóú]', word):
                    result = self.string_replace(result, u'ÁÉÍÓÚáéíóú',
                                                 u'AEIOUaeiou')

                return utils.deunicodify(result, origType)

        return utils.deunicodify(word, origType)
Example 14
    def __call__(self, *args, **kwargs):
        """ Normally list of arguments are calculated by calling self.get_run_args,
            unless kwargs["run_args"] exists.
        """
        PythonBatchCommandBase.__call__(self, *args, **kwargs)
        run_args = list()
        if "run_args" in kwargs:
            run_args.extend(kwargs["run_args"])
        else:
            self.get_run_args(run_args)
        run_args = list(map(str, run_args))
        self.doing = f"""calling subprocess '{" ".join(run_args)}'"""
        if self.detach:
            pid = os.spawnlp(os.P_NOWAIT, *run_args)
            # in https://docs.python.org/3.6/library/subprocess.html#replacing-the-os-spawn-family
            # the recommended way to replace os.spawnlp(os.P_NOWAIT,.. is by using subprocess.Popen,
            # but it does not work properly
            #pid = subprocess.Popen(run_args).pid
        else:
            if self.script:
                self.shell = True
                assert len(run_args) == 1
            elif self.shell and len(run_args) == 1:
                if sys.platform == 'darwin':  # MacOS needs help with spaces in paths
                    #run_args = shlex.split(run_args[0])
                    #run_args = [p.replace(" ", r"\ ") for p in run_args]
                    #run_args = " ".join(run_args)
                    run_args = run_args[0]
                elif sys.platform == 'win32':
                    run_args = run_args[0]

            out_stream = None
            need_to_close_out_file = False
            if self.out_file:
                if isinstance(self.out_file, (str, os.PathLike, bytes)):
                    out_stream = utils.utf8_open_for_write(self.out_file, "w")
                    need_to_close_out_file = True
                elif hasattr(self.out_file, "write"):  # out_file is already an open file
                    out_stream = self.out_file

            elif self.capture_stdout:
                # this will capture stdout in completed_process.stdout instead of writing directly to stdout
                # so objects overriding handle_completed_process will have access to stdout
                out_stream = subprocess.PIPE
            in_stream = None
            err_stream = subprocess.PIPE

            completed_process = subprocess.run(run_args, check=False, stdin=in_stream, stdout=out_stream, stderr=err_stream, shell=self.shell, bufsize=0)

            if need_to_close_out_file:
                out_stream.close()

            if completed_process.stderr:
                self.stderr = utils.unicodify(completed_process.stderr)
                if self.ignore_all_errors:
                    # in case of ignore_all_errors redirect stderr to stdout so we know there was an error
                    # but it will not be interpreted as an error by whoever is running instl
                    log.info(self.stderr)
                else:
                    if self.stderr_means_err:
                        log.error(self.stderr)
                        if completed_process.returncode == 0:
                            completed_process.returncode = 123
                    else:
                        log.info(self.stderr)
            else:
                pass

            if self.ignore_all_errors:
                completed_process.returncode = 0

            completed_process.check_returncode()

            self.handle_completed_process(completed_process)
Example 15
def win_item_ls(the_path, ls_format, root_folder=None):
    import win32security
    the_parts = dict()
    the_error = None
    the_path_str = os.fspath(the_path)
    if 'p' in ls_format:
        the_parts['p'] = the_path_str
    elif 'P' in ls_format:
        the_parts['P'] = the_path_str

    try:
        the_stats = os.lstat(the_path)

        for format_char in ls_format:
            if format_char == 'T':
                the_parts[format_char] = time.strftime(
                    "%Y/%m/%d %H:%M:%S", time.gmtime(
                        (the_stats[stat.ST_MTIME])))  # modification time
            elif format_char == 'D':
                if 'p' in ls_format.lower():  # 'p' or 'P'
                    if stat.S_ISDIR(the_stats.st_mode):
                        the_parts[format_char] = "<DIR>"
                    else:
                        the_parts[format_char] = ""
            elif format_char == 'S':
                the_parts[format_char] = the_stats[
                    stat.ST_SIZE]  # size in bytes
            elif format_char == 'U':
                try:
                    sd = win32security.GetFileSecurity(
                        the_path_str, win32security.OWNER_SECURITY_INFORMATION)
                    owner_sid = sd.GetSecurityDescriptorOwner()
                    name, domain, __type = win32security.LookupAccountSid(
                        None, owner_sid)
                    the_parts[format_char] = domain + "\\" + name  # user
                except Exception as ex:  # we sometimes get exception: 'LookupAccountSid, No mapping between account names and security IDs was done.'
                    the_parts[format_char] = "Unknown user"

            elif format_char == 'G':
                try:
                    sd = win32security.GetFileSecurity(
                        the_path_str, win32security.GROUP_SECURITY_INFORMATION)
                    owner_sid = sd.GetSecurityDescriptorGroup()
                    name, domain, __type = win32security.LookupAccountSid(
                        None, owner_sid)
                    the_parts[format_char] = domain + "\\" + name  # group
                except Exception as ex:  # we sometimes get exception: 'LookupAccountSid, No mapping between account names and security IDs was done.'
                    the_parts[format_char] = "Unknown group"

            elif format_char == 'C':
                if not (stat.S_ISLNK(the_stats.st_mode)
                        or stat.S_ISDIR(the_stats.st_mode)):
                    the_parts[format_char] = utils.get_file_checksum(the_path)
                else:
                    the_parts[format_char] = ""
            elif format_char == 'P':
                as_posix = PurePath(the_path).as_posix()
                the_parts[format_char] = str(as_posix)
            elif format_char == 'p' and root_folder is not None:
                relative_path = PurePath(the_path).relative_to(
                    PurePath(root_folder))
                the_parts[format_char] = str(relative_path.as_posix())
            elif format_char == 'a' or format_char == 'f':
                import subprocess
                the_parts[format_char] = "[]"
                completed_process = subprocess.run(f'attrib "{the_path_str}"',
                                                   shell=True,
                                                   stdout=subprocess.PIPE,
                                                   stderr=subprocess.PIPE)
                if completed_process.returncode != 0:
                    the_parts[format_char] = utils.unicodify(
                        completed_process.stderr)
                else:
                    ls_line = utils.unicodify(completed_process.stdout)
                    flag_matches = re.search(
                        r"(?P<attribs>(A|R|S|H|O|I|X|P|U|\s)+?)\s+[A-Z]:",
                        ls_line)
                    if flag_matches:
                        flags = "".join(flag_matches.group('attribs').split())
                        if flags:
                            the_parts[format_char] = flags

    except Exception as ex:
        the_error = [the_path_str, ex.strerror]

    return the_parts, the_error
Example 16
def unix_item_ls(the_path, ls_format, root_folder=None):
    import grp
    import pwd

    the_parts = dict()
    the_error = None
    the_path_str = os.fspath(the_path)
    if 'p' in ls_format:
        the_parts['p'] = the_path_str
    elif 'P' in ls_format:
        the_parts['P'] = the_path_str

    try:
        the_stats = os.lstat(the_path)

        for format_char in ls_format:
            if format_char == 'I':
                the_parts[format_char] = the_stats[stat.ST_INO]  # inode number
            elif format_char == 'R':
                the_parts[format_char] = utils.unix_permissions_to_str(
                    the_stats.st_mode)  # permissions
            elif format_char == 'L':
                the_parts[format_char] = the_stats[stat.ST_NLINK]  # num links
            elif format_char == 'u':
                try:
                    the_parts[format_char] = str(the_stats[stat.ST_UID])  # unknown user name, get the number
                except Exception:
                    the_parts[format_char] = "no_uid"
            elif format_char == 'U':
                try:
                    the_parts[format_char] = pwd.getpwuid(
                        the_stats[stat.ST_UID])[0]  # user
                except KeyError:
                    the_parts[format_char] = str(the_stats[stat.ST_UID])  # unknown user name, get the number
                except Exception:
                    the_parts[format_char] = "no_uid"
            elif format_char == 'g':
                try:
                    the_parts[format_char] = str(the_stats[stat.ST_GID])  # unknown group name, get the number
                except Exception:
                    the_parts[format_char] = "no_gid"
            elif format_char == 'G':
                try:
                    the_parts[format_char] = grp.getgrgid(
                        the_stats[stat.ST_GID])[0]  # group
                except KeyError:
                    the_parts[format_char] = str(the_stats[stat.ST_GID])  # unknown group name, get the number
                except Exception:
                    the_parts[format_char] = "no_gid"
            elif format_char == 'S':
                the_parts[format_char] = the_stats[
                    stat.ST_SIZE]  # size in bytes
            elif format_char == 'T':
                the_parts[format_char] = time.strftime(
                    "%Y/%m/%d-%H:%M:%S", time.gmtime(
                        (the_stats[stat.ST_MTIME])))  # modification time
            elif format_char == 'C':
                if not (stat.S_ISLNK(the_stats.st_mode)
                        or stat.S_ISDIR(the_stats.st_mode)):
                    the_parts[format_char] = utils.get_file_checksum(the_path)
                else:
                    the_parts[format_char] = ""
            elif format_char == 'P' or format_char == 'p':
                path_to_return = the_path_str
                if format_char == 'p' and root_folder is not None:
                    path_to_return = os.path.relpath(the_path,
                                                     start=root_folder)

                # E will bring us Extra data (path postfix) but we want to know if it's DIR in any case
                if stat.S_ISDIR(the_stats.st_mode) and 'D' in ls_format:
                    path_to_return += '/'

                if 'E' in ls_format:
                    if stat.S_ISLNK(the_stats.st_mode):
                        path_to_return += '@'
                    elif not stat.S_ISDIR(the_stats.st_mode) and (
                            the_stats.st_mode &
                        (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)):
                        path_to_return += '*'
                    elif stat.S_ISSOCK(the_stats.st_mode):
                        path_to_return += '='
                    elif stat.S_ISFIFO(the_stats.st_mode):
                        path_to_return += '|'

                the_parts[format_char] = path_to_return
            elif format_char == 'a' or format_char == 'f':
                import subprocess
                completed_process = subprocess.run(f'ls -lO "{the_path_str}"',
                                                   shell=True,
                                                   stdout=subprocess.PIPE,
                                                   stderr=subprocess.PIPE)
                if completed_process.returncode != 0:
                    the_parts[format_char] = utils.unicodify(
                        completed_process.stderr)
                else:
                    ls_line = utils.unicodify(completed_process.stdout)
                    flag_matches = re.findall(
                        "arch|archived|opaque|nodump|sappnd|sappend|schg|schange|simmutable|uappnd|uappend|uchg|uchange|uimmutable|hidden",
                        ls_line)
                    if flag_matches:
                        the_parts[format_char] = ",".join(flag_matches)
                    else:
                        the_parts[format_char] = "[]"

    except Exception as ex:
        the_error = [the_path_str, ex.strerror]

    return the_parts, the_error
Example 17
def addItem(pl, add_path, replace_label=None, position=None, before_item=None, after_item=None, section='persistent-apps', display_as=1, show_as=1, arrangement=2, tile_type='file-tile',label_name=None):
    """adds an item to an existing dock plist object"""
    if display_as is None:
        display_as = 1
    if show_as is None:
        show_as = 0
    if arrangement is None:
        arrangement = 2

    # fix problems with unicode file names
    enc = (sys.stdin.encoding if sys.stdin.encoding else 'utf-8')
    add_path = utils.unicodify(add_path, enc)

    # set a dock label if one isn't provided
    if label_name is None:
        if tile_type == 'url-tile':
            label_name = add_path
            section = 'persistent-others'
        else:
            base_name = re.sub('/$', '', add_path).split('/')[-1]
            label_name = re.sub('.app$', '', base_name)


    # only add if item label isn't already there

    if replace_label != label_name:
        for existing_dock_item in (pl[section]):
            for label_key in ['file-label','label']:
                if label_key in existing_dock_item['tile-data']:
                    if existing_dock_item['tile-data'][label_key] == label_name:
                        print("%s already exists in dock. Use --replacing '%s' to update an existing item" % (label_name, label_name))
                        return False



    if replace_label is not None:
        for item_offset in range(len(pl[section])):
            tile_replace_candidate = pl[section][item_offset]['tile-data']
            if tile_replace_candidate[label_key_for_tile(tile_replace_candidate)] == replace_label:
                verboseOutput('found', replace_label)
                del pl[section][item_offset]
                position = item_offset + 1
                break

    new_guid = generate_guid()
    if tile_type == 'file-tile':
        new_item = {'GUID': new_guid, 'tile-data': {'file-data': {'_CFURLString': add_path, '_CFURLStringType': 0},'file-label': label_name, 'file-type': 32}, 'tile-type': tile_type}
    elif tile_type == 'directory-tile':
        if subprocess.Popen(['/usr/bin/sw_vers', '-productVersion'],
                stdout=subprocess.PIPE).stdout.read().rstrip().split('.')[1] == '4': # gets the decimal after 10 in sw_vers; 10.4 does not use 10.5 options for stacks
            new_item = {'GUID': new_guid, 'tile-data': {'directory': 1, 'file-data': {'_CFURLString': add_path, '_CFURLStringType': 0}, 'file-label': label_name, 'file-type': 2 }, 'tile-type': tile_type}
        else:
            new_item = {'GUID': new_guid, 'tile-data': {'arrangement': arrangement, 'directory': 1, 'display_as': display_as, 'file-data': {'_CFURLString': add_path, '_CFURLStringType': 0}, 'file-label': label_name, 'file-type': 2, 'show_as': show_as}, 'tile-type': tile_type}

    elif tile_type == 'url-tile':
        new_item = {'GUID': new_guid, 'tile-data': {'label': label_name, 'url': {'_CFURLString': add_path, '_CFURLStringType': 15}}, 'tile-type': tile_type}
    else:
        print('unknown type:', tile_type)
        sys.exit(1)

    verboseOutput('adding', new_item)

    if position is not None:
        if position in [ 'beginning', 'begin', 'first' ]:
            pl[section].insert(0, new_item)
            return True
        elif position in [ 'end', 'last' ]:
            pl[section].append(new_item)
            return True
        elif position in [ 'middle', 'center' ]:
            midpoint = int(len(pl[section])/2)
            pl[section].insert(midpoint, new_item)
            return True
        else:
            try:
                int(position)
            except Exception:
                print('Invalid position', position)
                return False
            if int(position) == 0:
                pl[section].insert(int(position), new_item)
            elif int(position) > 0:
                pl[section].insert(int(position)-1, new_item)
            else:
                pl[section].insert(int(position)+len(pl[section])+1, new_item)
            return True
    elif after_item is not None or before_item is not None:
        for item_offset in range(len(pl[section])):
            try:
                if after_item is not None:
                    if pl[section][item_offset]['tile-data']['file-label'] == after_item:
                        pl[section].insert(item_offset+1, new_item)
                        return True
                if before_item is not None:
                    if pl[section][item_offset]['tile-data']['file-label'] == before_item:
                        pl[section].insert(item_offset, new_item)
                        return True
            except KeyError:
                pass
    pl[section].append(new_item)
    verboseOutput('item added at end')
    return True
Example 18
    def __init__(self, name, description="", *values):
        self.__name = utils.unicodify(name)
        self.__description = utils.unicodify(description)
        self.resolved_num = 0
        self.__values = list()
        ConfigVar.extend(self, values)  # explicit call so ConstConfigVar can be initialized
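All of the examples above lean on utils.unicodify, whose implementation is not included here. In most snippets it takes bytes or str and returns str; the inflector examples use a different variant that also returns the original type. A minimal sketch of the first variant, offered purely as an assumption about what such a helper does (not the project's actual code), might be:

def unicodify(something, encoding='utf-8'):
    """Best-effort conversion of bytes (or anything else) to str.
    Assumed behaviour, for illustration only."""
    if something is None:
        return None
    if isinstance(something, bytes):
        try:
            return something.decode(encoding)
        except UnicodeDecodeError:
            return something.decode('iso-8859-1')  # latin-1 never fails to decode
    return str(something)


print(unicodify(b'caf\xc3\xa9'))  # 'café'
print(unicodify(42))              # '42'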