Пример #1
0
def post_install() -> None:
    """Run post-installation tasks: copy bundled workspaces into place."""
    import appdirs

    def copytree(src: str, tgt: str) -> None:
        # Mirror every subdirectory of ``src`` into ``tgt``, replacing any
        # subdirectory that already exists at the destination.
        import glob
        import shutil
        print(f"copying {src} -> {tgt}")
        for entry in glob.glob(os.path.join(src, '*')):
            destination = os.path.join(tgt, os.path.basename(entry))
            if os.path.exists(destination):
                shutil.rmtree(destination)
            try:
                shutil.copytree(entry, destination)
            except (shutil.Error, OSError) as err:
                # shutil.Error: partial-copy failures; OSError: missing source.
                print(f"directory not copied: {str(err)}")

    print('running postinstall')

    # copy user workspaces
    user_src_base = str(Path('.', 'data', 'user'))
    user_tgt_base = appdirs.user_data_dir(appname=PKGNAME, appauthor=AUTHOR)
    user_tgt_base = str(Path(user_tgt_base, 'workspaces'))
    copytree(user_src_base, user_tgt_base)

    # copy site workspaces
    site_src_base = str(Path('.', 'data', 'site'))
    site_tgt_base = appdirs.site_data_dir(appname=PKGNAME, appauthor=AUTHOR)
    site_tgt_base = str(Path(site_tgt_base, 'workspaces'))
    copytree(site_src_base, site_tgt_base)
Пример #2
0
 def site_data_dir(self):
     """Return ``site_data_dir``."""
     # Resolve the platform-specific site data directory via appdirs,
     # honoring the version pin and multipath mode set on this object.
     path = appdirs.site_data_dir(
         self.appname,
         self.appauthor,
         version=self.version,
         multipath=self.multipath,
     )
     # Optionally create the directory on first access.
     if self.create:
         self._ensure_directory_exists(path)
     return path
Пример #3
0
def init(site=False):
    """Prompt the user for config and write it to the app data directory.

    Args:
        site: when True, write to the site-wide data dir instead of the
            per-user one.
    """
    if site:
        base_dir = appdirs.site_data_dir(APP_NAME, APP_AUTHOR)
    else:
        base_dir = appdirs.user_data_dir(APP_NAME, APP_AUTHOR)

    if not os.path.exists(base_dir):
        os.makedirs(base_dir)

    config_path = os.path.join(base_dir, 'config.py')

    incomplete = os.path.realpath(prompt('Incomplete directory', 'incomplete'))
    downloads = os.path.realpath(prompt('Downloads directory', 'downloads'))
    torrents = os.path.realpath(prompt('Torrents directory', 'torrents'))

    putio_token = prompt('OAuth Token')

    # Render the config by substituting the answers into the dist template.
    config_dist = find_config_dist()
    with open(config_dist, 'r') as source:
        contents = (source.read().replace(
            "os.getenv('PUTIO_TOKEN')",
            "os.getenv('PUTIO_TOKEN', '" + putio_token + "')").replace(
                "/files/downloads",
                downloads).replace("/files/incomplete",
                                   incomplete).replace("/files/torrents",
                                                       torrents))

    with open(config_path, 'w') as destination:
        destination.write(contents)

    # The token is a secret: restrict the file to owner read/write.
    os.chmod(config_path, stat.S_IRUSR | stat.S_IWUSR)

    # BUG FIX: was a Python 2 print statement; a single-argument print()
    # call behaves identically on both Python 2 and 3.
    print('\nConfig written to %s' % config_path)
Пример #4
0
 def site_data_dir(self):
     """Return ``site_data_dir``."""
     # Delegate to appdirs using this object's app identity; honors the
     # configured version pin and multipath mode.
     directory = appdirs.site_data_dir(self.appname, self.appauthor,
                          version=self.version, multipath=self.multipath)
     # Optionally create the directory before handing it back.
     if self.create:
         self._ensure_directory_exists(directory)
     return directory
Пример #5
0
def datapath():
    """Return the directory where app data should be installed.

    Root installs go to the site-wide data dir; everyone else gets the
    per-user data dir.
    """
    import appdirs
    chooser = appdirs.site_data_dir if iamroot() else appdirs.user_data_dir
    return chooser(appname, appauthor)
Пример #6
0
def datapath():
    '''Returns the path where app data is to be installed.'''
    # Root installs system-wide; regular users get a per-user directory.
    import appdirs
    if iamroot():
        return appdirs.site_data_dir(appname, appauthor)
    else:
        return appdirs.user_data_dir(appname, appauthor)
Пример #7
0
 def _find_hunspell_files(self):
     """Locate the hunspell ``.dic``/``.aff`` pair for ``self.lang``.

     Scans every site data directory and returns ``(dic_path, aff_path)``
     for the first directory containing both files; returns ``None``
     implicitly when no directory matches.
     """
     language = self.lang
     directories = site_data_dir('hunspell', multipath=True).split(os.pathsep)
     for directory in directories:
         aff = os.path.join(directory, language + '.aff')
         dic = os.path.join(directory, language + '.dic')
         if os.path.exists(aff) and os.path.exists(dic):
             return dic, aff
Пример #8
0
 def test_helpers(self):
     """Every appdirs helper must return a path of STRING_TYPE."""
     helpers = (
         appdirs.user_data_dir,
         appdirs.site_data_dir,
         appdirs.user_cache_dir,
         appdirs.user_log_dir,
     )
     for helper in helpers:
         self.assertIsInstance(helper('MyApp', 'MyCompany'), STRING_TYPE)
Пример #9
0
 def __init__(self, device, description, **kwargs):
     """Initialize the driver and derive its on-disk settings location."""
     super().__init__(device, description, **kwargs)
     # compute path where fan/pump settings will be stored
     # [/usr/local/share/]liquidctl/2433_b200_0100/usb1_12/
     # Device identity: vendor/product/release as zero-padded hex triplet.
     ids = '{:04x}_{:04x}_{:04x}'.format(self.vendor_id, self.product_id, self.release_number)
     # Physical location: bus name plus the dot-joined port chain.
     location = '{}_{}'.format(self.bus, '.'.join(map(str, self.port)))
     self._data_path = os.path.join(appdirs.site_data_dir('liquidctl', 'jonasmalacofilho'),
                                    ids, location)
     LOGGER.debug('data directory for device is %s', self._data_path)
Пример #10
0
 def test_helpers(self):
     """Each module-level appdirs helper should yield a STRING_TYPE path."""
     for fn in (appdirs.user_data_dir, appdirs.site_data_dir,
                appdirs.user_cache_dir, appdirs.user_log_dir):
         result = fn('MyApp', 'MyCompany')
         self.assertIsInstance(result, STRING_TYPE)
Пример #11
0
 def test_helpers(self):
     """Each appdirs helper should return a native ``str`` path."""
     # IMPROVEMENT: assertIsInstance reports the actual type on failure,
     # unlike assertTrue(isinstance(...)) which only says "False is not true".
     self.assertIsInstance(appdirs.user_data_dir('MyApp', 'MyCompany'), str)
     self.assertIsInstance(appdirs.site_data_dir('MyApp', 'MyCompany'), str)
     self.assertIsInstance(appdirs.user_cache_dir('MyApp', 'MyCompany'), str)
     self.assertIsInstance(appdirs.user_log_dir('MyApp', 'MyCompany'), str)
Пример #12
0
def get_data_dir(parent_path, prog_name):
    """Return candidate program-data directories, user dir first."""
    ### this directory contain unalterable program data
    return [
        # XDG Specification: ~/.local/share/prog_name
        appdirs.user_data_dir(prog_name, PROG_AUTHOR),
        # should be: /usr/share but return /usr/share/xfce4
        appdirs.site_data_dir(prog_name, PROG_AUTHOR),
        os.path.join(parent_path, DATA_DIRECTORY),
    ]
Пример #13
0
def resource_path(filename):
    """Resolve ``filename`` against site data dirs, the bundle, or user data."""
    # we are inside singlefile pyinstaller
    search = appdirs.site_data_dir(appname=__app_id__, appauthor=False, multipath=True).split(pathsep)
    if getattr(sys, 'frozen', False):
        # Frozen bundle: the unpacked _MEIPASS directory shadows everything.
        search = [sys._MEIPASS] + search
    else:
        search = search + [split_path(__file__)[0]]
    for directory in search:
        candidate = join(directory, filename)
        if exists(candidate):
            return candidate
    # Fall back to the per-user data dir even when the file is absent there.
    user_dir = appdirs.user_data_dir(appname=__app_id__, appauthor=__author__)
    return join(user_dir, filename)
Пример #14
0
 def __init__(self, device, description, **kwargs):
     """Set up the device and compute its persistent-settings path."""
     super().__init__(device, description, **kwargs)
     # compute path where fan/pump settings will be stored
     # [/run]/liquidctl/2433_b200_0100/usb1_12/
     # Device identity: vendor/product/release as zero-padded hex triplet.
     ids = '{:04x}_{:04x}_{:04x}'.format(self.vendor_id, self.product_id,
                                         self.release_number)
     # Physical location: bus name plus the dot-joined port chain.
     location = '{}_{}'.format(self.bus, '.'.join(map(str, self.port)))
     # Prefer volatile /run on Linux; otherwise the appdirs site data dir.
     if sys.platform.startswith('linux') and os.path.isdir('/run'):
         basedir = '/run/liquidctl'
     else:
         basedir = appdirs.site_data_dir('liquidctl', 'jonasmalacofilho')
     self._data_path = os.path.join(basedir, ids, location)
     # In-memory cache of values read from/written to the data path.
     self._data_cache = {}
Пример #15
0
def global_data_dir():
    """Return the global Intake catalog dir for the current environment"""
    # Prefer an active virtualenv prefix; otherwise ask for a conda prefix.
    if VIRTUALENV_VAR in os.environ:
        prefix = os.environ[VIRTUALENV_VAR]
    else:
        prefix = conda_prefix()

    if not prefix:
        # No environment detected: fall back to the OS site data dir.
        return appdirs.site_data_dir(appname='intake', appauthor='intake')
    # conda and virtualenv use Linux-style directory pattern
    return os.path.join(prefix, 'share', 'intake')
Пример #16
0
    def __init__(self, name, clone_path):
        """Initialize the repository.

        Args:
            name (unicode):
                The configured name of the repository.

            clone_path (unicode):
                The path of the git remote to clone.
        """
        self.name = name
        self.clone_path = clone_path
        # Local clones live under the site data dir, keyed by repo name.
        base = appdirs.site_data_dir('reviewbot')
        self.repo_path = os.path.join(base, 'repositories', name)
Пример #17
0
    def __init__(self, name, clone_path):
        """Initialize the repository.

        Args:
            name (unicode):
                The configured name of the repository.

            clone_path (unicode):
                The path of the hg repository to clone.
        """
        self.name = name
        self.clone_path = clone_path
        # Local clones live under the site data dir, keyed by repo name.
        self.repo_path = os.path.join(appdirs.site_data_dir('reviewbot'),
                                      'repositories', name)
Пример #18
0
def postinstall():
    """Post installation script."""

    import appdirs

    def copytree(src, tgt):
        """Copy each subdirectory of ``src`` into ``tgt``, replacing clashes."""
        import glob
        import os
        import shutil

        print(('copying %s -> %s' % (src, tgt)))

        for source_dir in glob.glob(os.path.join(src, '*')):
            target_dir = os.path.join(tgt, os.path.basename(source_dir))
            if os.path.exists(target_dir):
                shutil.rmtree(target_dir)
            try:
                shutil.copytree(source_dir, target_dir)
            except shutil.Error as e:
                # directories are the same
                print(('directory not copied. Error: %s' % e))
            except OSError as e:
                # any error saying that the directory doesn't exist
                print(('directory not copied. Error: %s' % e))

        return True

    print('running postinstall')

    appname = 'nemoa'
    appauthor = 'Froot'

    # Copy the bundled user workspaces, then the site workspaces.
    for src_parts, tgt_root in (
            (('data', 'user'),
             appdirs.user_data_dir(appname=appname, appauthor=appauthor)),
            (('data', 'site'),
             appdirs.site_data_dir(appname=appname, appauthor=appauthor))):
        copytree(getpath(src_parts), getpath((tgt_root, 'workspaces')))
Пример #19
0
def getstorage(name, *args, **kwargs):
    """Map a generic storage-directory name to a platform-specific path.

    This is a thin wrapper around the 'appdirs' module. For details and
    usage see:

        http://github.com/ActiveState/appdirs

    Args:
        name (string): Storage path name. Allowed values are:
            'user_cache_dir', 'user_config_dir', 'user_data_dir',
            'user_log_dir', 'user_cwd', 'site_config_dir' and
            'site_data_dir'.

        *args: Arguments passed to appdirs
        **kwargs: Keyword Arguments passed to appdirs

    Returns:
        String containing path of storage directory or False if
        storage path name is not supported.

    """

    import appdirs

    # Dispatch table instead of an if/elif ladder.
    resolvers = {
        'user_cache_dir': appdirs.user_cache_dir,
        'user_config_dir': appdirs.user_config_dir,
        'user_data_dir': appdirs.user_data_dir,
        'user_log_dir': appdirs.user_log_dir,
        'site_config_dir': appdirs.site_config_dir,
        'site_data_dir': appdirs.site_data_dir,
    }

    if name == 'user_cwd':
        return getcwd()

    resolver = resolvers.get(name)
    if resolver is None:
        return False
    return resolver(*args, **kwargs)
Пример #20
0
def global_data_dir():
    """Return the global Intake catalog dir for the current environment"""
    # BUG FIX: initialize prefix so the fall-through case (no virtualenv,
    # no active conda env, and no conda on PATH) does not raise NameError
    # at the `if prefix:` check below.
    prefix = False
    if VIRTUALENV_VAR in os.environ:
        prefix = os.environ[VIRTUALENV_VAR]
    elif CONDA_VAR in os.environ:
        prefix = sys.prefix
    elif which('conda'):
        # conda exists but is not activated
        prefix = conda_prefix()

    if prefix:
        # conda and virtualenv use Linux-style directory pattern
        return os.path.join(prefix, 'share', 'intake')
    else:
        return appdirs.site_data_dir(appname='intake', appauthor='intake')
Пример #21
0
def global_data_dir():
    """Return the global Intake catalog dir for the current environment"""
    prefix = False
    if VIRTUALENV_VAR in os.environ:
        prefix = os.environ[VIRTUALENV_VAR]
    elif CONDA_VAR in os.environ:
        prefix = sys.prefix
    elif which('conda'):
        # conda exists but is not activated
        prefix = conda_prefix()

    if not prefix:
        # No Python environment found at all: use the OS site data dir.
        return appdirs.site_data_dir(appname='intake', appauthor='intake')
    # conda and virtualenv use Linux-style directory pattern
    return make_path_posix(os.path.join(prefix, 'share', 'intake'))
Пример #22
0
    def get_storage_directory(cls, username=None):
        """
        Retrieve path to the directory where all daemon
        registered under a specific username will be stored.
        If no `username` is provided, username under which current daemon
        was installed will be used.

        :param username: the user

        """
        # An explicit storage dir from the environment always wins.
        if cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY in os.environ:
            return cls.get_daemon_storage_dir()

        # On Windows use the machine-wide application data directory.
        if os.name == 'nt':
            return appdirs.site_data_dir('cloudify-agent', 'Cloudify')

        # Otherwise store under the (possibly env-derived) user's home.
        if username is None and cls.CLOUDIFY_DAEMON_USER_KEY in os.environ:
            username = cls.get_daemon_user()
        return os.path.join(get_home_dir(username), '.cfy-agent')
Пример #23
0
def find_word_list(cfg_word_list_f):
    """Resolve the configured word-list path, expanding %(site_data_dir)s.

    Non-string config yields None; a value that cannot take the mapping
    substitution is returned as-is; otherwise the first existing file found
    under '.' and the bandit site data dirs is returned.
    """
    if not isinstance(cfg_word_list_f, str):
        return None
    try:
        # TypeError here means the value has positional placeholders and
        # cannot accept a mapping; treat it as a literal path.
        cfg_word_list_f % {'site_data_dir': ''}
    except TypeError:
        return cfg_word_list_f

    candidate_dirs = ['.'] + site_data_dir("bandit", "",
                                           multipath=True).split(':')
    for candidate in candidate_dirs:
        path = cfg_word_list_f % {'site_data_dir': candidate}
        if not os.path.isfile(path):
            continue
        if candidate == ".":
            warnings.warn("Using relative path for word_list: %s"
                          % path)
        return path

    raise RuntimeError("Could not substitute '%(site_data_dir)s' "
                       "to a path with a valid word_list file")
Пример #24
0
def find_word_list(cfg_word_list_f):
    """Resolve the configured word-list path, expanding %(site_data_dir)s.

    Returns None for non-string config, the raw value when it has no
    mapping placeholder to fill, or the first existing word-list file
    found under '.' and the bandit site data dirs.

    Raises:
        RuntimeError: if no candidate directory yields an existing file.
    """
    if not isinstance(cfg_word_list_f, str):
        return None
    try:
        cfg_word_list_f % {'site_data_dir': ''}
    except TypeError:
        return cfg_word_list_f

    site_data_dirs = ['.'] + site_data_dir("bandit", "",
                                           multipath=True).split(':')
    for dir in site_data_dirs:
        word_list_path = cfg_word_list_f % {'site_data_dir': dir}
        if os.path.isfile(word_list_path):
            if dir == ".":
                warnings.warn("Using relative path for word_list: %s"
                              % word_list_path)
            return word_list_path

    # BUG FIX: raise only after ALL candidate directories have been tried.
    # Previously this raise was indented inside the loop and fired as soon
    # as the first directory did not contain the file.
    raise RuntimeError("Could not substitute '%(site_data_dir)s' "
                       "to a path with a valid word_list file")
Пример #25
0
    def get_storage_directory(cls, username=None):

        """
        Retrieve path to the directory where all daemon
        registered under a specific username will be stored.
        If no `username` is provided, username under which current daemon
        was installed will be used.

        :param username: the user

        """
        # An explicit storage dir from the environment takes priority.
        if cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY in os.environ:
            return cls.get_daemon_storage_dir()

        # Windows: use the machine-wide application data directory.
        if os.name == 'nt':
            return appdirs.site_data_dir('cloudify-agent', 'Cloudify')

        # Otherwise store under the (possibly env-derived) user's home.
        if username is None and cls.CLOUDIFY_DAEMON_USER_KEY in os.environ:
            username = cls.get_daemon_user()
        return os.path.join(get_home_dir(username), '.cfy-agent')
Пример #26
0
def GetRepData(fichier=""):
    """Return the full path of ``fichier`` inside the data directory.

    Resolution order: a local 'Portable' directory, then a custom path from
    Customize.ini, then a platform-appropriate appdirs location.
    """
    # Check whether a 'Portable' directory exists (portable install mode).
    chemin = Chemins.GetMainPath("Portable")
    if os.path.isdir(chemin):
        chemin = os.path.join(chemin, "Data")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)
        return os.path.join(chemin, fichier)

    # Look for a custom data path configured in Customize.ini.
    chemin = UTILS_Customize.GetValeur("repertoire_donnees", "chemin", "")
    #chemin = chemin.decode("iso-8859-15")
    if chemin != "" and os.path.isdir(chemin):
        return os.path.join(chemin, fichier)

    # Otherwise derive the data directory from the platform conventions.
    if sys.platform == "win32" and platform.release() != "Vista":

        chemin = appdirs.site_data_dir(appname=None, appauthor=False)
        #chemin = chemin.decode("iso-8859-15")

        chemin = os.path.join(chemin, "teamworks")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)

    else:

        chemin = appdirs.user_data_dir(appname=None, appauthor=False)
        #chemin = chemin.decode("iso-8859-15")

        chemin = os.path.join(chemin, "teamworks")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)

        chemin = os.path.join(chemin, "Data")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)

    # Append the requested filename if one was given.
    return os.path.join(chemin, fichier)
Пример #27
0
def GetRepData(fichier=""):
    """Return the full path of ``fichier`` inside the data directory.

    Resolution order: a local 'Portable' directory, then a custom path from
    Customize.ini, then a platform-appropriate appdirs location.
    """
    # Check whether a 'Portable' directory exists (portable install mode).
    chemin = Chemins.GetMainPath("Portable")
    if os.path.isdir(chemin):
        chemin = os.path.join(chemin, "Data")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)
        return os.path.join(chemin, fichier)

    # Look for a custom data path configured in Customize.ini.
    chemin = UTILS_Customize.GetValeur("repertoire_donnees", "chemin", "")
    #chemin = chemin.decode("iso-8859-15")
    if chemin != "" and os.path.isdir(chemin):
        return os.path.join(chemin, fichier)

    # Otherwise derive the data directory from the platform conventions.
    if sys.platform == "win32" and platform.release() != "Vista" :

        chemin = appdirs.site_data_dir(appname=None, appauthor=False)
        #chemin = chemin.decode("iso-8859-15")

        chemin = os.path.join(chemin, "teamworks")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)

    else :

        chemin = appdirs.user_data_dir(appname=None, appauthor=False)
        #chemin = chemin.decode("iso-8859-15")

        chemin = os.path.join(chemin, "teamworks")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)

        chemin = os.path.join(chemin, "Data")
        if not os.path.isdir(chemin):
            os.mkdir(chemin)

    # Append the requested filename if one was given.
    return os.path.join(chemin, fichier)
Пример #28
0
class FlaskConfig:
    """
    Flask base config settings
    """

    # Bind on all network interfaces.
    HOST = "0.0.0.0"
    # Platform site data dir for the wrapped application package.
    DATA_DIR = appdirs.site_data_dir(something.__name__, something.__author__)
    # Config modified from https://stackoverflow.com/a/7507842
    # dictConfig-style logging setup: single rotating file handler on the
    # root logger.
    LOG_CONFIG = nested_dict()
    LOG_CONFIG.update(
        {
            "version": 1,
            "disable_existing_loggers": True,
            "formatters": {
                "standard": {
                    "format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
                }
            },
            "handlers": {
                "default": {
                    "class": "logging.handlers.RotatingFileHandler",
                    "level": "INFO",
                    "formatter": "standard",
                    "filename": "something.log",
                    "maxBytes": 1024,
                    "backupCount": 3,
                }
            },
            "loggers": {
                "": {  # root logger
                    "handlers": ["default"],
                    "level": "DEBUG",
                    "propagate": True,
                }
            },
        }
    )
Пример #29
0
def get_resource_stream(path):
    """
    Return a stream to the contents of a named resource.

    :param path: the path to the resource
    :returns: a file-like stream to the resource, or None if not found
    """
    global _APPNAME, _APPAUTHOR

    # Resources in the ~/.local/share/dirt or AppData folder should shadow
    # the packaged resources.
    search_bases = (appdirs.user_data_dir(_APPNAME, _APPAUTHOR),
                    appdirs.site_data_dir(_APPNAME, _APPAUTHOR))
    for base in search_bases:
        try:
            return open(os.path.join(base, path), 'rb')
        except FileNotFoundError:
            continue

    # It wasn't found in the user's directories,
    # so we just use the packaged resource.
    if resource_exists('dirt', path):
        return resource_stream('dirt', path)

    # Couldn't find it :/
    return None
Пример #30
0
def main():
    """Main function of the script"""
    args = get_argparser().parse_args()

    storage_dir = appdirs.site_data_dir('debian_crossgrader_package_check')

    if args.cleanup:
        # Cleanup mode: remove the stored package data, if present.
        if os.path.isdir(storage_dir):
            shutil.rmtree(storage_dir)
            print('package_check data folder removed.')
        else:
            print('package_check data folder did not exist.')
        return

    os.makedirs(storage_dir, exist_ok=True)
    file_name = os.path.join(storage_dir, 'packages.txt')

    # First run saves a baseline list; subsequent runs compare against it.
    if os.path.isfile(file_name):
        compare_package_list(file_name)
    else:
        save_package_list(file_name)
Пример #31
0
 def get_machine_config_file_path(self, in_app_name):
     """Return the machine-wide config path for ``in_app_name``.

     Uses the platform site data directory reported by appdirs.
     """
     # IMPROVEMENT: os.path.join() with a single argument was a no-op,
     # so call appdirs directly.
     retVal = appdirs.site_data_dir(in_app_name)
     logging.debug("%s", retVal)
     return retVal
Пример #32
0
    url = get_config().get('url', 'https://api.binstar.org')


    return Binstar(token, domain=url,)

def load_config(config_file):
    """Parse ``config_file`` as YAML; return {} when absent or empty."""
    if not exists(config_file):
        return {}
    with open(config_file) as fd:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary objects; confirm whether yaml.safe_load would suffice.
        data = yaml.load(fd)
    return data if data else {}

# Config file locations and log directory, resolved per-platform by appdirs.
SITE_CONFIG = join(appdirs.site_data_dir('binstar', 'ContinuumIO'), 'config.yaml')
USER_CONFIG = join(appdirs.user_data_dir('binstar', 'ContinuumIO'), 'config.yaml')
USER_LOGDIR = appdirs.user_log_dir('binstar', 'ContinuumIO')

def get_config(user=True, site=True):
    """Merge site- and user-level config; user values override site ones."""
    merged = {}
    # Site config is applied first so user settings win on update().
    for enabled, path in ((site, SITE_CONFIG), (user, USER_CONFIG)):
        if enabled:
            merged.update(load_config(path))
    return merged

def set_config(data, user=True):
Пример #33
0
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
import os
import appdirs

# Directions (indices into STEPS below)
NORTH = 3
EAST = 0
SOUTH = 1
WEST = 2

# Cell size
SIZE = 32

# Robbo steps
# One (dx, dy) pixel step per direction, indexed by EAST/SOUTH/WEST/NORTH.
STEPS = (SIZE, 0), (0, SIZE), (-SIZE, 0), (0, -SIZE)
STOP = -1

# Scroll steps
SCROLL_UP = SIZE // 4
SCROLL_DOWN = -SCROLL_UP

# Directories
CONFIG_FILE = os.path.join(appdirs.user_config_dir(), 'pyrobbo.yml')
USER_DATA_DIR = appdirs.user_data_dir('pyrobbo', False, roaming=True)
# User data dir comes first so it shadows any system-wide installation.
DATA_DIRS = [USER_DATA_DIR] + appdirs.site_data_dir(
    'pyrobbo', False, multipath=True).split(os.pathsep)
Пример #34
0
def database_path():
    """Return the database file path inside the first usable data dir.

    Prefers the site data dir, falling back to the user data dir; raises
    when neither can be prepared.
    """
    for candidate in (site_data_dir(__title__), user_data_dir(__title__)):
        directory = _ensure_dir_exists(candidate)
        if directory:
            return os.path.join(directory, DATABASE)
    raise EnvironmentError("Could not find a suitable data directory.")
Пример #35
0
        token,
        domain=url,
    )


def load_config(config_file):
    """Load a YAML config file, returning {} if missing or empty."""
    if exists(config_file):
        with open(config_file) as fd:
            # NOTE(review): yaml.load without Loader= can construct
            # arbitrary objects; confirm whether safe_load suffices here.
            data = yaml.load(fd)
            if data:
                return data

    return {}


# Config file locations and log directory, resolved per-platform by appdirs.
SITE_CONFIG = join(appdirs.site_data_dir('binstar', 'ContinuumIO'),
                   'config.yaml')
USER_CONFIG = join(appdirs.user_data_dir('binstar', 'ContinuumIO'),
                   'config.yaml')
USER_LOGDIR = appdirs.user_log_dir('binstar', 'ContinuumIO')


def get_config(user=True, site=True):
    """Return merged config; user-level entries override site-level ones."""
    config = {}
    if site:
        config.update(load_config(SITE_CONFIG))
    if user:
        config.update(load_config(USER_CONFIG))

    return config
Пример #36
0
def system_extensions():
    """Return the system-wide SuperCollider Extensions directory."""
    base = appdirs.site_data_dir('SuperCollider')
    return os.path.join(base, 'Extensions')
Пример #37
0
 def base_dir(self):
     """Return the models directory: /models inside Docker, else site data."""
     if is_docker_container():
         return "/models"
     data_root = site_data_dir(appname="social_manager")
     return os.path.join(data_root, "models")
Пример #38
0
import os

import appdirs
from pracmln._version import APPNAME, APPAUTHOR

# Project root: two directory levels above this file.
root = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
user_data = appdirs.user_data_dir(APPNAME, APPAUTHOR)

# NOTE(review): a root named 'python*' presumably indicates an installed
# lib/pythonX.Y layout — confirm. In that case app data lives beside the
# code; otherwise prefer the site data dir, falling back to user data.
if os.path.basename(root).startswith('python'):
    root = os.path.realpath(os.path.join(root, '..'))
    app_data = root
else:
    app_data = appdirs.site_data_dir(APPNAME, APPAUTHOR)
    if not os.path.exists(app_data):
        app_data = user_data

# Well-known subdirectories of the application data tree.
trdparty = os.path.join(app_data, '3rdparty')
examples = os.path.join(app_data, 'examples')
etc = os.path.join(app_data, 'etc')
Пример #39
0
    and returns the last directory if none of them match.
    """
    for directory in directories:
        if os.path.exists(os.path.join(directory, CONFIG_FILE)):
            return directory
    return directories[-1] # Fallback to final if all else fails
    

def on_exit():
    """
    Automatically saves playdata and configuration info on exit.
    """
    # Honor a manual location override when set; otherwise pick the first
    # directory (program, site, user) that already holds a config file.
    if override_location:
        location = override_location
    else:
        location = get_directory(program_dir, site_dir, local_dir)
    config.save(location)
    playdata.save(location)

override_location = None # Manual definition by commandline argument - unimplemented
# Candidate data locations: next to the program, site-wide, and per-user.
program_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
site_dir = appdirs.site_data_dir(GAME_FILE, GAME_AUTHOR)
local_dir = appdirs.user_data_dir(GAME_FILE, GAME_AUTHOR)
# Load config and play data from whichever directory holds them, and make
# sure both are written back out when the interpreter exits.
config = ConfigFile()
config.load(program_dir, site_dir, local_dir)
playdata = PlayData()
playdata.load(program_dir, site_dir, local_dir)
atexit.register(on_exit)

#### BETA RELEASE CODE - FOR SKY ERASER DEVELOPERS AND TESTERS ONLY ####
Пример #40
0
def instl_own_main(argv):
    """ Main instl entry point. Reads command line options and decides if to go into interactive or client mode.

    Builds the initial variable environment (paths, platform identity,
    invocation metadata) and dispatches to the instl instance matching the
    requested mode: command-list, client, doit, do_something, or — when not
    running compiled — admin, interactive, or gui.
    """
    with InvocationReporter(argv, report_own_progress=False):

        argv = argv.copy(
        )  # argument argv is usually sys.argv, which might change with recursive process calls
        options = CommandLineOptions()
        command_names = read_command_line_options(options, argv[1:])
        # Seed the variables visible to every instl command: install paths,
        # OS/platform identity, per-invocation randomness, and acting ids.
        initial_vars = {
            "__INSTL_EXE_PATH__":
            get_path_to_instl_app(),
            "__CURR_WORKING_DIR__":
            utils.safe_getcwd(
            ),  # the working directory when instl was launched
            "__INSTL_LAUNCH_COMMAND__":
            get_instl_launch_command(),
            "__INSTL_DATA_FOLDER__":
            get_data_folder(),
            "__INSTL_DEFAULTS_FOLDER__":
            "$(__INSTL_DATA_FOLDER__)/defaults",
            "__INSTL_COMPILED__":
            str(getattr(sys, 'frozen', False)),
            "__PYTHON_VERSION__":
            sys.version_info,
            "__PLATFORM_NODE__":
            platform.node(),
            "__PYSQLITE3_VERSION__":
            sqlite3.version,
            "__SQLITE_VERSION__":
            sqlite3.sqlite_version,
            "__COMMAND_NAMES__":
            command_names,
            "__CURRENT_OS__":
            os_family_name,
            "__CURRENT_OS_SECOND_NAME__":
            os_second_name,
            "__CURRENT_OS_NAMES__":
            current_os_names,
            "__CURRENT_OS_DESCRIPTION__":
            utils.get_os_description(),
            "__SITE_DATA_DIR__":
            os.path.normpath(appdirs.site_data_dir()),
            "__SITE_CONFIG_DIR__":
            os.path.normpath(appdirs.site_config_dir()),
            "__USER_DATA_DIR__":
            os.path.normpath(appdirs.user_data_dir()),
            "__USER_CONFIG_DIR__":
            os.path.normpath(appdirs.user_config_dir()),
            "__USER_HOME_DIR__":
            os.path.normpath(os.path.expanduser("~")),
            "__USER_DESKTOP_DIR__":
            os.path.normpath("$(__USER_HOME_DIR__)/Desktop"),
            "__USER_TEMP_DIR__":
            os.path.normpath(
                os.path.join(tempfile.gettempdir(),
                             "$(SYNC_BASE_URL_MAIN_ITEM)/$(REPO_NAME)")),
            "__SYSTEM_LOG_FILE_PATH__":
            utils.get_system_log_file_path(),
            "__INVOCATION_RANDOM_ID__":
            ''.join(random.choice(string.ascii_lowercase) for _ in range(16)),
            "__SUDO_USER__":
            os.environ.get("SUDO_USER", "no set"),
            # VENDOR_NAME, APPLICATION_NAME need to be set so logging can be redirected to the correct folder
            "VENDOR_NAME":
            os.environ.get("VENDOR_NAME", "Waves Audio"),
            "APPLICATION_NAME":
            os.environ.get("APPLICATION_NAME", "Waves Central"),
            "__ARGV__":
            argv,
            "ACTING_UID":
            -1,
            "ACTING_GID":
            -1,
        }

        # POSIX: record the real uid/gid; Windows: placeholder ids plus the
        # helper DLL used to find file-locking processes.
        if os_family_name != "Win":
            initial_vars.update({
                "__USER_ID__": str(os.getuid()),
                "__GROUP_ID__": str(os.getgid())
            })
        else:
            initial_vars.update({
                "__USER_ID__":
                -1,
                "__GROUP_ID__":
                -1,
                "__WHO_LOCKS_FILE_DLL_PATH__":
                "$(__INSTL_DATA_FOLDER__)/who_locks_file.dll"
            })

        # Dispatch on the requested mode; 'instance' is closed at the end.
        instance = None
        if options.__MAIN_COMMAND__ == "command-list":
            from pyinstl.instlCommandList import run_commands_from_file
            run_commands_from_file(initial_vars, options)
        elif options.mode == "client":  #shai, maybe add a log here?  before all imports
            log.debug("begin, importing instl object")  #added by oren
            from pyinstl.instlClient import InstlClientFactory
            instance = InstlClientFactory(initial_vars,
                                          options.__MAIN_COMMAND__)
            instance.progress("welcome to instl",
                              instance.get_version_str(short=True),
                              options.__MAIN_COMMAND__)
            instance.init_from_cmd_line_options(options)
            instance.do_command(
            )  # after all preprartion is done, we execute the command itself
        elif options.mode == "doit":
            from pyinstl.instlDoIt import InstlDoIt
            instance = InstlDoIt(initial_vars)
            instance.progress("welcome to instl",
                              instance.get_version_str(short=True),
                              options.__MAIN_COMMAND__)
            instance.init_from_cmd_line_options(options)
            instance.do_command()
        elif options.mode == "do_something":
            from pyinstl.instlMisc import InstlMisc
            instance = InstlMisc(initial_vars, options.__MAIN_COMMAND__)
            instance.progress("welcome to instl",
                              instance.get_version_str(short=True),
                              options.__MAIN_COMMAND__)
            instance.init_from_cmd_line_options(options)
            instance.do_command()
        elif not getattr(
                sys, 'frozen', False
        ):  # these modes are not available in compiled instl to avoid issues such as import errors for users
            if options.mode == "admin":
                if os_family_name not in ("Linux", "Mac"):
                    raise EnvironmentError(
                        "instl admin commands can only run under Mac or Linux")
                from pyinstl.instlAdmin import InstlAdmin
                instance = InstlAdmin(initial_vars)
                instance.progress("welcome to instl",
                                  instance.get_version_str(short=True),
                                  options.__MAIN_COMMAND__)
                instance.init_from_cmd_line_options(options)
                instance.do_command()
            elif options.mode == "interactive":
                from pyinstl.instlClient import InstlClient
                client = InstlClient(initial_vars)
                client.init_from_cmd_line_options(options)
                from pyinstl.instlAdmin import InstlAdmin
                from pyinstl.instlInstanceBase_interactive import go_interactive
                admin = InstlAdmin(initial_vars)
                admin.init_from_cmd_line_options(options)
                go_interactive(client, admin)
            elif options.mode == "gui":
                from pyinstl.instlGui import InstlGui
                instance = InstlGui(initial_vars)
                instance.init_from_cmd_line_options(options)
                instance.do_command()

        # make sure instance's dispose functions are called
        if instance is not None:
            instance.close()
Пример #41
0
import appdirs
import os


# WHEN COMPLETED, THIS FILE WILL SET UP APPLICATION DIRECTORY CROSS PLATFORMS
appname = "SyncMonster"
appauthor = "SyncMonster"


# NOTE(review): stripping the literal substring 'ubuntu' from the reported
# site-data path is unusual -- confirm this matches where the app actually
# reads its data. The existence check uses the stripped path while the
# directories below are created under the unstripped one, so on systems where
# the two differ the symlink target may not exist.
app_path = appdirs.site_data_dir(appname, appauthor).replace('ubuntu', '')
if not os.path.exists(app_path):
    create_path = appdirs.site_data_dir(appname, appauthor)
    os.makedirs(os.path.join(create_path, 'temp'))
    os.makedirs(os.path.join(create_path, 'Upload'))

    # Expose the Upload directory on the user's Desktop via a symlink.
    sym_upload_dir = os.path.join(os.path.expanduser("~"), "Desktop", 'SyncMonster Upload')
    os.symlink(os.path.join(app_path, 'Upload'), sym_upload_dir)

    print('MADE DIR')
else:
    print('%s EXISTS' % app_path)

Пример #42
0
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import appdirs

from prac._version import APPNAME, APPAUTHOR

# Repository root: two levels above this module's directory.
root = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
code_base = root
user_data = appdirs.user_data_dir(APPNAME, APPAUTHOR)

# If the tree lives inside a 'python*' directory (source checkout layout),
# treat its parent as the application-data root; otherwise fall back to the
# platform site data dir, or the per-user data dir when the site dir is absent.
if os.path.basename(root).startswith('python'):
    root = os.path.realpath(os.path.join(root, '..'))
    app_data = root
else:
    app_data = appdirs.site_data_dir(APPNAME, APPAUTHOR)
    if not os.path.exists(app_data):
        app_data = user_data

# Well-known subdirectories resolved against the chosen data root.
trdparty = os.path.join(app_data, '3rdparty')
data = os.path.join(app_data, 'data')
nltk_data = os.path.join(data, 'nltk_data')
etc = os.path.join(app_data, 'etc')
examples = os.path.join(app_data, 'examples')
models = os.path.join(app_data, 'models')
# Modules are looked up in the code tree, not the data tree.
pracmodules = os.path.join(code_base, 'prac', 'pracmodules')
Пример #43
0
 def _set_working_directory_if_default(self):
     """Point working_dir_root at the appdirs site data dir when unset or 'default'."""
     # Read the instance dict directly (mirrors the original check) so a
     # class-level attribute does not mask a missing per-instance value.
     configured = self.common.__dict__.get('working_dir_root')
     if not configured or configured == 'default':
         self.common.working_dir_root = appdirs.site_data_dir('ezyt')
Пример #44
0
class Crossgrader:
    """Finds packages to crossgrade and crossgrades them.

    Instance attributes:
        target_arch: A string representing the target architecture of dpkg.
        current_arch: A string representing the current architecture of dpkg.
        non_supported_arch: A boolean indicating whether the target arch is natively supported
            by the current CPU.
        qemu_installed: A boolean indicated whether or not qemu-user-static is installed.

        _apt_cache: python3-apt cache

    Class attributes:
        initramfs_functions_backup_path: Path to the backup of hook-functions.
        arch_check_hook_path: Path to the arch-check-hook.sh shell script.
        qemu_deb_path: Path to a directory containing temporary cached qemu debs.

    """

    # Well-known dpkg/apt filesystem locations.
    APT_CACHE_DIR = '/var/cache/apt/archives'
    DPKG_INFO_DIR = '/var/lib/dpkg/info'
    INITRAMFS_FUNCTIONS_PATH = '/usr/share/initramfs-tools/hook-functions'

    ARCH_CHECK_HOOK_NAME = 'arch-check-hook.sh'
    INITRAMFS_FUNCTIONS_BACKUP_NAME = 'hook-functions.bak'
    QEMU_DEB_DIR_NAME = 'qemu-debs'

    APP_NAME = 'debian_crossgrader'
    # Persistent state directory shared across crossgrade stages
    # (backup of hook-functions, cached qemu debs).
    storage_dir = appdirs.site_data_dir(APP_NAME)

    FALLBACK_CROSSGRADER_DEPENDENCIES = ['python3', 'python3-apt']

    # qemu_deb_path will be filled w/ qemu-user-static debs if self.non_supported_arch == True
    # during first stage
    # if it exists, its debs will be installed before second stage
    qemu_deb_path = os.path.join(storage_dir, QEMU_DEB_DIR_NAME)
    initramfs_functions_backup_path = os.path.join(
        storage_dir, INITRAMFS_FUNCTIONS_BACKUP_NAME)

    # The arch-check hook ships alongside this module, not in storage_dir.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    arch_check_hook_path = os.path.join(script_dir, ARCH_CHECK_HOOK_NAME)

    def __init__(self, target_architecture):
        """Inits Crossgrader with the given target architecture.

        Raises:
            InvalidArchitectureError: The given target_architecture is not recognized
                by dpkg.
        """

        # set LC_ALL=C so we can rely on command output being English
        os.environ['LC_ALL'] = 'C'

        try:
            os.makedirs(self.storage_dir, exist_ok=True)
        except PermissionError:
            # storage_dir lives under a system path, so creation failing
            # means we are not running as root.
            raise PermissionError('crossgrader: must be superuser.')

        valid_architectures = subprocess.check_output(
            ['dpkg-architecture', '--list-known'],
            universal_newlines=True).splitlines()
        if target_architecture not in valid_architectures:
            raise InvalidArchitectureError(
                'Architecture {} is not recognized by dpkg.'.format(
                    target_architecture))

        subprocess.check_call(
            ['dpkg', '--add-architecture', target_architecture])

        self.current_arch = subprocess.check_output(
            ['dpkg', '--print-architecture'], universal_newlines=True).strip()
        self.target_arch = target_architecture

        # arch-test -n: can the target arch run natively (without emulation)?
        arch_test_ret = subprocess.call(['arch-test', '-n', self.target_arch],
                                        stdout=subprocess.DEVNULL,
                                        stderr=subprocess.DEVNULL)
        if arch_test_ret != 0:
            if arch_test_ret == 2:
                # no need to throw an error here, qemu-user-static should be able to handle it
                print((
                    'arch-test lacks a helper for {}; assuming not supported on this machine '
                    'but runnable with emulation.').format(self.target_arch))
            elif arch_test_ret == 1:
                # ensure target arch can be run with emulation for foreign package setup
                support_with_emu = subprocess.call(
                    ['arch-test', self.target_arch],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL) == 0
                if not support_with_emu:
                    raise InvalidArchitectureError((
                        'Architecture {} is not runnable on this machine. Please install '
                        'qemu-user-static and try again.').format(
                            self.target_arch))
            else:
                raise CrossgradingError('Ensure arch-test is installed.')

            self.non_supported_arch = True
        else:
            self.non_supported_arch = False

        if self.non_supported_arch:
            print(('Architecture {} is not natively supported '
                   'on the current machine.').format(self.target_arch))

        print('Installing initramfs binary architecture check hook...')
        if self.create_initramfs_arch_check():
            print('Hook installed.')
        else:
            print('Hook installation failed.')

        self._apt_cache = apt.Cache()
        try:
            self._apt_cache.update(apt.progress.text.AcquireProgress())
            self._apt_cache.open()  # re-open to utilise new cache
        except apt.cache.FetchFailedException:
            # A failed index fetch is not fatal; continue with the stale cache.
            traceback.print_exc()
            print('Ignoring...')

        self.qemu_installed = self._apt_cache['qemu-user-static'].is_installed

    def __enter__(self):
        """Enter the with statement"""
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Exit the with statement"""
        self.close()

    def close(self):
        """Close the package cache"""
        self._apt_cache.close()

    def create_initramfs_arch_check(self):
        """Inserts the contents of arch-check-hook.sh into the copy_exec function.

        It locates the start of the copy_exec function
        in /usr/share/initramfs-tools/hook-functions, and inserts the arch check hook
        to check the architecture of all binaries that are copied into the initramfs.

        If the arch check hook already exists, then it is not copied.

        The arch-test package must be installed for the hook to function.

        Returns:
            True if the hook was successfully installed, False otherwise.
        """

        if not os.path.isfile(self.INITRAMFS_FUNCTIONS_PATH):
            print('hook-functions file does not exist.')
            return False

        with open(self.INITRAMFS_FUNCTIONS_PATH, 'r') as functions_file:
            functions_lines = functions_file.read().splitlines()

        # is there a better way than using a magic string?
        if '# begin arch-check-hook' in functions_lines:
            print('arch check hook already installed.')
            return False

        # Back up the pristine file so remove_initramfs_arch_check can restore it.
        shutil.copy2(self.INITRAMFS_FUNCTIONS_PATH,
                     self.initramfs_functions_backup_path)
        assert os.path.isfile(self.initramfs_functions_backup_path)
        print('Backed up hook-functions to {}'.format(
            self.initramfs_functions_backup_path))

        with open(self.arch_check_hook_path, 'r') as arch_hook_file:
            arch_hook_lines = arch_hook_file.read().splitlines()
        for idx, line in enumerate(arch_hook_lines):
            arch_hook_lines[idx] = line.replace('TARGET_ARCH_PLACEHOLDER',
                                                self.target_arch)

        try:
            # Insert directly after the 'copy_exec() {' definition line.
            hook_index = functions_lines.index('copy_exec() {') + 1
        except ValueError:
            print('Could not find copy_exec function definition.')
            return False

        # NOTE(review): this splice drops functions_lines[hook_index] (the first
        # original line of the copy_exec body) instead of inserting before it --
        # confirm the hook script is expected to re-emit that line.
        functions_lines = functions_lines[:hook_index] + arch_hook_lines + \
                          functions_lines[hook_index + 1:]
        with open(self.INITRAMFS_FUNCTIONS_PATH, 'w') as functions_file:
            functions_file.write('\n'.join(functions_lines))

        return True

    @staticmethod
    def remove_initramfs_arch_check():
        """Restores the contents of hook-functions.

        This function should be called at the end of the crossgrade process.
        Currently, the hook is installed every time the Crossgrader function is created
        and removed when during in the third_stage function.

        Returns:
            True if the hook was successfully removed, False otherwise.
        """

        if not os.path.isfile(Crossgrader.INITRAMFS_FUNCTIONS_PATH):
            print('hook-functions file does not exist.')
            return False

        if not os.path.isfile(Crossgrader.initramfs_functions_backup_path):
            print('Backup file does not exist.')
            return False

        with open(Crossgrader.INITRAMFS_FUNCTIONS_PATH, 'r') as functions_file:
            functions_lines = functions_file.read().splitlines()

        # Same magic marker used by create_initramfs_arch_check.
        if '# begin arch-check-hook' not in functions_lines:
            print('arch check hook not installed.')
            return False

        shutil.copy2(Crossgrader.initramfs_functions_backup_path,
                     Crossgrader.INITRAMFS_FUNCTIONS_PATH)
        os.remove(Crossgrader.initramfs_functions_backup_path)
        return True

    @staticmethod
    def _fix_dpkg_errors(packages):
        """Tries to fix the given packages that dpkg failed to install.

        First, run apt install -f.

        If the errors are still not fixed, remove all other co-installed packages
        for packages declared M-A: same.

        Returns:
            True if all errors were fixed, False otherwise.
        """

        if not packages:
            return True

        print('Running apt-get --fix-broken install...')
        # let user select yes/no
        ret_code = subprocess.call(['apt-get', 'install', '-f', '-y'])
        if ret_code == 0:
            return True

        print('apt-get --fix-broken install failed.')
        print('Removing all coinstalled packages...')
        for package in packages:
            package_status_proc = subprocess.Popen(['dpkg', '-s', package],
                                                   stdout=subprocess.PIPE,
                                                   stderr=sys.stderr,
                                                   universal_newlines=True)
            package_status, __ = package_status_proc.communicate()
            if 'Multi-Arch: same' not in package_status:
                continue

            # architecture should be specified for M-A: same packages
            assert ':' in package

            short_name = package[:package.index(':')]
            # Enumerate every installed architecture variant of this package.
            coinstalled = subprocess.check_output(
                [
                    'dpkg-query', '-f', '${Package}:${Architecture}\n', '-W',
                    short_name
                ],
                universal_newlines=True).splitlines()
            for coinstalled_package in coinstalled:
                if coinstalled_package == package:
                    continue

                ret_code = subprocess.call([
                    'dpkg', '--remove', '--force-depends', coinstalled_package
                ])
                if ret_code == 0:
                    continue

                print('dpkg failed to remove {}.'.format(coinstalled_package))

                # this triggers lintian: uses-dpkg-database-directly,
                # but is necessary to handle crossgrading
                # packages like python3-pil and python3-cairo
                # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=810551
                prerm_script = '{}.prerm'.format(coinstalled_package)
                prerm_script = os.path.join(Crossgrader.DPKG_INFO_DIR,
                                            prerm_script)

                if os.path.isfile(prerm_script):
                    print('prerm script found: {}'.format(prerm_script))
                    cont = input(
                        'Remove prerm script and try again [Y/n]? ').lower()
                    # Empty answer defaults to yes.
                    if cont == 'y' or not cont:
                        os.remove(prerm_script)
                        ret_code = subprocess.call([
                            'dpkg', '--remove', '--force-depends',
                            coinstalled_package
                        ])
                        if ret_code != 0:
                            print("Couldn't remove {}.".format(
                                coinstalled_package))

        print('Running dpkg --configure -a ...')
        ret_code = subprocess.call(['dpkg', '--configure', '-a'])
        if ret_code != 0:
            return False

        print('Running apt-get --fix-broken install...')
        ret_code = subprocess.call(['apt-get', 'install', '-f'])
        if ret_code == 0:
            return True

        return False

    @staticmethod
    def _install_and_configure(debs_to_install):
        """Runs one pass of dpkg -i and dpkg --configure -a on the input .deb files.

        dpkg outputs failures in two ways: the .deb file that failed,
        or the package name that failed.

        .deb files are outputted if the installation totally failed in some way
        (dependency error, etc.), so that the package wasn't added to the database.
        They should be retried later.

        package names are outputted if the installation didn't completely fail.
        The errors should be fixed, and then dpkg --configure -a should be run.

        Returns:
            A list of .debs that failed to be installed, and a list of packages
            that failed to be installed.
        """
        def get_dpkg_failures(dpkg_errs):
            """Returns a tuple of (failed_debs, failed_packages) parsed from dpkg's stderr."""
            debs = set()
            packages = set()

            # Failures are listed after a fixed sentinel line in dpkg's stderr.
            capture_packages = False

            for line in dpkg_errs:
                line = line.strip()

                if capture_packages:
                    if line.endswith('.deb'):
                        assert os.path.isfile(
                            line), '{} does not exist'.format(line)
                        debs.add(line)
                    else:
                        packages.add(line)

                if line == 'Errors were encountered while processing:':
                    capture_packages = True

            return debs, packages

        # set max error count so dpkg does not abort from too many errors
        # multiply by 2 because a package can have multiple errors
        max_error_count = max(50, len(debs_to_install) * 2)

        error_count_option = '--abort-after={}'.format(max_error_count)

        proc = subprocess.Popen(['dpkg', '-i', error_count_option] +
                                debs_to_install,
                                stdout=sys.stdout,
                                stderr=subprocess.PIPE,
                                universal_newlines=True)
        __, __, errs = cmd_utils.tee_process(proc)

        failed_debs, failed_packages = get_dpkg_failures(errs.splitlines())

        print('Running dpkg --configure -a...')
        proc = subprocess.Popen(
            ['dpkg', '--configure', '-a', error_count_option],
            stdout=sys.stdout,
            stderr=subprocess.PIPE,
            universal_newlines=True)
        __, __, errs = cmd_utils.tee_process(proc)

        # Merge failures from the configure pass with those from the install pass.
        new_failed_debs, new_failed_packages = get_dpkg_failures(
            errs.splitlines())
        failed_debs.update(new_failed_debs)
        failed_packages.update(new_failed_packages)

        return list(failed_debs), list(failed_packages)

    @staticmethod
    def _install_configure_loop(debs_to_install):
        """
        Repeatedly runs _install_and_configure until all .debs are installed or
        failures stop decreasing.

        Args:
            debs_to_install: A list of paths to .deb files.

        Returns:
            A list of packages that were not successfully installed.

        Raises:
            PackageInstallationError: Some of the packages were not successfully
                installed/configured.
        """
        # unfeasible to perform a topological sort (complex/circular dependencies, etc.)
        # easier to install/reconfigure repeatedly until errors resolve themselves

        # use dpkg to perform the crossgrade because apt does not realize that crossgrading
        # a package will not necessarily break it because of qemu user emulation
        # e.g. apt refuses to install perl-base

        # crossgrade in one call to prevent repeat triggers
        # (e.g. initramfs rebuild), saving time

        # https://blog.zugschlus.de/archives/972-How-to-amd64-an-i386-Debian-installation-with-multiarch.html#c24572
        # Summary: Use dpkg --unpack; dpkg --configure --pending instead of dpkg -i?
        # dpkg --unpack still gets stuck because of various pre-depends

        loop_count = 0
        debs_remaining = debs_to_install
        failed_packages = None

        while debs_remaining:
            loop_count += 1
            print('dpkg -i/--configure loop #{}'.format(loop_count))

            failed_debs, failed_packages = Crossgrader._install_and_configure(
                debs_remaining)

            # Delete .debs that made it in so retries shrink the working set.
            for deb in debs_remaining:
                if deb not in failed_debs:
                    os.remove(deb)

            assert len(failed_debs) <= len(debs_remaining)

            # No progress this pass -- further loops would spin forever.
            if len(failed_debs) == len(debs_remaining):
                print('Number of failed installs did not decrease, halting...')
                raise PackageInstallationError(failed_debs)

            debs_remaining = failed_debs

            if debs_remaining:
                print(
                    'The following .deb files were not fully installed, retrying...'
                )
                for deb in debs_remaining:
                    print('\t{}'.format(deb))

        return failed_packages

    @staticmethod
    def install_packages(debs_to_install=None, fix_broken=True):
        """Installs specified .deb files.

        Installs the .deb files specified in packages_to_install
        using a looping dpkg -i *.deb / dpkg --configure -a.

        Args:
            debs_to_install: A list of paths to .deb files. If it is None,
                all .debs in APT's cache will be installed.
            fix_broken: If true, try to fix any dpkg errors after installation.

        Raises:
            PackageInstallationError: Some of the packages were not successfully
                installed/configured.
        """
        if debs_to_install is None:
            debs_to_install = glob(
                os.path.join(Crossgrader.APT_CACHE_DIR, '*.deb'))

        # find all packages marked as autoinstalled, and match them to the newly installed ones
        # apt-mark shows packages in native architecture without colon and others with
        print('Parsing automatically installed packages...')
        proc = subprocess.Popen(['apt-mark', 'showauto'],
                                stdout=subprocess.PIPE,
                                universal_newlines=True)
        auto_pkgs_list, __ = proc.communicate()
        auto_pkgs_list = auto_pkgs_list.splitlines()

        ret_code = proc.returncode
        assert ret_code == 0, 'apt-mark showauto failed with code {}'.format(
            ret_code)

        # to handle packages installed in non-native architectures
        # (e.g. all native packages during second stage), strip all architectures from the
        # packages returned by apt-mark
        # therefore, if any existing package with the same name is auto-installed,
        # then the newly installed one will be auto-installed as well
        auto_pkgs = set()
        for pkg in auto_pkgs_list:
            try:
                auto_pkgs.add(pkg[:pkg.index(':')])
            except ValueError:
                # No ':' present: already an architecture-less native name.
                auto_pkgs.add(pkg)

        # must find such packages before they are installed because dpkg -i will re-mark
        # them as manually installed
        mark_auto_pkgs = []
        for deb in debs_to_install:
            proc = subprocess.Popen([
                'dpkg-deb', '--showformat=${Package}:${Architecture}', '-W',
                deb
            ],
                                    stdout=subprocess.PIPE,
                                    universal_newlines=True)
            pkg_full_name, __ = proc.communicate()

            ret_code = proc.returncode
            assert ret_code == 0, 'dpkg-deb failed with code {}'.format(
                ret_code)

            pkg_short_name = pkg_full_name[:pkg_full_name.index(':')]
            if pkg_short_name in auto_pkgs:
                mark_auto_pkgs.append(pkg_full_name)
        print('...done')

        failed_packages = Crossgrader._install_configure_loop(debs_to_install)

        if fix_broken and not Crossgrader._fix_dpkg_errors(failed_packages):
            print('Some dpkg errors could not be fixed automatically.')

        # Restore the auto-installed markers clobbered by dpkg -i.
        print('Re-marking packages as auto-installed...')
        ret_code = subprocess.call(['apt-mark', 'auto'] + mark_auto_pkgs,
                                   stdout=subprocess.DEVNULL)
        print('...done')

    def cache_package_debs(self, targets, target_dir=None):
        """Cache specified packages.

        Clears APT's .deb cache, then downloads specified packages
        to /var/apt/cache/archives or the given target_dir using python-apt.

        Args:
            targets: A list of apt.package.Package objects to crossgrade.
            target_dir: If target_dir set, move all cached .debs the given directory.
        """

        # clean /var/cache/apt/archives for download
        # is there a python-apt function for this?
        subprocess.check_call(['apt-get', 'clean'])

        # use python-apt to cache .debs for package and dependencies
        # because apt-get --download-only install will not download
        # if it can't find a good way to resolve dependencies
        for target in targets:
            if target.is_installed:
                continue

            target.mark_install(
                auto_fix=False)  # do not try to fix broken packages

            # some packages (python3-apt) refuses to mark as install for some reason
            if not target.marked_install:
                print(('Could not mark {} for install, '
                       'fixing manually.').format(target.fullname))
                target.mark_install(auto_fix=False, auto_inst=False)
                assert target.marked_install, \
                       '{} not marked as install despite no auto_inst'.format(target.fullname)

        __, __, free_space = shutil.disk_usage(self.APT_CACHE_DIR)

        # Estimate space for both the downloaded archives and their unpacked size.
        required_space = 0
        for package in self._apt_cache:
            if package.marked_install:
                required_space += package.candidate.installed_size
                required_space += package.candidate.size

        if required_space > free_space:
            raise NotEnoughSpaceError(
                '{} bytes free but {} bytes required'.format(
                    free_space, required_space))

        # fetch_archives() throws a more detailed error if a specific package
        # could not be downloaded for some reason.
        # Do not check its return value; it is undefined.
        self._apt_cache.fetch_archives()

        self._apt_cache.clear()

        if target_dir is not None:
            os.makedirs(target_dir, exist_ok=True)

            for deb in glob(os.path.join(Crossgrader.APT_CACHE_DIR, '*.deb')):
                shutil.move(deb, target_dir)

    def find_package_objs(self, names, **kwargs):
        """Wrapper for debian_crossgrader.utils.apt's find_package_objs,
        passing in the instance's cache."""
        return apt_utils.find_package_objs(names, self._apt_cache, **kwargs)

    def _is_first_stage_target(self, package):
        """Returns a boolean of whether or not the apt.package.Package is a first stage target.

        A first stage target is a package with Priority: required/important.

        This function does not check if the package has installed initramfs hooks.

        Without first stage targets being crossgraded, the system will fail to reboot to the
        new architecture or will be useless after reboot.
        """
        if not package.is_installed:
            return False

        # do not use package.architecture() because Architecture: all packages
        # returns the native architecture
        if package.installed.architecture in ('all', self.target_arch):
            return False

        if package.installed.priority in ('required', 'important'):
            return True

        return False

    def _get_initramfs_hook_packages(self, ignore_initramfs_remnants=False):
        """Returns a set of packages shortnames that contain initramfs hooks.

        Raises:
            RemnantInitramfsHooksError: Some initramfs hooks could not be matched with a package.
        """
        out_names = set()

        unaccounted_hooks = set(glob('/usr/share/initramfs-tools/hooks/*'))
        hook_pkgs = apt_utils.iter_packages_containing_files(
            self._apt_cache, '/usr/share/initramfs-tools/hooks/*')

        for package, hook_file in hook_pkgs:
            if hook_file not in unaccounted_hooks:
                print(('Expected {} to contain an initramfs hook, '
                       'but it does not.').format(package.fullname))
                print('Skipping.')
                continue

            unaccounted_hooks.discard(hook_file)

            if not package.is_installed:
                print(('WARNING: {}, containing an initramfs hook, '
                       'is marked as not fully installed.').format(package))
                print('Assuming it is installed.')
                architecture = package.candidate.architecture
            else:
                architecture = package.installed.architecture

            if architecture not in ('all', self.target_arch):
                out_names.add(package.shortname)

        if unaccounted_hooks and not ignore_initramfs_remnants:
            raise RemnantInitramfsHooksError(unaccounted_hooks)

        return out_names

    def list_first_stage_targets(self,
                                 ignore_initramfs_remnants=False,
                                 ignore_unavailable_targets=False):
        """Returns a list of apt.package.Package objects that must be crossgraded before reboot.

        Retrieves and returns a list of all packages with Priority: required/important,
        packages with installed initramfs hooks, packages for login shells, and crossgrader
        dependencies.

        Args:
            ignore_initramfs_remnants: If true, do not raise a RemnantInitramfsHooksError if
                there are initramfs hooks that could not be linked.
            ignore_unavailable_targets: If true, do not raise a PackageNotFoundError if a package
                could not be found in the target architecture.

        Raises:
            RemnantInitramfsHooksError: Some initramfs hooks could not be matched with a package.
            PackageNotFoundError: A required package in the target architecture was not available
                in APT's cache.
        """

        targets = {
            pkg.shortname
            for pkg in apt_utils.iter_package_objs(self._apt_cache)
            if self._is_first_stage_target(pkg)
        }
        targets |= self._get_initramfs_hook_packages(ignore_initramfs_remnants)

        # crossgrade crossgrader dependencies
        # if python-apt is not crossgraded, it will not find any packages other than
        # its own architecture/installed packages
        try:
            crossgrader_pkg = self._apt_cache['crossgrader']
        except KeyError:
            # fallback if not installed as package or package doesn't exist
            targets.update(self.FALLBACK_CROSSGRADER_DEPENDENCIES)
        else:
            if not crossgrader_pkg.is_installed:
                targets.update(self.FALLBACK_CROSSGRADER_DEPENDENCIES)
            else:
                for dep in crossgrader_pkg.installed.dependencies:
                    targets.update(
                        ver.package.shortname
                        for ver in dep.installed_target_versions
                        if ver.architecture not in ('all', self.target_arch))

        # crossgrade all available login shells
        # if login shell is not crossgrader and isn't priority: required (e.g. zsh), then the user
        # cannot login
        shells = shells_utils.list_shells()
        for package, __ in apt_utils.iter_packages_containing_files(
                self._apt_cache, *shells):
            if not package.is_installed:
                print(('WARNING: {}, containing a login shell, '
                       'is marked as not fully installed.').format(package))
                print('Assuming it is installed.')
                architecture = package.candidate.architecture
            else:
                architecture = package.installed.architecture

            if architecture not in ('all', self.target_arch):
                targets.add(package.shortname)

        targets.add('sudo')

        return self.find_package_objs(
            targets,
            default_arch=self.target_arch,
            ignore_unavailable_targets=ignore_unavailable_targets,
            ignore_installed=True)

    def list_second_stage_targets(self, ignore_unavailable_targets):
        """Returns a list of apt.package.Package objects that are not in the target architecture.

        Args:
            ignore_unavailable_targets: If true, do not raise a PackageNotFoundError if a package
                could not be found in the target architecture.

        Raises:
            PackageNotFoundError: A required package in the target architecture was not available
                in APT's cache.
        """
        targets = set()
        for pkg in apt_utils.iter_package_objs(self._apt_cache):
            if not pkg.is_installed:
                continue

            if pkg.installed.architecture not in ('all', self.target_arch):
                targets.add(pkg.shortname)

        return self.find_package_objs(
            targets,
            default_arch=self.target_arch,
            ignore_unavailable_targets=ignore_unavailable_targets,
            ignore_installed=True)
Пример #45
0
# -*- coding: utf-8 -*-
"""orphan.data -- data files.
"""
import appdirs
from path import path

# Application identity used to derive all platform-specific directories.
name = 'Orphan'
author = 'Worlds Enough Studios'

# Platform-appropriate data/cache/log locations, wrapped as path objects.
USER_DATA = path(appdirs.user_data_dir(name, author))
SITE_DATA = path(appdirs.site_data_dir(name, author))
CACHE_DATA = path(appdirs.user_cache_dir(name, author))
LOG_DATA = path(appdirs.user_log_dir(name, author))
# Directory containing this module: data files shipped with the package.
APP_DATA = path(__file__).abspath().dirname()