Example #1
def install_virtualenv_contents_setup(venv_path, package_repository_path,
                                      package_list_path):
    """Install or upgrade requirements into an existing virtualenv using setup.py"""
    # Create temporary working directory
    work_dir = sudo('mktemp -d')

    # Read contents of file with package names
    reqs_file_contents = run('cat "{package_list_path}"'.format(**locals()))

    for fn in reqs_file_contents.split('\n'):
        fn = fn.strip()
        if fn == '':
            continue

        # Unpack the package into the temporary directory
        with cd(work_dir):
            sudo(get_extract_command(posixjoin(package_repository_path, fn)))

        # Run setup.py from extracted package
        package_name = remove_package_extension(fn)
        extracted_package_dir = posixjoin(work_dir, package_name)
        with cd(extracted_package_dir):
            log_name = posixjoin(work_dir, package_name + '.log')
            sudo('{venv_path}/bin/python setup.py install > {log_name} 2>&1'.
                 format(**locals()))
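
The snippet relies on remote helpers that the listing does not show; a likely import block (an assumption, based on the Fabric 1.x style of sudo/run/cd):

# Assumed imports for the snippet above (Fabric 1.x style):
from fabric.api import sudo, run, cd
from posixpath import join as posixjoin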
Example #2
    def find_tarball(self, modulename, max_version, wantchecksum):
        if modulename not in self.modules:
            return None, None, None

        resp = perform_request(
            posixjoin(self.baseurl, modulename, 'cache.json'))
        versions = resp.json()[1][modulename]
        latest = get_latest_version(versions.keys(), max_version)

        if not latest:
            return None, None, None

        extensions = ['tar.xz', 'tar.bz2', 'tar.gz']
        for ext in extensions:
            if ext in versions[latest]:
                tarball = versions[latest][ext]
                break
        else:
            # unknown extension
            return None, None, None

        checksum = None
        if wantchecksum and 'sha256sum' in versions[latest]:
            resp = perform_request(
                posixjoin(self.baseurl, modulename,
                          versions[latest]['sha256sum']))

            basename = os.path.basename(tarball)
            for line in resp.text.splitlines():
                fields = line.split()
                # Expect "<checksum> <filename>" pairs; skip malformed lines
                if len(fields) >= 2 and basename == fields[1]:
                    checksum = fields[0]
                    break

        return posixjoin(self.baseurl, modulename, tarball), latest, checksum
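
The extension loop above uses Python's for/else idiom: the else branch runs only when the loop finishes without hitting break. A minimal standalone illustration:

# for/else: the else clause runs only if no `break` fired.
for ext in ['tar.xz', 'tar.bz2', 'tar.gz']:
    if ext == 'tar.bz2':
        break
else:
    print('no known extension found')  # skipped here, because we broke out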
Example #3
    def find_tarball(self, modulename, max_version, wantchecksum):
        good_dir = re.compile(r'^([0-9]+\.)*[0-9]+/?$')

        def hasdirs(x):
            return good_dir.search(x)

        def fixdirs(x):
            return re.sub(r'^([0-9]+\.[0-9]+)/?$', r'\1', x)

        location = self.baseurl.format(module=modulename)

        while True:
            req = perform_request(location)
            files = get_links(req.text)

            # Check to see if we need to descend to a subdirectory
            newdirs = [fixdirs(d) for d in files if hasdirs(d)]
            if newdirs:
                assert max_version is None or len(
                    max_version.split('.')
                ) <= 2, "limit can't have micro version when the project uses subdirs"
                newdir = get_latest_version(newdirs, max_version)
                location = posixjoin(req.url, newdir, "")
            else:
                break

        basenames = set()
        tarballs = []
        extensions = ['.tar.xz', '.tar.bz2', '.tar.gz']

        # Check by extension first: we prefer .tar.xz over .tar.bz2 and .tar.gz
        for ext in extensions:
            for filename in files:
                if filename.endswith(ext):
                    basename = filename[:-len(ext)]
                    if basename not in basenames:
                        basenames.add(basename)
                        tarballs.append(filename)

        re_tarball = (r'^' + re.escape(modulename) +
                      r'[_-](([0-9]+[\.\-])*[0-9]+)(\.orig)?\.tar.*$')

        tarballs = [t for t in tarballs if re.search(re_tarball, t)]
        versions = [re.sub(re_tarball, r'\1', t) for t in tarballs]

        if not versions:
            return None, None, None

        version = get_latest_version(versions, max_version)
        index = versions.index(version)

        location = posixjoin(location, tarballs[index])

        return location, version, None
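
The version is recovered by substituting the whole tarball name with capture group 1. A quick check of that regex on a made-up file name (glib is used purely for illustration):

import re

modulename = 'glib'  # hypothetical module name
re_tarball = (r'^' + re.escape(modulename) +
              r'[_-](([0-9]+[\.\-])*[0-9]+)(\.orig)?\.tar.*$')
print(re.sub(re_tarball, r'\1', 'glib-2.76.4.tar.xz'))  # -> 2.76.4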
Example #4
def upimg_save(**kwargs):
    res = dict(code=1)
    try:
        filename = kwargs["filename"]
        stream = kwargs["stream"]
        upload_path = kwargs.get("upload_path") or ""
        local_basedir = get_basedir()
        if not filename or not stream or not local_basedir:
            # raise (not return) so the except branch below reports the error
            raise ValueError("missing filename, stream or base directory")
    except (KeyError, ValueError):
        res.update(msg="Parameter error")
    else:
        if isinstance(upload_path, string_types):
            if upload_path.startswith("/"):
                upload_path = upload_path.lstrip('/')
            saveto = join(local_basedir, upload_path)
            if not exists(saveto):
                makedirs(saveto)
            filepath = join(saveto, filename)
            with open(filepath, "wb") as fp:
                fp.write(stream)
                res.update(code=0,
                           src=url_for("static",
                                       filename=posixjoin(
                                           current_app.config['UPLOAD_FOLDER'],
                                           upload_path, filename),
                                       _external=True))
        else:
            res.update(msg="The upload_path type error")
    return res
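
The snippet leans on several names the listing omits; a likely import set (an assumption), plus a hypothetical call from inside a Flask view:

# Assumed imports for the snippet above:
from os import makedirs
from os.path import join, exists
from posixpath import join as posixjoin
from flask import current_app, url_for
from six import string_types

# Hypothetical call from inside a Flask request handler:
res = upimg_save(filename='avatar.png', stream=b'\x89PNG...', upload_path='2024/06')
if res['code'] == 0:
    print(res['src'])  # external URL of the stored file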
Example #5
    def __init__(self, preprocessors=None, postprocessors=None, settings=None):
        if 'api_version' in settings:
            self._api_version = 'v' + str(settings['api_version'])
        else:
            self._api_version = 'v1'

        if 'instance_url' in settings:
            self._base_url = settings['instance_url']
        else:
            self._base_url = 'https://search.mapzen.com'

        self._default_endpoint = urljoin(
            self._base_url, posixjoin(self._api_version, 'search'))
        self._key_endpoint = urljoin(self._base_url,
                                     posixjoin(self._api_version, 'place'))
        self._endpoint = self._default_endpoint

        preprocessors = Mapzen.DEFAULT_PREPROCESSORS if preprocessors is None else preprocessors
        postprocessors = Mapzen.DEFAULT_POSTPROCESSORS if postprocessors is None else postprocessors
        GeocodeService.__init__(self, preprocessors, postprocessors, settings)
Example #6
    def __init__(self, preprocessors=None, postprocessors=None, settings=None):
        if 'api_version' in settings:
            self._api_version = 'v' + str(settings['api_version'])
        else:
            self._api_version = 'v1'

        if 'instance_url' in settings:
            self._base_url = settings['instance_url']
        else:
            self._base_url = 'https://api.geocode.earth/'

        self._default_endpoint = urljoin(
            self._base_url, posixjoin(self._api_version, 'search'))
        self._key_endpoint = urljoin(self._base_url,
                                     posixjoin(self._api_version, 'place'))
        self._endpoint = self._default_endpoint

        preprocessors = Pelias.DEFAULT_PREPROCESSORS if preprocessors is None else preprocessors
        postprocessors = Pelias.DEFAULT_POSTPROCESSORS if postprocessors is None else postprocessors
        GeocodeService.__init__(self, preprocessors, postprocessors, settings)
Example #7
    def links(self, page, base="", favicon="", styles=(), **kwargs):
        """ Iterator[str]: <link> tags in page <head>. """
        home, urlpath = self.home, self.urlpath

        page = page.with_suffix(".html")
        link = '<link rel="{}" href="{}">'.format
        if base:
            yield link("canonical", posixjoin(base, urlpath(home, page)))
        if favicon:
            yield link("icon", urlpath(page, favicon))
        for sheet in styles:
            yield link("stylesheet", urlpath(page, sheet))
Example #8
def guess_metadata_type(resource):
    parsed = urlparse.urlparse(resource)

    if not parsed.scheme:
        if os.path.exists(os.path.join(resource, 'repodata', 'repomd.xml')):
            return 'rpm-md'
        elif os.path.exists(os.path.join(resource, 'content')):
            return 'yast2'
    else:
        try:
            url = posixjoin(resource, 'repodata', 'repomd.xml')
            urllib2.urlopen(HeadRequest(url))
            return 'rpm-md'
        except urllib2.HTTPError as e:
            if e.code != 404:
                raise
        try:
            url = posixjoin(resource, 'content')
            urllib2.urlopen(HeadRequest(url))
            return 'yast2'
        except urllib2.HTTPError as e:
            if e.code != 404:
                raise
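
HeadRequest is not defined in the snippet; a common urllib2 recipe (assumed here) overrides get_method so only a HEAD request is sent:

import urllib2

class HeadRequest(urllib2.Request):
    """Request subclass that issues HEAD instead of GET (assumed helper)."""
    def get_method(self):
        return 'HEAD'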
Example #9
    def urlpath(cls, page, src):
        """
        str: URL from page to (local file or remote URL).
        Inputs must be absolute path-like objects.
        """
        # posixjoin with a single argument just coerces a path-like src to its string form
        page, src = Path(page), posixjoin(src)

        if urlsplit(src).scheme:
            return src
        elif not src.startswith("/"):
            raise ValueError(f"ambiguous src: {src}")
        elif not page.is_absolute():
            raise ValueError(f"ambiguous page: {page}")
        else:
            return quote(relpath(src, start=page.parent.as_posix()))
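
Likely imports (an assumption) and two hypothetical calls, assuming the class above is named Pages and urlpath is a classmethod:

# Assumed imports for the snippet above:
from pathlib import Path
from posixpath import join as posixjoin, relpath
from urllib.parse import quote, urlsplit

# Hypothetical usage:
Pages.urlpath('/blog/post.html', '/css/main.css')          # -> '../css/main.css'
Pages.urlpath('/blog/post.html', 'https://example.org/x')  # returned unchanged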
Example #10
    def _get_file(self, fileid, outname):
        """
        Download an artifact into a local file
        """
        # Create the directory to put the file, if needed
        (head, tail) = os.path.split(outname)
        if head:
            mkpath_recursive(head)
        # Download the file
        source_path = posixjoin(*object_remote_location(fileid))
        with open(outname, 'wb') as f:
            self.reader.get(source_path, f)
        # Set permissions & modification time
        filedata = self.remote_index[fileid]
        os.chmod(outname, int(filedata[2]))
        os.utime(outname, (-1, float(filedata[0])))
Example #11
    def _update_upstream_data(self, branch, upstream_name_branches):
        branch_path = os.path.join(self.dest_dir, branch)

        if not os.path.exists(branch_path):
            sys.stderr.write('No file %s available for requested branch %s, keeping previous data if available.\n' % (branch_path, branch))
            return

        (branch_id, branch_mtime) = self._get_branch_data(branch)
        stats = os.stat(branch_path)
        new_mtime = stats.st_mtime

        if not branch_id:
            # the branch does not exist, add it
            self.cursor.execute('''INSERT INTO branches VALUES (
                NULL, ?, ?
                );''',
                (branch, new_mtime))
            self.cursor.execute('''SELECT last_insert_rowid();''')
            branch_id = self.cursor.fetchone()[0]
        else:
            # do not update anything if the file has not changed
            if branch_mtime >= new_mtime:
                return
            # else update the mtime
            self.cursor.execute('''UPDATE branches SET
                mtime = ? WHERE id = ?;''',
                (new_mtime, branch_id))

        self.cursor.execute('''SELECT * FROM upstream WHERE branch = ?;''', (branch_id,))
        olddata = {}
        for row in self.cursor:
            olddata[row['name']] = (row['id'], row['version'], row['url'])

        # upstream data, after we've converted the names to branch names if
        # needed. For instance, glib:1.2.10 will translate to the "glib|1.3"
        # name but also to the "glib" name if it doesn't exist yet or if the
        # version there is lower than 1.2.10.
        real_upstream_data = {}

        re_upstream_data = re.compile('^([^:]*):([^:]+):([^:]+):(.*)$')

        fobj = open(branch_path)
        while True:
            line = fobj.readline()

            if len(line) == 0:
                break
            if self._is_line_comment(line):
                continue
            line = line.rstrip('\n')

            match = re_upstream_data.match(line)
            if not match:
                continue

            name = match.group(2)
            version = match.group(3)

            if match.group(1) == 'fallback':
                url = ''
            elif match.group(1) == 'nonfgo':
                url = match.group(4)
            elif match.group(1) == 'upstream':
                url = ''
            elif match.group(1) == 'cpan':
                url = posixjoin('http://cpan.perl.org/CPAN/authors/id/', match.group(4))
            elif match.group(1) == 'pypi':
                url = match.group(4)
            elif match.group(1) == 'fgo':
                versions = version.split('.')
                if len(versions) == 1:
                    majmin = version
                else:
                    majmin = versions[0] + '.' + versions[1]
                url = 'http://download.gnome.org/sources/%s/%s/%s-%s.tar.xz' % (name, majmin, name, version)
            else:
                sys.stderr.write("Unknown upstream group for metadata: %s (full line: '%s').\n" % (match.group(1), line))
                url = ''

            ignore = False
            if name in real_upstream_data:
                (current_version, current_url) = real_upstream_data[name]
                if util.version_ge(current_version, version):
                    ignore = True

            if not ignore:
                real_upstream_data[name] = (version, url)

            # Now also fill data for 'glib|1.2.10' if it fits
            if name in upstream_name_branches:
                # name = 'glib', upstream_name_branch = 'glib|1.2.10'
                # and limit = '1.2.10'
                for (upstream_name_branch, limit) in upstream_name_branches[name]:
                    if upstream_name_branch in real_upstream_data:
                        (current_version, current_url) = real_upstream_data[upstream_name_branch]
                        if util.version_ge(current_version, version):
                            continue

                    if util.version_ge(version, limit):
                        continue

                    real_upstream_data[upstream_name_branch] = (version, url)


        for (name, (version, url)) in real_upstream_data.items():
            if name in olddata:
                # Update the entry if it has changed
                (id, oldversion, oldurl) = olddata[name]
                if oldversion != version or oldurl != url:
                    # Note: we don't put the mtime here, since we use the
                    # updated time in get_changed_packages
                    self.cursor.execute('''UPDATE upstream SET
                        version = ?, url = ?, updated = ?
                        WHERE id = ?
                        ;''',
                        (version, url, self._now, id))
                del olddata[name]
            else:
                # Add the entry
                self.cursor.execute('''INSERT INTO upstream VALUES (
                    NULL, ?, ?, ?, ?, ?
                    );''',
                    (branch_id, name, version, url, self._now))

        fobj.close()

        # Remove data that was removed in the source file
        if len(olddata) > 0:
            ids = [ id for (id, version, url) in olddata.values() ]
            # Delete by group of 50, since it once had to remove ~1800 items
            # and it didn't work fine
            chunk_size = 50
            ids_len = len(ids)
            for index in range(ids_len // chunk_size):
                chunk_ids = ids[index * chunk_size : (index + 1) * chunk_size]
                where = ' OR '.join([ 'id = ?' for i in range(len(chunk_ids)) ])
                self.cursor.execute('''DELETE FROM upstream WHERE %s;''' % where, chunk_ids)
            remainder = ids_len % chunk_size
            if remainder > 0:
                chunk_ids = ids[- remainder:]
                where = ' OR '.join([ 'id = ?' for i in range(len(chunk_ids)) ])
                self.cursor.execute('''DELETE FROM upstream WHERE %s;''' % where, chunk_ids)

            self._removed_upstream[branch] = list(olddata.keys())
        else:
            self._removed_upstream[branch] = []
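
The upstream metadata lines are colon-separated; a quick look at what the parsing regex captures on a made-up cpan line:

import re

re_upstream_data = re.compile(r'^([^:]*):([^:]+):([^:]+):(.*)$')
m = re_upstream_data.match('cpan:Foo-Bar:1.23:F/FO/FOO/Foo-Bar-1.23.tar.gz')  # hypothetical line
print(m.group(1), m.group(2), m.group(3), m.group(4))
# -> cpan Foo-Bar 1.23 F/FO/FOO/Foo-Bar-1.23.tar.gz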
Example #12
        try:
            os.makedirs(dirname)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

        if expected_hash_type and expected_hash:
            old_cache = os.path.join(self._cachedir, subpath)
            old_hash = get_hash_from_file(expected_hash_type, old_cache)
            if old_hash == expected_hash:
                shutil.copy(old_cache, cache)
                return

        fout = open(cache, 'wb')  # binary mode: the payload is raw bytes
        fin = None
        try:
            fin = urllib2.urlopen(posixjoin(self.resource, subpath))
            while True:
                chunk = fin.read(500 * 1024)
                if len(chunk) == 0:
                    break
                fout.write(chunk)
            fout.close()
        except urllib2.HTTPError:
            if fin:
                fin.close()
            fout.close()
            os.unlink(cache)
            raise
        fin.close()
        fout.close()
Example #13
    def put_object(self, data_source, object_name):
        dest_path = object_remote_location(object_name)
        self.writer.folder_ensure(dest_path[0])
        self.writer.put(data_source, posixjoin(*dest_path))
Example #14
    def __init__(self, url_base, subrepo='', verbose=0):
        self._verbose = verbose
        self._base = posixjoin(url_base, subrepo)
        if not self._base.endswith('/'):
            self._base += '/'
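
Note that posixpath.join already yields the trailing slash when subrepo is empty, so the endswith check only matters for a non-empty subrepo; a quick demonstration with a made-up URL:

from posixpath import join as posixjoin

print(posixjoin('http://example.org/repo', ''))         # -> http://example.org/repo/
print(posixjoin('http://example.org/repo', 'subrepo'))  # -> http://example.org/repo/subrepo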