def run(self):
        # bpo-35907, CVE-2019-9948: urllib must reject local_file:// scheme
        class DummyURLopener(URLopener):
            def open_local_file(self, url):
                return url

        warnings.simplefilter('ignore', DeprecationWarning)
        self.check_func("urlopen", urllib_request.urlopen)
        self.check_func("URLopener().open", URLopener().open)
        self.check_func("URLopener().retrieve", URLopener().retrieve)
        self.check_func("DummyURLopener().open", DummyURLopener().open)
        self.check_func("DummyURLopener().retrieve", DummyURLopener().retrieve)
        self.exit_fixed()
def readTLEfile(source):
    ''' Read a TLE file (unzip if necessary) '''
    sourceName = source['name']
    sourceUrl = source['url']
    sourceFile = source['file']
    if os.path.isfile(sourceFile):
        print('Using saved TLE data {} ({})'.format(
            sourceFile, time.ctime(os.path.getmtime(sourceFile))))
    else:
        print('Retrieving TLE data from {}'.format(sourceUrl))
        file = URLopener()
        try:
            file.retrieve(sourceUrl, sourceFile)
        except OSError:
            print("Error: Failed to get TLE data")
            return None
        else:
            print('{} updated'.format(sourceFile))

    if sourceFile.lower().endswith('.zip'):
        print('Unzipping {}...'.format(sourceFile))
        zip_file = zipfile.ZipFile(sourceFile)
        zip_file.extractall('.')
        sourceFile = zip_file.namelist()[0]
        print('Extracted {}'.format(zip_file.namelist()))

    tempContent = []
    with open(sourceFile) as f:
        for aline in f:
            tempContent.append(aline.replace('\n', ''))
        print(len(tempContent) // 3, 'TLEs loaded from {}'.format(sourceFile))

    return tempContent
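
A minimal usage sketch for readTLEfile(); the dict keys are taken from the function body, but the name, URL, and file values below are purely hypothetical:

# Hypothetical source entry; readTLEfile() only relies on these three keys.
source = {
    'name': 'active satellites',
    'url': 'https://example.com/tle/active.txt',
    'file': 'active.txt',
}

tles = readTLEfile(source)
if tles is not None:
    print('First TLE name line:', tles[0])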
Example #3
    def __init__(self, url):
        super(HttpDB, self).__init__()
        self.baseurl = url._replace(fragment="").geturl()
        self.db = urlop = URLopener()
        for hdr, val in (tuple(x.split("=", 1)) if "=" in x else (x, "")
                         for x in url.fragment.split("&") if x):
            urlop.addheader(hdr, val)
    def read_component_sitemap(self, sitemapindex_uri, sitemap_uri, sitemap, sitemapindex_is_file):
        """Read a component sitemap of a Resource List with index.

        Each component must be a sitemap with the 
        """
        if (sitemapindex_is_file):
            if (not self.is_file_uri(sitemap_uri)):
                # Attempt to map URI to local file
                remote_uri = sitemap_uri
                sitemap_uri = self.mapper.src_to_dst(remote_uri)
                self.logger.info("Mapped %s to local file %s" % (remote_uri, sitemap_uri))
            else:
                # The individual sitemaps should be at a URL (scheme/server/path)
                # that the sitemapindex URL can speak authoritatively about
                if (self.check_url_authority and
                    not UrlAuthority(sitemapindex_uri).has_authority_over(sitemap_uri)):
                    raise ListBaseIndexError("The sitemapindex (%s) refers to sitemap at a location it does not have authority over (%s)" % (sitemapindex_uri,sitemap_uri))
        try:
            fh = URLopener().open(sitemap_uri)
            self.num_files += 1
        except IOError as e:
            raise ListBaseIndexError("Failed to load sitemap from %s listed in sitemap index %s (%s)" % (sitemap_uri,sitemapindex_uri,str(e)))
        # Get the Content-Length if we can (works fine for local files)
        try:
            self.content_length = int(fh.info()['Content-Length'])
            self.bytes_read += self.content_length
        except KeyError:
            # If we don't get a length then c'est la vie
            pass
        self.logger.info( "Reading sitemap from %s (%d bytes)" % (sitemap_uri,self.content_length) )
        component = sitemap.parse_xml( fh=fh, sitemapindex=False )
        # Copy resources into self, check any metadata
        for r in component:
            self.resources.add(r)
Example #5
    def _verify_and_download(self):
        """check if file is where it should and download if not"""
        if path.isfile(self._path):
            return
        # File does not exist, so we have to download it.
        epic_id = int(self.epic_id)
        d1 = epic_id - epic_id % 100000
        d2 = epic_id % 100000 - epic_id % 1000
        url_template = 'https://archive.stsci.edu/missions/k2/target_pixel_files/c{0:d}/{1:d}/{2:05d}/{3}'
        url_to_load = url_template.format(self.campaign, d1, d2,
                                          self.file_name)

        fmt = "Downloading {:} ..... "
        print(fmt.format(self.file_name), end='', file=sys.stderr, flush=True)
        url_retriever = URLopener()
        try:
            url_retriever.retrieve(url_to_load, self._path)
        except OSError:
            print("", file=sys.stderr, flush=True)
            raise IOError(
                "\n\nFailed to download file {:}\n\n".format(url_to_load))
        if not path.isfile(self._path):
            print("", file=sys.stderr, flush=True)
            raise IOError('Download of\n' + url_to_load + '\nto\n' +
                          self._path + '\nsomehow failed')
        print(" done", file=sys.stderr, flush=True)
Example #6
def install_mpt(install_path, url=DEFAULT_MPT_URL):
    """
    Install MyPyTutor to the given directory.

    Args:
      install_path (str): The directory to install MyPyTutor in.
      url (str, optional): The URL of the MyPyTutor file to use.

    """
    # create our install path if it doesn't already exist
    if not os.path.exists(install_path):
        os.makedirs(install_path)

    print('Installing MyPyTutor...', end='', flush=True)

    # grab the latest zip file
    # we use an explicit filename here because we don't yet have access
    # to the tutorlib module for abstracting away temporary file creation
    try:
        urlobj = URLopener()
        filename, _ = urlobj.retrieve(url, 'MyPyTutor.zip')
    except Exception:
        print('failed')
        sys.exit(1)

    # extract the file
    with ZipFile(filename) as zf:
        zf.extractall(install_path)

    print('done')
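
A minimal usage sketch, assuming DEFAULT_MPT_URL is defined elsewhere in the installer script; the install path below is only an example:

import os

install_mpt(os.path.expanduser('~/MyPyTutor'))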
    def read_file_content(self, file_url=None):
        """Return name of temp file in which remote file is saved."""
        if not file_url:
            file_url = self.url
            pywikibot.warning("file_url is not given. "
                              "Set to self.url by default.")
        pywikibot.output(u'Reading file %s' % file_url)
        resume = False
        rlen = 0
        _contents = None
        dt = 15
        uo = URLopener()
        retrieved = False

        while not retrieved:
            if resume:
                pywikibot.output(u"Resume download...")
                uo.addheader('Range', 'bytes=%s-' % rlen)

            infile = uo.open(file_url)

            if 'text/html' in infile.info().get('Content-Type'):
                pywikibot.output(u"Couldn't download the image: "
                                 "the requested URL was not found on server.")
                return

            content_len = infile.info().get('Content-Length')
            accept_ranges = infile.info().get('Accept-Ranges') == 'bytes'

            if resume:
                _contents += infile.read()
            else:
                _contents = infile.read()

            infile.close()
            retrieved = True

            if content_len:
                rlen = len(_contents)
                content_len = int(content_len)
                if rlen < content_len:
                    retrieved = False
                    pywikibot.output(
                        u"Connection closed at byte %s (%s left)" %
                        (rlen, content_len))
                    if accept_ranges and rlen > 0:
                        resume = True
                    pywikibot.output(u"Sleeping for %d seconds..." % dt)
                    time.sleep(dt)
                    if dt <= 60:
                        dt += 15
                    elif dt < 360:
                        dt += 60
            else:
                pywikibot.log(
                    u"WARNING: length check of retrieved data not possible.")
        handle, tempname = tempfile.mkstemp()
        with os.fdopen(handle, "wb") as t:
            t.write(_contents)
        return tempname
Example #8
def get_imagelinks(url):
    """Given a URL, get all images linked to by the page at that URL."""
    # Check if BeautifulSoup is imported; if the import failed, the name is
    # bound to the ImportError instance, which is re-raised here.
    if isinstance(BeautifulSoup, ImportError):
        raise BeautifulSoup

    links = []
    uo = URLopener()
    with uo.open(url) as f:
        soup = BeautifulSoup(f.read())

    if not shown:
        tagname = "a"
    elif shown == "just":
        tagname = "img"
    else:
        tagname = ["a", "img"]

    for tag in soup.findAll(tagname):
        link = tag.get("src", tag.get("href", None))
        if link:
            ext = os.path.splitext(link)[1].lower().strip('.')
            if ext in fileformats:
                links.append(urllib.parse.urljoin(url, link))
    return links
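
get_imagelinks() relies on module-level `shown` and `fileformats` settings (and on the BeautifulSoup import) defined elsewhere in the original script; a minimal sketch with plausible, purely assumed values:

# Hypothetical module-level configuration assumed by get_imagelinks().
shown = False                                  # False: scan <a> links; "just": <img> tags only
fileformats = ('jpg', 'jpeg', 'png', 'gif')    # extensions treated as images

links = get_imagelinks('http://example.com/gallery.html')
print(len(links), 'image links found')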
Example #9
def pose_video(datum):
    if not exists(datum['video']):
        try:
            URLopener().retrieve(datum["video_url"], datum["video"])
        except Exception as e:
            makedir(datum["pose_dir"]) # Empty directory

    if not exists(datum["pose_dir"]):
        gpu = get_empty_gpu()

        # Create Container
        container_id = Docker.create_container(DOCKER_NAME, "-it -v " + datum["video"] + ":/video.mp4")

        def remove_container():
            Docker.remove_container(container_id)

        try:
            # Start Container
            Docker.start_container(container_id)

            cmd = "./build/examples/openpose/openpose.bin --video /video.mp4 --model_pose BODY_25 --display 0 --render_pose 0 --write_json /out/ --hand --face --num_gpu 1 "
            cmd += " --num_gpu_start " + str(gpu)
            Docker.exec_container(container_id, "bash -c 'cd /openpose && " + cmd + "'")

            # Copy files
            Docker.cp_container_directory(container_id, datum["pose_dir"], "/out/")
        except Exception as e:
            remove_container()
            raise e
        finally:
            remove_container()

    return True
Example #10
    def parse(self, uri=None, fh=None, str_data=None, **kwargs):
        """Parse a single XML document for this list.

        Accepts either a uri (uri or default if parameter not specified), 
        or a filehandle (fh) or a string (str_data). Note that this method
        does not handle the case of a sitemapindex+sitemaps.

        LEGACY SUPPORT - the parameter str may be used in place of str_data
        but is deprecated and will be removed in a later version.
        """
        if (uri is not None):
            try:
                fh = URLopener().open(uri)
            except IOError as e:
                raise Exception(
                    "Failed to load sitemap/sitemapindex from %s (%s)" %
                    (uri, str(e)))
        elif (str_data is not None):
            fh = io.StringIO(str_data)
        elif ('str' in kwargs):
            # Legacy support for str argument, see
            # https://github.com/resync/resync/pull/21
            # One test for this in tests/test_list_base.py
            self.logger.warn(
                "Legacy parse(str=...), use parse(str_data=...) instead")
            fh = io.StringIO(kwargs['str'])
        if (fh is None):
            raise Exception("Nothing to parse")
        s = self.new_sitemap()
        s.parse_xml(fh=fh,
                    resources=self,
                    capability=self.capability_name,
                    sitemapindex=False)
        self.parsed_index = s.parsed_index
Example #11
def downloadAsset(uri, dirname):
    tUrl = uri
    o = urlparse(tUrl)
    contentType = ""
    # targetDir = os.path.join(CURRENT_DIRECTORY, dirname, '/'.join(o.path.split('/')[1:-1]))
    targetDir = CURRENT_DIRECTORY + '/' + dirname + '/' + '/'.join(
        o.path.split('/')[1:-1])

    # no download needed for javascript: URIs or fragment-only references
    if o.scheme == "javascript" or (o.netloc == '' and o.path == ''):
        return

    if o.scheme == "":
        if uri.startswith("//"):
            tUrl = f"https:{uri}"
        else:
            tUrl = f"https://{uri}"

    try:
        contentType = getContentType(tUrl)
    except Exception:
        try:
            if uri.startswith('//'):
                tUrl = f"http:{uri}"
            else:
                tUrl = f"http://{uri}"
            contentType = getContentType(tUrl)
        except Exception:
            pass
            # raise Exception("Error during connection")
    else:
        # ignore text/html
        if contentType in mimeTypes[1:]:
            if not os.path.exists(targetDir):
                path = Path(targetDir)
                path.mkdir(parents=True)

            targetFile = targetDir + '/' + o.path.split('/')[-1]
            if not os.path.exists(targetFile):
                try:
                    urlretrieve(tUrl, targetFile)
                    print(f"[Retrieved] {targetFile}")
                except Exception:
                    try:
                        opener = URLopener()
                        opener.addheader('User-Agent', 'Mozilla/5.0')
                        filename, headers = opener.retrieve(tUrl, targetFile)
                    except Exception:
                        try:
                            tUrl = tUrl.replace('www.', '')
                            tUrl = tUrl.replace('http:', 'https:')
                            filename, headers = opener.retrieve(
                                tUrl, targetFile)
                        except Exception as e:
                            print(str(e))
                            raise Exception

        else:
            pass
Example #12
def call_api(url, wait=1):
    time.sleep(wait)
    req = URLopener()
    req.addheader('Authorization', 'token ' + TOKEN)
    fp = req.open(url)
    data = json.load(fp)
    fp.close()
    return data
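
A minimal usage sketch; the Authorization header format suggests the GitHub API, but the TOKEN value and endpoint below are only placeholders:

TOKEN = 'ghp_xxxxxxxxxxxxxxxx'  # hypothetical personal access token

repo = call_api('https://api.github.com/repos/python/cpython')
print(repo['full_name'])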
Example #13
    def read(self, uri=None, resources=None, index_only=False):
        """Read sitemap from a URI including handling sitemapindexes.

        If index_only is True then individual sitemaps referenced in a sitemapindex
        will not be read. This will result in no resources being returned and is
        useful only to read the metadata and links listed in the sitemapindex.

        Includes the subtlety that if the input URI is a local file and is a
        sitemapindex which contains URIs for the individual sitemaps, then these
        are mapped to the filesystem also.
        """
        try:
            fh = URLopener().open(uri)
            self.num_files += 1
        except IOError as e:
            raise IOError("Failed to load sitemap/sitemapindex from %s (%s)" %
                          (uri, str(e)))
        # Get the Content-Length if we can (works fine for local files)
        try:
            self.content_length = int(fh.info()['Content-Length'])
            self.bytes_read += self.content_length
            self.logger.debug("Read %d bytes from %s" %
                              (self.content_length, uri))
        except KeyError:
            # If we don't get a length then c'est la vie
            self.logger.debug("Read ????? bytes from %s" % (uri))
            pass
        self.logger.info("Read sitemap/sitemapindex from %s" % (uri))
        s = self.new_sitemap()
        s.parse_xml(fh=fh, resources=self, capability=self.capability_name)
        # what did we read? sitemap or sitemapindex?
        if (s.parsed_index):
            # sitemapindex
            if (not self.allow_multifile):
                raise ListBaseIndexError(
                    "Got sitemapindex from %s but support for sitemapindex disabled"
                    % (uri))
            self.logger.info("Parsed as sitemapindex, %d sitemaps" %
                             (len(self.resources)))
            sitemapindex_is_file = self.is_file_uri(uri)
            if (index_only):
                # don't read the component sitemaps
                self.sitemapindex = True
                return
            # now loop over all entries to read each sitemap and add to
            # resources
            sitemaps = self.resources
            self.resources = self.resources_class()
            self.logger.info("Now reading %d sitemaps" % len(sitemaps.uris()))
            for sitemap_uri in sorted(sitemaps.uris()):
                self.read_component_sitemap(uri, sitemap_uri, s,
                                            sitemapindex_is_file)
        else:
            # sitemap
            self.logger.info("Parsed as sitemap, %d resources" %
                             (len(self.resources)))
def tryToReadFile(filePath, urlData):
    contents = ""
    try:
        fileHandle = URLopener().open(filePath)
        contents = fileHandle.read()
    except IOError:
        contents = "mitmProxy - Resource Override: Could not open " + filePath + \
         " Came from rule: " + urlData[0] + " , " + urlData[1]

    return contents
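
A minimal usage sketch; urlData is the (matched URL, replacement URL) rule pair used in the error message, and the values below are hypothetical:

rule = ('http://example.com/app.js', 'file:///tmp/override.js')
body = tryToReadFile(rule[1], rule)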
Example #15
    def stop(self):
        """Stops the server."""
        self.stop_serving = True
        try:
            # This is to force stop the server loop
            URLopener().open("http://%s:%d" % (self.host, self.port))
        except IOError:
            pass
        log.info("Shutting down the webserver")
        self.thread.join()
Example #16
    def stop(self) -> None:
        """Stops the server."""
        self.stop_serving = True
        try:
            # This is to force stop the server loop
            URLopener().open(f"http://{self.host}:{self.port}")
        except OSError:
            pass
        log.info("Shutting down the webserver")
        self.thread.join()
Example #17
    def download(self, entity_id: int, destination: str = None, sort: List[Sort] = None) -> str:
        """
        Download sequences from a single entity.
        """

        sort = [Sort('id', 'asc')] if sort is None else sort
        sort = list(sort_item.to_json() for sort_item in sort) if sort else []
        body = {'filter': [], 'selection': [], 'sort': sort}
        file_path = Sequences.get_filepath_for_entity_id(entity_id)
        url = '{}/entities/{}/_extract'.format(self.url, entity_id)
        print('Downloading shards from "{}" to "{}".'.format(url, file_path))

        paths = []
        with self.session.post(url, stream=True, timeout=10 * 60, json=body) as response:
            try:
                links = response.json()
                print('links', links)
                if 'statusCode' in links and links['statusCode'] != 200:
                    raise Exception(links['message'])
                elif len(links) == 0:
                    raise Exception(
                        'Sequences:download - Error; no download links for {}. Does the table exist?'.format(entity_id))

                index = 0
                for link in links:
                    testfile = URLopener()
                    path = '{}-{}.gz'.format(file_path, index)
                    paths.append(path)
                    testfile.retrieve(link, path)
                    index = index + 1

            except Exception as e:
                print('Sequences:download - error:', e)
                raise e

        sorted_paths = self.get_sorted_file_shard_list(entity_id, paths, [])

        print(f'Unzipping: entity_id={entity_id} to destination={destination}')

        skip_first = False

        with open(destination, 'wb+') as target_file:
            for file_shard in sorted_paths:
                with gzip.open(file_shard, 'rb') as g_zip_file:
                    first_line = True
                    for line in g_zip_file:
                        # We skip the first line of every file, except for the very first.
                        if not (first_line and skip_first):
                            line = Sequences.sanitize(line.decode("utf-8"))
                            target_file.write(line.encode("utf-8"))
                        first_line = False
                # We skip the first line of every file, except for the very first.
                skip_first = True

        return destination
Example #18
def save_downloaded_file(context):
    """
    Saves POEditor terms to a file in output dir

    :param context: behave context
    :return: N/A
    """
    file_path = get_poeditor_file_path(context)
    saved_file = URLopener()
    saved_file.retrieve(context.poeditor_download_url, file_path)
    context.logger.info('POEditor terms have been saved in "%s" file' %
                        file_path)
Example #19
def download_text_file(url, file_name):
    opener = URLopener()
    file_name = file_name.split("/")[-1]
    file_name = file_name.replace("%20", " ")
    if _is_absolute_link(file_name):
        url = file_name
        if not url.startswith("http://"):
            url = "http://" + url
        out_name = file_name.split("/")[-1]
    else:
        url = "{}{}".format(url, file_name)
        out_name = file_name
    opener.retrieve(url, file_name)
    return out_name
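
A minimal usage sketch; _is_absolute_link() is a helper defined elsewhere, and the base URL and file name below are hypothetical:

# Fetches http://example.com/docs/notes.txt and saves it locally as notes.txt.
saved_name = download_text_file('http://example.com/docs/', 'notes.txt')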
Example #20
def _download_file(url, destination):
    logger.info('Downloading %s to %s...', url, destination)

    response = _open_url(url)

    if not response.code == 200:
        raise WagonError("Failed to download file. Request to {0} "
                         "failed with HTTP Error: {1}".format(
                             url, response.code))
    final_url = response.geturl()
    if final_url != url and is_verbose():
        logger.debug('Redirected to %s', final_url)
    f = URLopener()
    f.retrieve(final_url, destination)
Example #21
def download_audio_file(url, guid):
    """Download the audio file for the lecture.

    Downloads the mp3 audio recording of the lecture. The file is stored in the
    temporary folder named using the GUID.

    Args:
        - url (str): The lecture's base url
        - guid (str): The lecture's guid

    """
    print("\nDownloading audio file")
    URLopener().retrieve(url + "/audio.mp3",
                         os.path.join(DOWNLOAD_DIRECTORY, guid, "audio.mp3"))
Example #22
def download_update(config, download):
    """Download and new Plex package."""
    download_name = f"pms_{download['version']}{os.path.splitext(download['link'])[1]}"
    download_target = os.path.join(config['folder'], download_name)

    # If we've already downloaded this file, remove it
    if os.path.exists(download_target):
        os.remove(download_target)

    # Download the file
    download_path = URLopener().retrieve(download['link'], download_target)

    # Make sure the file exists
    return os.path.exists(download_path[0]), download_path[0]
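
A minimal usage sketch; the dict keys are inferred from the function body and the values below are hypothetical:

config = {'folder': '/tmp/plex-updates'}
download = {
    'version': '1.2.3.4567',
    'link': 'https://example.com/plexmediaserver_1.2.3.4567_amd64.deb',
}
ok, package_path = download_update(config, download)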
Example #23
def download_data():
    """This function downloads the data, extract them and remove the archive."""
    if not os.path.exists(DATA_HOME):
        print("Data are missing. Downloading them now...", end="", flush=True)
        datafile = URLopener()
        datafile.retrieve(DOWNLOAD_URL, ARCHIVE_FNAME)
        print("Ok.")
        print("Extracting now...", end="", flush=True)
        tf = tarfile.open(ARCHIVE_FNAME)
        tf.extractall()
        print("Ok.")
        print("Removing the archive...", end="", flush=True)
        os.remove(ARCHIVE_FNAME)
        print("Ok.")
def downloadFile(linkStore):
    for imgUrl in linkStore:
        try:
            # removing the double slash from the start of the URL
            imgUrl = urlEdit(imgUrl[2:])
            fileName = imgUrl.split("/")[-1]
            imgUrl = 'https://' + imgUrl
            print('Downloading file: ' + fileName + '\tURL: ' + imgUrl + '\n')
            image = URLopener()
            image.retrieve(imgUrl, fileName)
            # above line may create error due to 403 forbidden response
        except Exception:
            print("Error occurred while downloading file: " + imgUrl + '\n')
        continue
Example #25
def download_swf_video_file(time, url, guid):
    """Download single swf video file.

    The download is stored in the temporary folder named using the GUID

    Args:
        - time (int): Time code to be downloaded
        - url (str): The lecture's base url
        - guid (str): The lecture's guid

    """
    URLopener().retrieve(url+"/slides/"+'{0:08d}'.format(time)+".swf",
                         os.path.join(DOWNLOAD_DIRECTORY, guid, '{0:08d}'.format(time)+".swf"),
                         reporthook=download_progress_bar)
Example #26
    def _copy_artifacts_s3(self, scratchdir, original_artifact_urls):
        # before we do anything else, verify that the upload directory doesn't already exist, to
        # avoid automatically stomping on a previous release. if you *want* to do this, you must
        # manually delete the destination directory first. (and redirect stdout to stderr)
        cmd = 'aws s3 ls --recursive {} 1>&2'.format(self._release_artifact_s3_dir)
        ret = self._run_cmd(cmd, False, 1)
        if ret == 0:
            raise Exception('Release artifact destination already exists. ' +
                            'Refusing to continue until destination has been manually removed:\n' +
                            'Do this: aws s3 rm --dryrun --recursive {}'.format(self._release_artifact_s3_dir))
        elif ret > 256:
            raise Exception('Failed to check artifact destination presence (code {}). Bad AWS credentials? Exiting early.'.format(ret))
        logger.info("Destination {} doesn't exist, proceeding...".format(self._release_artifact_s3_dir))

        for i in range(len(original_artifact_urls)):
            progress = '[{}/{}]'.format(i + 1, len(original_artifact_urls))
            src_url = original_artifact_urls[i]
            filename = src_url.split('/')[-1]

            local_path = os.path.join(scratchdir, filename)
            dest_s3_url = '{}/{}'.format(self._release_artifact_s3_dir, filename)

            # TODO: this currently downloads the file via http, then uploads it via 'aws s3 cp'.
            # copy directly from src bucket to dest bucket via 'aws s3 cp'? problem: different credentials

            # download the artifact (dev s3, via http)
            if self._dry_run:
                # create stub file to make 'aws s3 cp --dryrun' happy:
                logger.info('[DRY RUN] {} Downloading {} to {}'.format(progress, src_url, local_path))
                stub = open(local_path, 'w')
                stub.write('stub')
                stub.flush()
                stub.close()
                logger.info('[DRY RUN] {} Uploading {} to {}'.format(progress, local_path, dest_s3_url))
                ret = os.system('aws s3 cp --dryrun --acl public-read {} {} 1>&2'.format(
                    local_path, dest_s3_url))
            else:
                # download the artifact (http url referenced in package)
                logger.info('{} Downloading {} to {}'.format(progress, src_url, local_path))
                URLopener().retrieve(src_url, local_path)
                # re-upload the artifact (prod s3, via awscli)
                logger.info('{} Uploading {} to {}'.format(progress, local_path, dest_s3_url))
                ret = os.system('aws s3 cp --acl public-read {} {} 1>&2'.format(
                    local_path, dest_s3_url))
            if not ret == 0:
                raise Exception(
                    'Failed to upload {} to {}. '.format(local_path, dest_s3_url) +
                    'Partial release directory may need to be cleared manually before retrying. Exiting early.')
            os.unlink(local_path)
Example #27
def scrape_pokemon_image(url):
    req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
    page = urlopen(req).read()
    soup = BeautifulSoup(page, 'html.parser')

    images = soup.find_all('img')
    image_link = images[0].get('src')

    print("[INFO] downloading {}".format(image_link))
    name = str(image_link.split('/')[-1])
    opener = URLopener()
    opener.addheader('User-Agent', 'Mozilla/5.0')
    opener.retrieve(image_link, os.path.join('data/images/', name))

    print(image_link)
Example #28
    def initUrllibInstance(self, server):
        printl("", self, "S")

        # we establish the connection once here
        self.urllibInstance = URLopener()

        # we add headers only in special cases
        connectionType = self.serverConfig.connectionType.value
        localAuth = self.serverConfig.localAuth.value

        if connectionType == "2" or localAuth:
            authHeader = self.plexInstance.get_hTokenForServer(server)
            self.urllibInstance.addheader("X-Plex-Token",
                                          authHeader["X-Plex-Token"])

        printl("", self, "C")
Example #29
    def on_update_button_click(self):
        try:
            opener = URLopener()
            opener.retrieve(self.REMOTE_UPDATE_URL,
                            "resources/parameters.json")

            # Read the new settings.
            self.data = read_settings()
            messagebox.showinfo(
                "Settings Update",
                "Settings successfully updated from the server.")
        except Exception as e:
            logging.critical(
                "Couldn't open the remote settings file: {0}".format(str(e)))
            messagebox.showerror("Couldn't Update Settings",
                                 "Couldn't open the remote settings file.")
Example #30
def Download_File(name):
    """ Download UCAC4 file. """

    url_name = prefix+name
    ucac_file = URLopener()
    ucac_file.retrieve(url_name, name)

    inp = open(name, 'rb')
    bz2_file = bz2.BZ2File(name+'.bz2', 'wb', compresslevel=1)
    copyfileobj(inp, bz2_file)
    inp.close()
    bz2_file.close()

    os.remove(name)

    return 0
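
A minimal usage sketch; `prefix` is a module-level base URL in the original script, so the value below is only an assumption:

prefix = 'https://example.com/ucac4/u4b/'  # hypothetical base URL

# Downloads one zone file, recompresses it as <name>.bz2 and deletes the original.
Download_File('z001')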