Example no. 1
 def gtfs_download(self, url, dt, zone):
     """Do downloading of one file."""
     print("Downloading", self.slug, url, zone, dt)
     # Use only standard library functions to avoid dependencies.
     #furl = urllib.urlopen(url)
     opener = FancyURLopener()
     # We have to set up an authentication method on the opener if
     # we will need to authenticate.  This does HTTP BASIC only so
     # far.
     if 'authentication' in self.data:
         auth_name = self.data['authentication']
         auth = auth_data['sites'][auth_name]
         # A callback method which performs the authentication.
         # Return (user, pass) tuple.
         opener.prompt_user_passwd = \
             lambda host, realm: (auth['username'], auth['password'])
         # URL parameters auth method
         if 'url_suffix' in auth:
             url = url + auth['url_suffix']
     if "{API_KEY}" in url:
         try:
             auth_name = self.data['authentication']
         except KeyError:
             auth_name = self.name
         auth = auth_data['sites'][auth_name]
         url = url.format(API_KEY=auth['API_KEY'])
     # Make GTFS path.
     gtfs_path = self.path_gtfszip(dt, zone)
     util.makedirs(os.path.dirname(gtfs_path))
     # Open the URL.
     print("**** Connecting to %s" % url)
     # Open GTFS and relay data from web to file.
     with util.create_file(gtfs_path) as tmp_gtfs_path:
         opener.retrieve(url, tmp_gtfs_path)
     self.test_corrupted_zip(gtfs_path)
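A minimal standalone sketch of the authentication hook used above: FancyURLopener calls prompt_user_passwd(host, realm) when it receives an HTTP 401 and retries the request with the returned credentials. The URL and credentials below are placeholders, not values from the example.

from urllib.request import FancyURLopener

opener = FancyURLopener()
# Called on HTTP 401; the request is retried with the returned (user, password).
opener.prompt_user_passwd = lambda host, realm: ('user', 'secret')  # placeholders
stream = opener.open('http://example.com/protected')  # hypothetical URL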
Example no. 2
            def _download() -> None:
                debug('Downloading server from', url)
                target = sublime.active_window()
                label = 'Downloading PromQL language server'

                with ActivityIndicator(target, label):
                    # Created outside the try block so the finally clause can
                    # always close it.
                    opener = FancyURLopener()
                    try:
                        tmp_file, _ = opener.retrieve(url)

                        if not checksum_verified(checksum, tmp_file):
                            debug('Checksum error.')
                            sublime.status_message(
                                'Server binary {}: checksum error.'.format(
                                    os.path.basename(tmp_file)))
                            return

                        # extract and copy the cache
                        with tarfile.open(tmp_file) as tf:
                            tf.extractall(self._cache_path)

                        os.unlink(tmp_file)

                        self._ready = True
                    except Exception as ex:
                        debug('Failed downloading server:', ex)
                    finally:
                        opener.close()
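checksum_verified is not shown in this example; a minimal sketch of such a helper, assuming the checksum is a SHA-256 hex digest (the algorithm is an assumption, the example does not say):

import hashlib

def checksum_verified(expected_hex, file_path):
    # Hash the file in blocks to avoid loading it into memory at once.
    h = hashlib.sha256()  # assumed algorithm
    with open(file_path, 'rb') as f:
        for block in iter(lambda: f.read(8192), b''):
            h.update(block)
    return h.hexdigest() == expected_hex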
Example no. 3
 def __init__(self, not_found_tries=30, not_found_delay=60,
                    *args, **kwargs):
   FancyURLopener.__init__(self, *args, **kwargs)
   self.tries = 0
   self.total_tries = not_found_tries
   self.not_found_delay = not_found_delay
   self.found = False # ever had anything not 404
Example no. 4
                    def ach5():
                        z = t7.get("1.0", END)
                        print(f)
                        print(k)
                        print(g)

                        sql1 = "SELECT mobile FROM customer_register WHERE email = '" + g + "' "
                        sql = "UPDATE complaints SET solution ='" + z + "' WHERE cid = '" + g + "' AND comp_heading = '" + k + "' "
                        db = pymysql.connect(host="localhost", user="root", password="", database="cms")
                        c = db.cursor()
                        c.execute(sql)
                        try:
                            c.execute(sql1)
                            achaa = c.fetchone()
                            db.commit()
                            print(achaa)
                            tinn = FancyURLopener()
                            DL = []
                            for i in achaa:
                                DL.append(i)
                            ok = achaa[0]
                            phone = ok
                            yourmsg = "Hi customer, The solution of the complain '" + k + "' on '" + g + "' has been provided to your registered account. Kindly check it. \nThank you \nCMS "
                            page = tinn.open(
                                'http://5.189.169.241:5012/api/SendSMS?api_id=API245772015763&api_password=12345678&sms_type=T&encoding=T&sender_id=BLKSMS&phonenumber='
                                + str(phone) + '&textmessage="' + yourmsg +
                                '"')
                        except Exception:
                            print("Invalid No.")
                        db.close()

                        tech1()
Example no. 5
 def __init__(self, *args):
     FancyURLopener.__init__(self, *args)
     for i, (header, val) in enumerate(self.addheaders):
         if header == "User-Agent":
             del self.addheaders[i]
             break
     self.addheader("User-Agent", "OSMViz/1.1.0 +https://hugovk.github.io/osmviz")
Example no. 6
def getFeed(url):
    try:
        opener = FancyURLopener()
        page = opener.open(url)
        contents = page.read().decode('utf-8')
        feed=feedparser.parse(contents)
        return feed
    except Exception:
        return None
Example no. 9
def getBashorg():  # Fetch a quote from bash.im
    FancyURLopener.version = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0'
    myopener = FancyURLopener()
    page = myopener.open('https://bash.im/forweb/')
    s = page.read().decode("cp1251").replace("<\' + \'br>", "\\n").replace(
        "&quot;", "\"").replace("&lt;", "<").replace("&gt;", ">")
    page.close()
    strt = s.index('id="b_q_t" style="padding: 1em 0;">') + len(
        'id="b_q_t" style="padding: 1em 0;">')
    s = s[strt:]
    return s[:s.index("<\'")]
Example no. 10
	def __init__(self, *, robot=None, max_tries=2):
		"""
		@param robot le robot à utiliser, par default pas de robot utilisé, toutes les urls sont visitées
		@param max_tries le nombre maximum d'essais  d'ouverture de l'url
		"""
		FancyURLopener.__init__(self)
		self.addheader('User-agent', 'Galopa')
		
		self.robot = robot

		self.max_tries = max_tries
Example no. 11
    def __init__(self, args, loadOnly):
        with open(args) as f:
            self.conf = yaml.safe_load(f)
        self.loadOnly = loadOnly
        es_hosts = self.conf.get("elasticsearch")
        print("###")
        print("# SPARQL endpoint: " + self.conf["sparql"]["uri"])
        print("# ElasticSearch: %s" % es_hosts)
        print("###")
        self.es = Elasticsearch(es_hosts)

        self.urlOpener = FancyURLopener()
        self.urlOpener.addheader(
            "Accept", "application/sparql-results+json, application/json;q=0.1")
Example no. 12
 def __init__(self, *args, **kwargs):
     self._last_url = u''
     FancyURLopener.__init__(self, *args, **kwargs)
     # Headers to add to every request.
     # XXX: IMDb's web server doesn't like urllib-based programs,
     #      so let's pretend to be Mozilla.
     #      Wow!  I'm shocked by my total lack of ethic! <g>
     for header in ('User-Agent', 'User-agent', 'user-agent'):
         self.del_header(header)
     self.set_header('User-Agent', 'Mozilla/5.0')
     self.set_header('Accept-Language', 'en-us,en;q=0.5')
     # XXX: This class is used also to perform "Exact Primary
     #      [Title|Name]" searches, and so by default the cookie is set.
     c_header = 'uu=%s; id=%s' % (_cookie_uu, _cookie_id)
     self.set_header('Cookie', c_header)
Example no. 13
    def resolve(self):
        urlopener = FancyURLopener()
        try:
            unresolved = []
            for artifact in self.artifacts:
                if not self._download_artifact(urlopener, artifact):
                    unresolved.append(artifact)
                    self.logger.error("Could not resolve artifact %s." % artifact.name)

            if len(unresolved) > 0:
                raise ArtifactUnresolvedException(unresolved)
        except KeyboardInterrupt as e:
            raise TequilaException('Download interrupted by user.') from e
        finally:
            urlopener.close()
Example no. 15
	def open(self, url, data=None, max_tries=None):
		"""
		Ouvrir une url
		@param url
		@param data les data (POST)
		@param max_tries le nombre maximum d'essais, si non préciser la valeur donnée lors de l'initialisation sera prise
		@return un stream
		@throw ExceptionMaxTries quand le nombre maximum de tentatives est atteind
		@throw ExceptionUrlForbid quand le robot n'a pas le droit de visiter l'url
		"""
		if not max_tries:
			max_tries = self.max_tries
		else:
			self.max_tries = max_tries
		if not self.robot or self.robot.can_fetch(url):
			for _ in range(max_tries):
				try:
					stream = FancyURLopener.open(self, url, data)
				except Exception as ex:
					error = get_traceback()+"\n"+str(ex)
				else:
					return stream
			else:
				raise ExceptionMaxTries("max tries %s : %s" % (url, error))
		else:
			raise ExceptionUrlForbid("robot is not allowed to access %s" % url)
Example no. 16
def download(who, url):
    tmp = FancyURLopener()
    url = tmp.open(url)
    if 2 < sys.version_info.major:
        path = url.info().get_filename()
    else:
        path = url.info().get('Content-Disposition').split('=')[1].strip('"')
    path = os.path.basename(path)
    with open(path, 'wb') as f:
        while True:
            tmp = url.read(4096)
            if 0 == len(tmp):
                break
            f.write(tmp)
    return path
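The manual 4096-byte loop above can also be written with shutil.copyfileobj, which streams in chunks the same way; a sketch under the same assumptions, with a hypothetical URL:

import shutil
from urllib.request import FancyURLopener

def download_simple(url, path):
    response = FancyURLopener().open(url)
    with open(path, 'wb') as f:
        shutil.copyfileobj(response, f)  # chunked copy, like the loop above
    response.close()
    return path

# download_simple('http://example.com/file.bin', 'file.bin')  # hypothetical URL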
Example no. 17
    def _download_and_extract(self, url, extract_to, ext='zip'):
        def _progress(count, block_size, total_size):
            if total_size > 0:
                print('\r>> Downloading %s %.1f%%' %
                      (url,
                       float(count * block_size) / float(total_size) * 100.0),
                      end=' ')
            else:
                print('\r>> Downloading %s' % (url), end=' ')
            sys.stdout.flush()

        urlretrieve = FancyURLopener().retrieve
        local_zip_path = os.path.join(self.data_dir, 'tmp.' + ext)
        urlretrieve(url, local_zip_path, _progress)
        sys.stdout.write("\n>> Finished downloading. Unzipping...\n")
        if ext == 'zip':
            with zipfile.ZipFile(local_zip_path, "r") as zip_ref:
                zip_ref.extractall(extract_to)
        else:
            with rarfile.RarFile(local_zip_path, "r") as zip_ref:
                zip_ref.extractall(extract_to)

        sys.stdout.write(">> Finished unzipping.\n")
        os.remove(local_zip_path)

        self.clear_statistics()
Example no. 18
    def dropEvent(self, ev):
        from os.path import basename
        from urllib.request import FancyURLopener
        from base64 import b64encode
        import imghdr

        c = self.cursorForPosition(ev.pos())
        s = ev.mimeData().text().split('\n')
        for url in s:
            url = url.strip()
            if len(url):
                data = FancyURLopener().open(url).read()
                t = imghdr.what(None, h=data)
                data = b64encode(data).decode('utf-8')
                if t is None:
                    continue
                if c.block().length() != 1:
                    c.insertBlock()
                if c.block().previous().length() != 1:
                    c.insertBlock()
                data = 'data:image/' + t + ';base64,' + data
                c.insertText('![{0}]({1})'.format(basename(url), data))
                if c.block().next().length() != 1:
                    c.insertBlock()
                else:
                    c.movePosition(QTextCursor.NextBlock)

        self.setTextCursor(c)

        mimeData = QMimeData()
        mimeData.setText("")
        dummyEvent = QDropEvent(ev.posF(), ev.possibleActions(), mimeData,
                                ev.mouseButtons(), ev.keyboardModifiers())

        super(MarkdownEditor, self).dropEvent(dummyEvent)
Example no. 19
 def run(self):
     opener = FancyURLopener()
     try:
         remotefile = opener.open(self.url)
     except IOError:
         logic.logger.new("Unable to connect to internet", 'ERROR')
         return
     if remotefile.getcode() == 404:
         logic.logger.new("PDB file not found on pdb.org", 'ERROR')
         return
     elif remotefile.getcode() >= 500:
         logic.logger.new("PDB.org is currently unavailable", 'ERROR')
         return
     localfile = open(self.pdbFullPath, 'wb')
     localfile.write(remotefile.read())
     localfile.close()
     remotefile.close()
Example no. 20
 def file_download(self, url):
     urlx = 'http:' + url
     imgname = url.split('/')[-1]
     if imgname.split('.')[-1] == 'gif':
         imgPath = os.path.join(PATH, 'gif', imgname)
     else:
         imgPath = os.path.join(PATH, 'pic', imgname)
     if not os.path.lexists(imgPath):
         # urllib.request.urlretrieve(urlx, imgPath)
         opener = FancyURLopener()
         opener.addheaders.clear()
         opener = self.insert_header(opener, urlx, imgPath)
         response = opener.open(urlx)
         with open(imgPath, 'wb+') as f:
             while True:
                 # read fixed-size chunks; open() takes POST data, not a chunk size
                 chunk = response.read(self._chunk_size)
                 if not chunk:
                     break
                 f.write(chunk)
Example no. 21
def cmd_download(*urls):
    """
    Download and install an artifact from an url.
    :param urls: the urls to download
    """
    from urllib.request import FancyURLopener
    from tequila.network.maven import ArtifactResolver

    maven_resolver = ArtifactResolver()
    for url in urls:
        maven_resolver.install_plugin_jar(FancyURLopener(), url)
Example no. 22
    def gen_qrcode(self):
        url = "https://zxing.org/w/chart?cht=qr&chs=350x350&chld=L&choe=UTF-8&chl=" + self.mecard(
        )
        webbrowser.open(url)

        png_file = "{}.png".format(self.fname)
        # urlretrieve(url, png_file)

        # with urlopen(url) as response, open("test.png", 'wb') as out_file:
        #     data = response.read()  # a `bytes` object
        #     out_file.write(data)

        # with urlopen(url) as response, open(png_file, 'wb') as out_file:
        #     shutil.copyfileobj(response, out_file)

        # credit: http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
        # credit: http://stackoverflow.com/questions/31893207/python-3-4-http-error-505-retrieving-json-from-url
        url_request = FancyURLopener({})
        with url_request.open(url) as response, open(png_file,
                                                     'wb') as out_file:
            shutil.copyfileobj(response, out_file)
Example no. 23
    def __init__(self, url, level):
        super(myParser, self).__init__()
        sleep(0.1)
        print('Checking URL', url)
        self.__level = level
        self.__done = False
        self.__currentlyParsingDeadATag = False
        self.__currentlyParsingTitleTag = False
        self.__url = url
        self.linkWasDead = False
        parsedURLs.append(self.__url)
        try:
            opener = FancyURLopener({})
            f = opener.open(self.__url)
            data = f.read()
        except ssl.SSLError:
            return
        except OSError:
            return
        except ValueError:
            if self.__url not in deadURLs:
                print()
                print('Found a dead link:', self.__url)
                deadURLs.append(self.__url)
                self.linkWasDead = True
            self.__done = True
            return

        try:
            text = data.decode(errors='replace')
        except UnicodeDecodeError:
            pass
            #print('This is a binary file:', self.__url)
        else:
            try:
                self.feed(text)
            except ValueError:
                pass
            except ssl.SSLError:
                pass
Example no. 24
def download(url, destination, tmp_dir='/tmp'):
    def _progress(count, block_size, total_size):
        sys.stdout.write('\rDownloading %s %.1f%%' % (
            url, float(count * block_size) / float(total_size) * 100.0))
        sys.stdout.flush()
    urlretrieve = FancyURLopener().retrieve
    if url.endswith('.zip'):
        local_zip_path = os.path.join(tmp_dir, 'datasets_download.zip')
        urlretrieve(url, local_zip_path, _progress)
        with zipfile.ZipFile(local_zip_path, "r") as zip_ref:
            zip_ref.extractall(destination)
        os.remove(local_zip_path)
    else:
        urlretrieve(url, destination, _progress)
Example no. 25
def urlopen(url):
    charset = None

    tmp = FancyURLopener()
    tmp = tmp.open(url)

    if 2 < sys.version_info.major:
        charset = tmp.info().get_content_charset()
    else:
        charset = 'windows-1251'

    tmp = tmp.read().decode(charset)
    if str != type(tmp):
        tmp = str(tmp.encode('utf-8'))

    return tmp
Example no. 26
 def http_error_default(self, url, fp, errcode, errmsg, headers):
     if errcode == 404:
         raise HTTPError(url, errcode, errmsg, headers, fp)
     else:
         return FancyURLopener.http_error_default(self, url, fp, errcode,
                                                  errmsg, headers)
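A usage sketch for the handler above, assuming it lives on a FancyURLopener subclass (the class name RaisingOpener and the URL are hypothetical); the point is that a 404 now surfaces as an exception instead of a silently returned error page:

from urllib.error import HTTPError
from urllib.request import FancyURLopener

class RaisingOpener(FancyURLopener):  # assumed name for the subclass above
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        if errcode == 404:
            raise HTTPError(url, errcode, errmsg, headers, fp)
        return FancyURLopener.http_error_default(self, url, fp, errcode,
                                                 errmsg, headers)

try:
    RaisingOpener().open('http://example.com/missing')  # hypothetical URL
except HTTPError as e:
    print('Not found:', e.code)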
Example no. 27
urls['demographic'] = 'http://biomedic.doc.ic.ac.uk/brain-development/downloads/IXI/IXI.xls'

fnames = {}
fnames['t1'] = 't1.tar'
fnames['t2'] = 't2.tar'
fnames['pd'] = 'pd.tar'
fnames['mra'] = 'mra.tar'
fnames['demographic'] = 'demographic.xls'

if DOWNLOAD_IMAGES:
    # Download all IXI data
    for key, url in urls.items():

        if not os.path.isfile(fnames[key]):
            print('Downloading {} from {}'.format(fnames[key], url))
            curr_file = FancyURLopener()
            curr_file.retrieve(url, fnames[key])
        else:
            print('File {} already exists. Skipping download.'.format(
                fnames[key]))

if EXTRACT_IMAGES:
    # Extract the HH subset of IXI
    for key, fname in fnames.items():

        if (fname.endswith('.tar')):
            print('Extracting IXI HH data from {}.'.format(fnames[key]))
            output_dir = os.path.join('./orig/', key)

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
Example no. 28
def main():
    import argparse

    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('data_file',
                        metavar='data_file',
                        type=str,
                        nargs=1,
                        help='file with count data')

    parser.add_argument("-s", "--settings", default=None)
    parser.add_argument("-j", "--jobs", type=int, default=None)
    parser.add_argument("-p",
                        "--preprocessing",
                        choices=['normalize_sf'],
                        default=None)
    parser.add_argument("-n", "--nosubset", action='store_true')
    args = parser.parse_args()

    if args.settings is not None:
        settings = h.load_settings(args.settings)
    else:
        import outpyr.settings as settings

    if validate_web_url(args.data_file[0]):
        if args.data_file[0].endswith('/'):
            data_file = args.data_file[0][:-1].rsplit('/', 1)[1]
        else:
            data_file = args.data_file[0].rsplit('/', 1)[1]

        if os.path.isfile(data_file):
            print(
                'It seems that the provided URL was already downloaded to file %s. Skipping download.'
                % data_file)
        else:
            print('Downloading URL', args.data_file[0], '...')

            opener = FancyURLopener({})
            with opener.open(args.data_file[0]) as f:
                text = f.read().decode('utf-8')
            print('Finished!')

            with open('tmp.txt', 'w', encoding='utf-8') as f:
                f.write(text)

            df = h.csv_to_df('tmp.txt')
            if (not args.nosubset
                    and list(df.index) == helpers_kremer.INDEX_FULL
                    and list(df.columns) == helpers_kremer.COLUMNS):
                print('Kremer dataset recognized, filtering genes...')
                df = df.loc[helpers_kremer.INDEX_FILTERED, :]
                print('Done!')
            h.save_df_to_csv(df, data_file)
            os.remove('tmp.txt')
    else:
        data_file = args.data_file[0]
    base_name, ext = os.path.splitext(os.path.basename(data_file))
    dir_ = os.path.abspath(os.path.dirname(data_file))

    pvalues_file = os.path.join(dir_, base_name + '-pvalues.csv')
    pvalues_std_file = os.path.join(dir_, base_name + '-pvalues-std.csv')
    pvalues_sample_adjusted_file = os.path.join(
        dir_, base_name + '-pvalues-adjusted.csv')
    pvalues_gene_adjusted_file = os.path.join(
        dir_, base_name + '-pvalues-gene-adjusted.csv')
    zscores_file = os.path.join(dir_, base_name + '-zscores.csv')

    if os.path.isfile(data_file):
        print('Running OutPyR on', data_file, '...')
        if args.jobs is None:
            gene_subset = None
            output_dir = outpyr.train_cpu_singleprocess.run(
                data_file, settings, args.preprocessing, gene_subset,
                'p_j_r_j_numba')
        else:
            output_dir = outpyr.train_cpu_multiprocess.run(
                data_file, settings, args.preprocessing, args.jobs)

        dir_abs = os.path.join(dir_, output_dir)
        from outpyr import helpers_tensorflow as htf
        ti = htf.TraceInspector(dir_abs)
        if 'p_values_mean' not in ti.v:
            print('Post-sampling: calculating p-values...')
            ti.set_final_values_from_trace()
            print('Done!')
            print('Saving p-values inside of the trace directory...')
            with open(os.path.join(ti.trace_data_folder, 'variables.pickle'),
                      'wb') as f_variables:
                pickle.dump(ti.v, f_variables)
            print('Finished!')

        print('Saving scores as CSV files...')
        h.save_df_to_csv(
            pd.DataFrame(ti.get_p_value_matrix(),
                         index=ti.df.index,
                         columns=ti.df.columns), pvalues_file)
        h.save_df_to_csv(
            pd.DataFrame(np.sqrt(ti.v['p_values_var']),
                         index=ti.df.index,
                         columns=ti.df.columns), pvalues_std_file)

    else:
        parser.error("The file (%s) you provided does not exist." % data_file)
Example no. 29
 def http_error_default(self, url, fp, errorcode, errmsg, headers):
     if errorcode >= 400:
         raise HTTPError(url, errorcode, errmsg, headers, fp)
     else:
         return FancyURLopener.http_error_default(self, url, fp, errorcode, errmsg, headers)
Example no. 30
 def http_error_default(self, url, fp, errcode, errmsg, headers):
     if errcode == 404:
         raise HTTPError(url, errcode, errmsg, headers, fp)
     else:
         return FancyURLopener.http_error_default(self, url, fp, errcode, errmsg, headers)
Example no. 31
from urllib.request import FancyURLopener
from bs4 import BeautifulSoup


class AppURLopener(FancyURLopener):
    version = "Mozilla/5.0"


default_agent = FancyURLopener().version
changed_agent = AppURLopener().version
print(default_agent, "->", changed_agent)
url = "http://fd.postech.ac.kr/bbs/today_menu.php?bo_table=weekly&ckattempt=1"
html = AppURLopener().open(url)
result = BeautifulSoup(html, 'html.parser')
print(result)
Example no. 32
 def __init__(self, user, pw):
     self.username = user
     self.password = pw
     self.numTries = 0
     FancyURLopener.__init__(self)
Example no. 33
 def __init__(self, proxy={}, usr=None, pwd=None):
     FancyURLopener.__init__(self, proxy)
     self.count = 0
     self.proxy = proxy
     self.usr = usr
     self.pwd = pwd
Example no. 34
 def __init__(self, user, passwd):
     self._user = user
     self._passwd = passwd
     self._promptcalled = False
     FancyURLopener.__init__(self)
Example no. 35
    def download(self):
        bean = self.bean
        update = self.update
        if not bean or not bean.path:
            return None

        opener = FancyURLopener()
        remote = opener.open(bean.path)
        remote_size = 0

        if "Content-Length" in remote.headers:
            remote_size = int(remote.headers["Content-Length"])
            bean.size = size2text(remote_size)

        block_size = 4096
        block_count = 0

        ext = get_file_extension(bean.path)

        path = FC().online_save_to_folder
        if not os.path.isdir(path):
            os.makedirs(path)

        if bean.save_to:
            to_file = os.path.join(bean.save_to, bean.text + ".mp3")
        else:
            to_file = get_bean_download_path(bean, FC().online_save_to_folder)

        if not os.path.exists(os.path.dirname(to_file)):
            os.makedirs(os.path.dirname(to_file))

        to_file_tmp = to_file + ".tmp"

        if os.path.exists(to_file_tmp):
            bean.status = DOWNLOAD_STATUS_INACTIVE
            bean.to_file = to_file
            update(bean)
            return None

        if os.path.exists(to_file):
            bean.status = DOWNLOAD_STATUS_COMPLETED
            bean.to_file = to_file
            update(bean)
            return None

        bean.save_to = to_file
        with open(to_file_tmp, "wb") as tmp_file:
            data = True
            """begin download"""
            self.bean.status = DOWNLOAD_STATUS_DOWNLOADING
            self.bean.path = to_file
            self.update(self.bean)

            while data:
                data = remote.read(block_size)
                if data:
                    block_count += 1
                    tmp_file.write(data)
                    #time.sleep(0.1)
                    # guard against a missing Content-Length (remote_size == 0)
                    if remote_size and block_count % 50 == 0:
                        bean.persent = block_count * block_size * 100.0 / remote_size
                        update(bean)
        time.sleep(0.5)
        """update file info on finish"""
        logging.debug("rename %s - %s" % (to_file_tmp, to_file))
        os.rename(to_file_tmp, to_file)
        bean.status = DOWNLOAD_STATUS_COMPLETED
        bean.to_file = to_file
        bean.persent = 100
        update(bean)
Example no. 37
 def http_error(self, url, fp, errcode, errmsg, headers, data=None):
   if errcode != 404:
     self.found = True
   return FancyURLopener.http_error(self, url, fp, errcode, errmsg, headers, data)
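Examples 3 and 37 look like fragments of the same retry-on-404 opener; a sketch of how the pieces might fit together. The http_error_404 retry logic is an assumption (the original class does not show it), as is the class name:

import time
from urllib.request import FancyURLopener

class NotFoundRetryOpener(FancyURLopener):  # assumed name
    def __init__(self, not_found_tries=30, not_found_delay=60, *args, **kwargs):
        FancyURLopener.__init__(self, *args, **kwargs)
        self.tries = 0
        self.total_tries = not_found_tries
        self.not_found_delay = not_found_delay
        self.found = False  # ever had anything not 404

    def http_error(self, url, fp, errcode, errmsg, headers, data=None):
        if errcode != 404:
            self.found = True
        return FancyURLopener.http_error(self, url, fp, errcode, errmsg,
                                         headers, data)

    def http_error_404(self, url, fp, errcode, errmsg, headers, data=None):
        # Assumed behavior: wait, then re-open, until the try budget runs out.
        self.tries += 1
        if self.tries < self.total_tries:
            time.sleep(self.not_found_delay)
            return self.open(url, data)
        return FancyURLopener.http_error_default(self, url, fp, errcode,
                                                 errmsg, headers)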
Example no. 38
#!/usr/bin/python3
from urllib.request import FancyURLopener
import re


email = '' # @gmail.com can be left out
password  = ''
url = 'https://%s:%s@mail.google.com/mail/feed/atom' % (email, password)
opener = FancyURLopener()
page = opener.open(url)

contents = page.read().decode('utf-8')

ifrom = contents.index('<fullcount>') + 11
ito   = contents.index('</fullcount>')
titles = contents.split('<title>')
fullcount = contents[ifrom:ito]
print(fullcount + ' new messages\n')
for mess in titles[2:]:
    mess = re.sub(r'(<modified>)[\w|\-|:]*', '', mess)
    mess = re.sub(r'\<[^>]*\>(tag:)*', ' ', mess)
    mess = re.sub(r'(gmail.google.com,2004:)\d*', '', mess)
    mess = re.sub(r'\&[^&]*\;', '', mess)
    print(mess[:45])

page.close()
Example no. 39
 def openit(self, url):
     self.numTries = 0
     return FancyURLopener.open(self, url)
Example no. 40
 def __init__(self, username):
     FancyURLopener.__init__(self)
     FancyURLopener.version = "/u/" + username + " getting awesome wallpapers"
Example no. 41
#!/usr/bin/env python

from urllib.request import FancyURLopener

email = 'your email'  # @gmail.com can be left out
password = '******'

url = 'https://%s:%s@mail.google.com/mail/feed/atom' % (email, password)

opener = FancyURLopener()
page = opener.open(url)

contents = page.read().decode('utf-8')

ifrom = contents.index('<fullcount>') + 11
ito = contents.index('</fullcount>')

fullcount = contents[ifrom:ito]

print(fullcount + ' new')
Example no. 42
 def __init__(self, username=None, passwd=None, *args, **kw):
     FancyURLopener.__init__(self, *args, **kw)
     self.username = username
     self.passwd = passwd
Example no. 44
File: gm3.py Project: xircon/bspwm
def auth():
    '''The method to do HTTPBasicAuthentication'''
    opener = FancyURLopener()
    f = opener.open(_URL)
    feed = f.read()
    return feed