Example No. 1
	def open(self, url, data=None, max_tries=None):
		"""
		Ouvrir une url
		@param url
		@param data les data (POST)
		@param max_tries le nombre maximum d'essais, si non préciser la valeur donnée lors de l'initialisation sera prise
		@return un stream
		@throw ExceptionMaxTries quand le nombre maximum de tentatives est atteind
		@throw ExceptionUrlForbid quand le robot n'a pas le droit de visiter l'url
		"""
		if not max_tries:
			max_tries = self.max_tries
		else:
			self.max_tries = max_tries
		if not self.robot or self.robot.can_fetch(url):
			for _ in range(max_tries):
				try:
					stream = FancyURLopener.open(self, url, data)
				except Exception as ex:
					error = get_traceback()+"\n"+str(ex)
				else:
					return stream
			else:
				raise ExceptionMaxTries("max tries %s : %s" % (url, error))
		else:
			raise ExceptionUrlForbid("robots can't access to %s" % url)
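
The method above is part of a FancyURLopener subclass in the original project; the exception classes, the get_traceback() helper and the max_tries/robot attributes it relies on are not shown. A minimal sketch of that scaffolding, assuming robot is any object exposing a one-argument can_fetch(url), could look like this:

# Hypothetical scaffolding for Example No. 1 (not the original project's code).
import traceback
from urllib.request import FancyURLopener

class ExceptionMaxTries(Exception):
    """Raised when every attempt to open the URL has failed."""

class ExceptionUrlForbid(Exception):
    """Raised when robots rules forbid fetching the URL."""

def get_traceback():
    # return the active exception's traceback as a string
    return traceback.format_exc()

class RetryOpener(FancyURLopener):
    def __init__(self, max_tries=3, robot=None):
        FancyURLopener.__init__(self)
        self.max_tries = max_tries  # default retry budget used by open()
        self.robot = robot          # any object with can_fetch(url), or None

    # the open() method from Example No. 1 would be defined here
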
Example No. 2
                    def ach5():
                        # t7, f, k, g and tech1 are closure variables from the
                        # enclosing GUI code (not defined in this snippet);
                        # END is presumably tkinter's END constant.
                        z = t7.get("1.0", END)
                        print(f)
                        print(k)
                        print(g)

                        sql1 = "SELECT mobile FROM customer_register WHERE email = '" + g + "' "
                        sql = "UPDATE complaints SET solution ='" + z + "' WHERE cid = '" + g + "' AND comp_heading = '" + k + "' "
                        db = pymysql.connect("localhost", "root", "", "cms")
                        c = db.cursor()
                        c.execute(sql)
                        try:
                            c.execute(sql1)
                            achaa = c.fetchone()
                            db.commit()
                            print(achaa)
                            tinn = FancyURLopener()
                            DL = []
                            for i in achaa:
                                DL.append(i)
                            ok = achaa[0]
                            phone = ok
                            yourmsg = "Hi customer, The solution of the complain '" + k + "' on '" + g + "' has been provided to your registered account. Kindly check it. \nThank you \nCMS "
                            page = tinn.open(
                                'http://5.189.169.241:5012/api/SendSMS?api_id=API245772015763&api_password=12345678&sms_type=T&encoding=T&sender_id=BLKSMS&phonenumber='
                                + str(phone) + '&textmessage="' + yourmsg +
                                '"')
                        except:
                            print("Invalid No.")
                        db.close()

                        tech1()
Example No. 3
def getFeed(url):
    try:
        opener = FancyURLopener()
        page = opener.open(url)
        contents = page.read().decode('utf-8')
        feed = feedparser.parse(contents)
        return feed
    except:
        return None
Example No. 5
def getBashorg():  # Fetch a quote from bash.im
    FancyURLopener.version = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0'
    myopener = FancyURLopener()
    page = myopener.open('https://bash.im/forweb/')
    s = page.read().decode("cp1251").replace("<\' + \'br>", "\\n").replace(
        "&quot;", "\"").replace("&lt;", "<").replace("&gt;", ">")
    page.close()
    strt = s.index('id="b_q_t" style="padding: 1em 0;">') + len(
        'id="b_q_t" style="padding: 1em 0;">')
    s = s[strt:]
    return s[:s.index("<\'")]
Example No. 6
def download(who, url):
    tmp = FancyURLopener()
    url = tmp.open(url)
    if 2 < sys.version_info.major:
        path = url.info().get_filename()
    else:
        path = url.info().get('Content-Disposition').split('=')[1].strip('"')
    path = os.path.basename(path)
    with open(path, 'wb') as f:
        while True:
            tmp = url.read(4096)
            if 0 == len(tmp):
                break
            f.write(tmp)
    return path
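
Note that the who argument is never used inside download(), and in the Python 3 branch info().get_filename() returns None unless the server sends a Content-Disposition header with a filename. A hedged usage sketch, with a placeholder URL that is assumed to send such a header:

# Hypothetical call; the URL is a placeholder and must answer with
# a Content-Disposition: attachment; filename=... header.
if __name__ == '__main__':
    saved = download('anyone', 'https://example.com/export?format=csv')
    print('saved to', saved)
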
Example No. 7
def urlopen(url):
    charset = None

    tmp = FancyURLopener()
    tmp = tmp.open(url)

    if 2 < sys.version_info.major:
        charset = tmp.info().get_content_charset()
    else:
        charset = 'windows-1251'

    tmp = tmp.read().decode(charset)
    if str != type(tmp):
        tmp = str(tmp.encode('utf-8'))

    return tmp
Example No. 8
 def run(self):
     opener = FancyURLopener()
     try:
         remotefile = opener.open(self.url)
     except IOError:
         logic.logger.new("Unable to connect to internet", 'ERROR')
         return
     if remotefile.getcode() == 404:
         logic.logger.new("PDB file not found on pdb.org", 'ERROR')
         return
     elif remotefile.getcode() >= 500:
         logic.logger.new("PDB.org is currently unavailable", 'ERROR')
         return
     localfile = open(self.pdbFullPath, 'wb')
     localfile.write(remotefile.read())
     localfile.close()
     remotefile.close()
Example No. 9
 def file_download(self, url):
     urlx = 'http:' + url
     imgname = url.split('/')[-1]
     if imgname.split('.')[-1] == 'gif':
         imgPath = os.path.join(PATH, 'gif', imgname)
     else:
         imgPath = os.path.join(PATH, 'pic', imgname)
     if not os.path.lexists(imgPath):
         # urllib.request.urlretrieve(urlx, imgPath)
         opener = FancyURLopener()
         opener.addheaders.clear()
         opener = self.insert_header(opener, urlx, imgPath)
         with open(imgPath, 'wb+') as f:
             # open the URL once and stream the body in fixed-size chunks
             response = opener.open(urlx)
             while True:
                 chunk = response.read(self._chunk_size)
                 if not chunk:
                     break
                 f.write(chunk)
Example No. 10
    def gen_qrcode(self):
        url = "https://zxing.org/w/chart?cht=qr&chs=350x350&chld=L&choe=UTF-8&chl=" + self.mecard(
        )
        webbrowser.open(url)

        png_file = "{}.png".format(self.fname)
        # urlretrieve(url, png_file)

        # with urlopen(url) as response, open("test.png", 'wb') as out_file:
        #     data = response.read()  # a `bytes` object
        #     out_file.write(data)

        # with urlopen(url) as response, open(png_file, 'wb') as out_file:
        #     shutil.copyfileobj(response, out_file)

        # credit: http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
        # credit: http://stackoverflow.com/questions/31893207/python-3-4-http-error-505-retrieving-json-from-url
        url_request = FancyURLopener({})
        with url_request.open(url) as response, open(png_file,
                                                     'wb') as out_file:
            shutil.copyfileobj(response, out_file)
Example No. 11
    def __init__(self, url, level):
        super(myParser, self).__init__()
        sleep(0.1)
        print('Checking URL', url)
        self.__level = level
        self.__done = False
        self.__currentlyParsingDeadATag = False
        self.__currentlyParsingTitleTag = False
        self.__url = url
        self.linkWasDead = False
        parsedURLs.append(self.__url)
        try:
            opener = FancyURLopener({})
            f = opener.open(self.__url)
            data = f.read()
        except ssl.SSLError:
            return
        except OSError:
            return
        except ValueError:
            if not self.__url in deadURLs:
                print()
                print('Found a dead link:', self.__url)
                deadURLs.append(self.__url)
                self.linkWasDead = True
            self.__done = True
            return

        try:
            text = data.decode(errors='replace')
        except UnicodeDecodeError:
            pass
            #print('This is a binary file:', self.__url)
        else:
            try:
                self.feed(text)
            except ValueError:
                pass
            except ssl.SSLError:
                pass
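
The __init__ above belongs to an HTMLParser subclass (it calls self.feed() on the downloaded text), and parsedURLs/deadURLs are module-level lists. A minimal sketch of that surrounding context, with the handler methods elided since they are not part of the snippet:

# Hypothetical context for Example No. 11 (the original project's definitions are not shown).
import ssl
from time import sleep
from html.parser import HTMLParser
from urllib.request import FancyURLopener

parsedURLs = []  # every URL that has been visited so far
deadURLs = []    # URLs that turned out to be dead links

class myParser(HTMLParser):
    # the __init__ from Example No. 11 would go here, together with the
    # handle_starttag/handle_data overrides that drive the link checking
    pass
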
Example No. 12
def main():
    import argparse

    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('data_file',
                        metavar='data_file',
                        type=str,
                        nargs=1,
                        help='file with count data')

    parser.add_argument("-s", "--settings", default=None)
    parser.add_argument("-j", "--jobs", type=int, default=None)
    parser.add_argument("-p",
                        "--preprocessing",
                        choices=['normalize_sf'],
                        default=None)
    parser.add_argument("-n", "--nosubset", action='store_true')
    args = parser.parse_args()

    if args.settings is not None:
        settings = h.load_settings(args.settings)
    else:
        import outpyr.settings as settings

    if validate_web_url(args.data_file[0]):
        if args.data_file[0].endswith('/'):
            data_file = args.data_file[0][:-1].rsplit('/', 1)[1]
        else:
            data_file = args.data_file[0].rsplit('/', 1)[1]

        if os.path.isfile(data_file):
            print(
                'It seems that the provided URL was already downloaded to file %s. Skipping downloading.'
                % data_file)
        else:
            print('Downloading URL', args.data_file[0], '...')

            opener = FancyURLopener({})
            with opener.open(args.data_file[0]) as f:
                text = f.read().decode('utf-8')
            print('Finished!')

            with open('tmp.txt', 'w', encoding='utf-8') as f:
                f.write(text)

            df = h.csv_to_df('tmp.txt')
            if (not args.nosubset
                    and list(df.index) == helpers_kremer.INDEX_FULL
                    and list(df.columns) == helpers_kremer.COLUMNS):
                print('Kremer dataset recognized, filtering genes...')
                df = df.loc[helpers_kremer.INDEX_FILTERED, :]
                print('Done!')
            h.save_df_to_csv(df, data_file)
            os.remove('tmp.txt')
    else:
        data_file = args.data_file[0]
    base_name, ext = os.path.splitext(os.path.basename(data_file))
    dir_ = os.path.abspath(os.path.dirname(data_file))

    pvalues_file = os.path.join(dir_, base_name + '-pvalues.csv')
    pvalues_std_file = os.path.join(dir_, base_name + '-pvalues-std.csv')
    pvalues_sample_adjusted_file = os.path.join(
        dir_, base_name + '-pvalues-adjusted.csv')
    pvalues_gene_adjusted_file = os.path.join(
        dir_, base_name + '-pvalues-gene-adjusted.csv')
    zscores_file = os.path.join(dir_, base_name + '-zscores.csv')

    if os.path.isfile(data_file):
        print('Running OutPyR on', data_file, '...')
        if args.jobs is None:
            gene_subset = None
            output_dir = outpyr.train_cpu_singleprocess.run(
                data_file, settings, args.preprocessing, gene_subset,
                'p_j_r_j_numba')
        else:
            output_dir = outpyr.train_cpu_multiprocess.run(
                data_file, settings, args.preprocessing, args.jobs)

        dir_abs = os.path.join(dir_, output_dir)
        from outpyr import helpers_tensorflow as htf
        ti = htf.TraceInspector(dir_abs)
        if 'p_values_mean' not in ti.v:
            print('Post-sampling: calculating p-values...')
            ti.set_final_values_from_trace()
            print('Done!')
            print('Saving p-values inside of the trace directory...')
            with open(os.path.join(ti.trace_data_folder, 'variables.pickle'),
                      'wb') as f_variables:
                pickle.dump(ti.v, f_variables)
            print('Finished!')

        print('Saving scores as CSV files...')
        h.save_df_to_csv(
            pd.DataFrame(ti.get_p_value_matrix(),
                         index=ti.df.index,
                         columns=ti.df.columns), pvalues_file)
        h.save_df_to_csv(
            pd.DataFrame(np.sqrt(ti.v['p_values_var']),
                         index=ti.df.index,
                         columns=ti.df.columns), pvalues_std_file)

    else:
        parser.error("The file (%s) you provided does not exist." % data_file)
Example No. 13
def main():
    import argparse

    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('data_file',
                        metavar='data_file',
                        type=str,
                        nargs=1,
                        help='file with count data')

    parser.add_argument("-s", "--settings", default=None)
    parser.add_argument("-p",
                        "--preprocessing",
                        choices=['normalize_sf'],
                        default=None)
    parser.add_argument("-n", "--nosubset", action='store_true')
    args = parser.parse_args()

    if args.settings is not None:
        settings = h.load_settings(args.settings)
    else:
        import outpyr.settings as settings

    if validate_web_url(args.data_file[0]):
        if args.data_file[0].endswith('/'):
            data_file = args.data_file[0][:-1].rsplit('/', 1)[1]
        else:
            data_file = args.data_file[0].rsplit('/', 1)[1]

        if os.path.isfile(data_file):
            print(
                'It seems that the provided URL was already downloaded to file %s. Skipping downloading.'
                % data_file)
        else:
            print('Downloading URL', args.data_file[0], '...')

            opener = FancyURLopener({})
            with opener.open(args.data_file[0]) as f:
                text = f.read().decode('utf-8')
            print('Finished!')

            with open('tmp.txt', 'w', encoding='utf-8') as f:
                f.write(text)

            df = h.csv_to_df('tmp.txt')
            if (not args.nosubset
                    and list(df.index) == helpers_kremer.INDEX_FULL
                    and list(df.columns) == helpers_kremer.COLUMNS):
                print('Kremer dataset recognized, filtering genes...')
                df = df.loc[helpers_kremer.INDEX_FILTERED, :]
                print('Done!')
            h.save_df_to_csv(df, data_file)
            os.remove('tmp.txt')
    else:
        data_file = args.data_file[0]
    base_name, ext = os.path.splitext(os.path.basename(data_file))
    dir_ = os.path.abspath(os.path.dirname(data_file))

    pvalues_file = os.path.join(dir_, base_name + '-opx-pv.csv')

    if os.path.isfile(data_file):
        print('Running OutPyRX on', data_file, '...')
        gene_subset = None
        output_dir = outpyr.train_cpu_singleprocess.run(
            data_file, settings, args.preprocessing, gene_subset,
            'p_j_r_j_numba')

        dir_abs = os.path.join(dir_, output_dir)
        from outpyr import helpers_tensorflow as htf
        ti = htf.TraceInspector(dir_abs)
        if 'p_values_mean' not in ti.v:
            print('Post-sampling: calculating p-values...')
            ti.set_final_values_from_trace()
            print('Done!')
            print('Saving p-values inside of the trace directory...')
            with open(os.path.join(ti.trace_data_folder, 'variables.pickle'),
                      'wb') as f_variables:
                pickle.dump(ti.v, f_variables)
            print('Finished!')

        print('Saving scores as CSV files...')
        delta__ = ti.v['r_avg_diff_matrix'] / (ti.v['iteration'] + 1)
        zs__ = np.empty_like(delta__)
        for j in range(delta__.shape[0]):
            row = delta__[j, :]
            mu = row.mean()
            std = row.std()
            if std == 0:
                std = 0.000000000000001
            zs_row_ = (row - mu) / std
            _tmp = np.copy(zs_row_)
            _tmp[np.isinf(_tmp)] = 0
            _tmp[np.isneginf(_tmp)] = 0
            # cap positive and negative infinities separately; using np.isinf for
            # the first assignment would also overwrite the -inf entries before
            # the second line could see them
            zs_row_[np.isposinf(zs_row_)] = max(7, np.abs(_tmp).max())
            zs_row_[np.isneginf(zs_row_)] = min(-7, -np.abs(_tmp).max())
            zs__[j, :] = zs_row_
        pv__ = 2 * scipy.stats.norm.cdf(-np.abs(zs__))
        h.save_df_to_csv(
            pd.DataFrame(pv__, index=ti.df.index, columns=ti.df.columns),
            pvalues_file)

    else:
        parser.error("The file (%s) you provided does not exist." % data_file)
Example No. 14
 def openit(self, url):
     self.numTries = 0
     return FancyURLopener.open(self, url)
Example No. 15
    def download(self):
        bean = self.bean
        update = self.update
        if not bean or not bean.path:
            return None

        opener = FancyURLopener()
        remote = opener.open(bean.path)
        remote_size = 0

        if "Content-Length" in remote.headers:
            remote_size = int(remote.headers["Content-Length"])
            bean.size = size2text(remote_size)

        block_size = 4096
        block_count = 0

        ext = get_file_extension(bean.path)

        path = FC().online_save_to_folder
        if not os.path.isdir(path):
            os.makedirs(path)

        if bean.save_to:
            to_file = os.path.join(bean.save_to, bean.text + ".mp3")
        else:
            to_file = get_bean_download_path(bean, FC().online_save_to_folder)

        if not os.path.exists(os.path.dirname(to_file)):
            os.makedirs(os.path.dirname(to_file))

        to_file_tmp = to_file + ".tmp"

        if os.path.exists(to_file_tmp):
            bean.status = DOWNLOAD_STATUS_INACTIVE
            bean.to_file = to_file
            update(bean)
            return None

        if os.path.exists(to_file):
            bean.status = DOWNLOAD_STATUS_COMPLETED
            bean.to_file = to_file
            update(bean)
            return None

        bean.save_to = to_file
        with open(to_file_tmp, "wb") as tmp_file:
            data = True
            """begin download"""
            self.bean.status = DOWNLOAD_STATUS_DOWNLOADING
            self.bean.path = to_file
            self.update(self.bean)

            while data:
                data = remote.read(block_size)
                if data:
                    block_count += 1
                    tmp_file.write(data)
                    #time.sleep(0.1)
                    persent = block_count * block_size * 100.0 / remote_size
                    if block_count % 50 == 0:
                        bean.persent = persent
                        update(bean)
        time.sleep(0.5)
        """update file info on finish"""
        logging.debug("rename %s - %s" % (to_file_tmp, to_file))
        os.rename(to_file_tmp, to_file)
        bean.status = DOWNLOAD_STATUS_COMPLETED
        bean.to_file = to_file
        bean.persent = 100
        update(bean)
Example No. 16
#!/usr/bin/python3
from urllib.request import FancyURLopener
import re


email = '' # @gmail.com can be left out
password  = ''
url = 'https://%s:%s@mail.google.com/mail/feed/atom' % (email, password)
opener = FancyURLopener()
page = opener.open(url)

contents = page.read().decode('utf-8')

ifrom = contents.index('<fullcount>') + 11
ito   = contents.index('</fullcount>')
titles = contents.split('<title>')
fullcount = contents[ifrom:ito]
print(fullcount + ' new messages\n')
for mess in titles[2:]:
    mess = re.sub(r'(<modified>)[\w|\-|:]*', '', mess)
    mess = re.sub(r'\<[^>]*\>(tag:)*', ' ', mess)
    mess = re.sub(r'(gmail.google.com,2004:)\d*', '', mess)
    mess = re.sub(r'\&[^&]*\;', '', mess)
    print(mess[:45])

page.close()
Example No. 17
def get_month_data(month, cookie, token):
    params = 'date={}&_token={}'.format(month, token)
    opener = FancyURLopener()
    opener.addheader('Cookie', cookie)
    stream = opener.open(APIURL, params)
    return stream.read().decode('utf-8')
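
For reference, URLopener.addheader() simply appends a (name, value) pair to opener.addheaders, and passing a second argument to open() turns the request into a POST with that string as the form body. A hedged usage sketch with placeholder values (APIURL and the real credentials live elsewhere in the original module):

# Hypothetical call; the cookie and CSRF token would normally come from a prior login request.
raw = get_month_data('2020-01', 'laravel_session=abc123', 'csrf-token-value')
print(raw)
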
Example No. 18
#!/usr/bin/env python

from urllib.request import FancyURLopener

email = 'your email'  # @gmail.com can be left out
password = '******'

url = 'https://%s:%s@mail.google.com/mail/feed/atom' % (email, password)

opener = FancyURLopener()
page = opener.open(url)

contents = page.read().decode('utf-8')

ifrom = contents.index('<fullcount>') + 11
ito = contents.index('</fullcount>')

fullcount = contents[ifrom:ito]

print(fullcount + ' new')
Example No. 19
File: gm3.py  Project: xircon/bspwm
def auth():
    '''The method to do HTTPBasicAuthentication'''
    opener = FancyURLopener()
    f = opener.open(_URL)
    feed = f.read()
    return feed