コード例 #1
0
ファイル: github.py プロジェクト: stt/vertx-trans
 def handler(msg):
     """Handle an OAuth token reply: on success, fetch the GitHub user profile.

     ``msg.body`` is dict-like; it carries 'access_token' (and a 'scope'
     string) on success, or an 'error' entry on failure.
     """
     # Fix: dict.has_key() was removed in Python 3 — the `in` operator is
     # the portable membership test and behaves identically here.
     if 'error' not in msg.body:
         headers = { 'Authorization': 'token '+msg.body['access_token'] }
         get('https://api.github.com/user', handle_response, headers=headers)
         # TODO(review): scope-specific handling was never implemented.
         if 'email' in msg.body['scope']: pass
     else:
         logger.error(msg.body)
コード例 #2
0
ファイル: browser.py プロジェクト: simartin/servo
    def _latest_chromedriver_url(self, browser_binary=None):
        """Return a chromedriver download URL matching the detected Chrome.

        Resolution order: the version-specific LATEST_RELEASE endpoint,
        then the major-version endpoint, and finally a Chromium snapshot
        build looked up via omahaproxy when neither answers.
        """
        latest = None
        chrome_version = self.version(browser_binary)
        # NOTE(review): assert is stripped under `python -O`; the code after
        # this point relies on chrome_version being truthy.
        assert chrome_version, "Cannot detect the version of Chrome"

        # Remove channel suffixes (e.g. " dev").
        chrome_version = chrome_version.split(' ')[0]
        parts = chrome_version.split(".")
        if len(parts) == 4:
            # MAJOR.MINOR.BUILD identifies the matching driver release.
            latest_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s.%s.%s" % tuple(parts[:-1])
            try:
                latest = get(latest_url).text.strip()
            except requests.RequestException:
                # Retry with the MAJOR version only.
                latest_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s" % parts[0]
                try:
                    latest = get(latest_url).text.strip()
                except requests.RequestException:
                    pass
        if latest is None:
            # Fall back to *Chromium* build archives.
            omaha = get("https://omahaproxy.appspot.com/deps.json?version=" + chrome_version).json()
            revision = omaha['chromium_base_position']
            url = "https://storage.googleapis.com/chromium-browser-snapshots/%s/%s/chromedriver_%s.zip" % (
                self.chromium_platform_string(), revision, self.platform_string())
        else:
            url = "https://chromedriver.storage.googleapis.com/%s/chromedriver_%s.zip" % (
                latest, self.platform_string())

        return url
コード例 #3
0
ファイル: test.py プロジェクト: BackupTheBerlios/py-acqua-svn
	def refresh_data (self, islocked):
		"""Reload the test table and its combo boxes from the database.

		Does nothing while *islocked* is true.
		"""
		if islocked: return
		
		self.store.clear ()
		self.vars[1].clear_all ()   # tank (vasca) combo
		self.vars[15].clear_all ()  # limits-collection combo
		
		# Repopulate the tank combo and the filter menu from the vasca table.
		for y in utils.get ('select * from vasca'):
			self.vars[1].append_text (y[3])
			self.filter_menu.append (gtk.CheckMenuItem (y[3]))
		
		# One store row per test record: 17 data columns followed by 13
		# colour columns initialised to the default colour gcolor[2].
		for y in utils.get ('select * from test'):
			self.store.append ([y[0], y[1], y[2], y[3], y[4],
					y[5], y[6], y[7], y[8], y[9], y[10], y[11], y[12], y[13], y[14], y[15], y[16],
					gcolor[2], gcolor[2], gcolor[2], gcolor[2], gcolor[2], gcolor[2], gcolor[2],
					gcolor[2], gcolor[2], gcolor[2], gcolor[2], gcolor[2], gcolor[2]])
		
		# "Riempo con i limiti" — fill with the limit collections.
		for y in impostazioni.get_names_of_collections ():
			self.vars[15].append_text (y)
		
		# Re-run limit checks over every row currently in the view's model.
		mod = self.view.get_model ()
		it = mod.get_iter_first ()
		
		while it != None:
			self._check_iterator (mod, it)
			it = mod.iter_next (it)
コード例 #4
0
ファイル: browser.py プロジェクト: brettz9/web-platform-tests
 def _latest_chromedriver_url(self, browser_binary=None):
     """Return a chromedriver download URL for the installed Chrome.

     Tries the official LATEST_RELEASE endpoints first; when the version is
     unknown or no release answers, falls back to the tip-of-tree Chromium
     snapshot build.
     """
     latest = None
     chrome_version = self.version(browser_binary)
     if chrome_version is not None:
         parts = chrome_version.split(".")
         if len(parts) == 4:
             # MAJOR.MINOR.BUILD selects the matching driver release.
             latest_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s.%s.%s" % (
                 parts[0], parts[1], parts[2])
             try:
                 latest = get(latest_url).text.strip()
             except requests.RequestException:
                 # Retry with the MAJOR version only.
                 latest_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s" % parts[0]
                 try:
                     latest = get(latest_url).text.strip()
                 except requests.RequestException:
                     pass
     if latest is None:
         # Fall back to the tip-of-tree *Chromium* build.
         latest_url = "https://storage.googleapis.com/chromium-browser-snapshots/%s/LAST_CHANGE" % (
             self.chromium_platform_string())
         latest = get(latest_url).text.strip()
         url = "https://storage.googleapis.com/chromium-browser-snapshots/%s/%s/chromedriver_%s.zip" % (
             self.chromium_platform_string(), latest, self.platform_string())
     else:
         url = "https://chromedriver.storage.googleapis.com/%s/chromedriver_%s.zip" % (
             latest, self.platform_string())
     return url
コード例 #5
0
def add_makefile(options):
    """Ensure the build-target directory exists and contains a Makefile.

    Copies the bundled resources/Makefile into the build tree unless one is
    already present.
    """
    utils.log('Adding makefile')
    build_dir = utils.get(options.download_target, options.build_target)
    makefile = utils.get(options.download_target, options.build_target, r'Makefile')
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    if not os.path.isfile(makefile):
        shutil.copyfile(utils.get(r'resources', r'Makefile'), makefile)
コード例 #6
0
def transfer_source(options):
    """Check out the requested SVN revision (or HEAD) into the download target."""
    utils.log('Downloading files from {0}'.format(options.download_url))
    client = pysvn.Client()

    # 'latest' means HEAD: check out without pinning a revision.
    if options.download_revision.lower() == 'latest':
        client.checkout(options.download_url, utils.get(options.download_target))
        return

    pinned = pysvn.Revision(pysvn.opt_revision_kind.number,
                            int(options.download_revision))
    client.checkout(options.download_url, utils.get(options.download_target),
                    revision=pinned)
コード例 #7
0
def generate_compile_file(options):
    """Render the batch-file template with build settings and write compile.bat."""
    utils.log('Generating temporary compilation file')
    template_path = utils.get(r'resources', r'template')
    with open(template_path, 'r') as src:
        template = src.read()
    rendered = template.format(
        windriver=options.windriver_install_dir,
        wind_base=options.wind_base,
        working_dir=utils.get(options.download_target, options.build_target))
    with open(r'compile.bat', 'w') as dst:
        dst.write(rendered)
コード例 #8
0
ファイル: browser.py プロジェクト: stjepang/servo
 def install_webdriver(self, dest=None):
     """Install the latest chromedriver into *dest* and return its path.

     *dest* defaults to the current working directory.  The downloaded
     binary is made executable before the path is returned.
     """
     if dest is None:
         # BUG FIX: os.pwd does not exist — the original raised
         # AttributeError whenever dest was omitted.  os.getcwd() is the
         # correct call (same as the sibling Geckodriver installer).
         dest = os.getcwd()
     latest = get("http://chromedriver.storage.googleapis.com/LATEST_RELEASE").text.strip()
     url = "http://chromedriver.storage.googleapis.com/%s/chromedriver_%s.zip" % (latest,
                                                                                  self.platform_string())
     unzip(get(url).raw, dest)
     # NOTE(review): argument order (dir, name) assumes a project-local
     # find_executable helper, not distutils' (name, path) — confirm.
     path = find_executable(dest, "chromedriver")
     st = os.stat(path)
     os.chmod(path, st.st_mode | stat.S_IEXEC)
     return path
コード例 #9
0
def version(module):
    """Build a version string for *module*.

    Git checkouts yield "<yyyymmdd>-<commit count>-<describe>"; otherwise
    the value comes from settings.release, defaulting to -1 when absent.
    """
    git_dir = join(root_dir, module, '.git')
    if not os.path.exists(git_dir):
        # Not a checkout: fall back to the release configuration.
        if 'modules' in settings.release and module in settings.release['modules']:
            return settings.release['modules'][module]['version']
        return -1

    os.chdir(join(root_dir, module))
    commit_date = get('git', 'log', '-1', '--format=%cd', '--date=iso').split(' ')[0].replace('-', '')
    commit_count = get('git', 'rev-list', 'HEAD', '--count').strip()
    describe = get('git', 'describe', '--always').strip()
    os.chdir(root_dir)
    return commit_date + '-' + commit_count + '-' + describe
コード例 #10
0
ファイル: test.py プロジェクト: BackupTheBerlios/py-acqua-svn
	def __init__ (self): 
		"""Build the Test window: list store, columns, input widgets and data."""
		# Schema of the backing `test` table:
		# id integer, date DATE, vasca FLOAT, ph FLOAT, kh FLOAT, gh
		# NUMERIC, no NUMERIC, noo NUMERIC, con NUMERIC, amm NUMERIC, fe
		# NUMERIC, ra NUMERIC, fo NUMERIC
		
		lst = gtk.ListStore (
			int,	# ID
			str,	# DATA (date)
			str,	# VASCA (tank)
			float,	# PH
			float,	# KH
			float,	# GH
			float,	# NO
			float,	# NO2
			float,	# COND
			float,	# AMMO
			float,	# FERRO (iron)
			float,	# RAME (copper)
			float,	# FOSFATI (phosphates)
			float,	# calcio (calcium)
			float,	# magnesio (magnesium)
			float)	# densita (density)

		cols = [_('Id'), _('Data'), _('Vasca'), _('Ph'), _('Kh'), _('Gh'), _('No'), _('No2'),
			_('Conducibilita\''), _('Ammoniaca'), _('Ferro'), _('Rame'), _('Fosfati'),
			_('Calcio'), _('Magnesio'), _('Densita\'')]
		
		# Input widgets: a date button, the tank combo, then 13 float entries.
		inst = [utils.DataButton (), utils.Combo ()]
		
		for i in range (13): inst.append (utils.FloatEntry ())

		dbwindow.DBWindow.__init__ (self, 2, 7, cols, inst, lst)
		
		# Preload the store with existing test rows (16 columns each)...
		for y in utils.get ('select * from test'):
			lst.append ([y[0], y[1], y[2], y[3], y[4],
					y[5], y[6], y[7], y[8], y[9], y[10], y[11], y[12], y[13], y[14], y[15]])
		
		# ...and the tank names into the combo box.
		for y in utils.get ('select * from vasca'):
			self.vars[1].append_text (y[3])

		self.set_title (_("Test"))
		self.set_size_request (600, 400)
		self.set_icon_from_file ("pixmaps/logopyacqua.jpg")

		# "Grafico" (graph) button opens the chart for the selected data.
		btn = gtk.Button (_("Grafico"))
		btn.connect ('clicked', self.on_draw_graph)
		btn.set_relief (gtk.RELIEF_NONE)

		self.button_box.pack_start (btn)

		self.show_all ()
コード例 #11
0
ファイル: browser.py プロジェクト: stjepang/servo
    def install_webdriver(self, dest=None):
        """Install the latest Geckodriver into *dest* and return its path.

        *dest* defaults to the current working directory.  Windows releases
        ship as .zip, everything else as .tar.gz.
        """
        if dest is None:
            dest = os.getcwd()

        version = self._latest_geckodriver_version()
        # Fix: renamed the local from `format` — it shadowed the builtin.
        archive_ext = "zip" if uname[0] == "Windows" else "tar.gz"
        logger.debug("Latest geckodriver release %s" % version)
        url = ("https://github.com/mozilla/geckodriver/releases/download/%s/geckodriver-%s-%s.%s" %
               (version, version, self.platform_string_geckodriver(), archive_ext))
        if archive_ext == "zip":
            unzip(get(url).raw, dest=dest)
        else:
            untar(get(url).raw, dest=dest)
        return find_executable(os.path.join(dest, "geckodriver"))
コード例 #12
0
ファイル: browser.py プロジェクト: larsbergstrom/servo
 def _official_chromedriver_url(self, chrome_version):
     """Return the official chromedriver URL for a 4-part Chrome version.

     Follows the documented version-selection scheme; returns None when no
     matching release can be reached.
     """
     # http://chromedriver.chromium.org/downloads/version-selection
     parts = chrome_version.split(".")
     # NOTE(review): assert is stripped under -O; callers must pass a
     # MAJOR.MINOR.BUILD.PATCH string.
     assert len(parts) == 4
     # MAJOR.MINOR.BUILD identifies the matching driver release.
     latest_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s.%s.%s" % tuple(parts[:-1])
     try:
         latest = get(latest_url).text.strip()
     except requests.RequestException:
         # Retry with the MAJOR version only before giving up.
         latest_url = "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s" % parts[0]
         try:
             latest = get(latest_url).text.strip()
         except requests.RequestException:
             return None
     return "https://chromedriver.storage.googleapis.com/%s/chromedriver_%s.zip" % (
         latest, self.platform_string())
コード例 #13
0
	def __init__ (self):
		"""Build the Invertebrates window and load its rows from the DB."""
		# Store columns: id, date, tank, quantity, name, image pixbuf, image path.
		lst = gtk.ListStore (int, str, str, int, str, gtk.gdk.Pixbuf, str)
		# NOTE(review): 6 header labels for 7 store columns — the trailing
		# image-path column appears intentionally unlabelled; confirm that
		# DBWindow hides it.
		self.col_lst = [_('Id'), _('Data'), _('Vasca'), _('Quantita'), _('Nome'), _("Immagine")]
		
		dbwindow.DBWindow.__init__ (self, 2, 2, self.col_lst,
			[utils.DataButton (), utils.Combo (), utils.IntEntry (), gtk.Entry (), utils.ImgEntry ()], lst)
		
		# Existing rows, with the image rendered as a pixbuf alongside its path.
		for y in utils.get ("select * from invertebrati"):
			lst.append([y[0], y[1], y[2], y[3], y[4], utils.make_image(y[5]), y[5]])
		# Tank names feed the combo widget.
		for y in utils.get ("select * from vasca"):
			self.vars[1].append_text (y[3])
		
		self.set_title (_("Invertebrati"))
		self.set_size_request (600, 400)
		self.set_icon_from_file ("pixmaps/logopyacqua.jpg")
コード例 #14
0
ファイル: utils_test.py プロジェクト: ceph/autotest
 def testGetWithDir(self):
     """utils.get() on a directory returns an existing path ending in '/'."""
     source = os.path.join(utils.get_tmp_dir(), 'testGetWithDir')
     os.mkdir(source)
     result = utils.get(source)
     self.assertTrue(result.endswith('/'))
     self.assertTrue(os.path.isdir(result))
コード例 #15
0
ファイル: nfl.py プロジェクト: lwnexgen/py_webradio
def schedule(apikey, date):
    """Return the scheduled NFL games for the current season, cached per day.

    NOTE(review): the *date* parameter is immediately overwritten with
    "now", so callers' values are ignored — confirm whether that is
    intentional before relying on it.
    """
    date = datetime.datetime.now()
    today = '{}-{}-{}'.format(date.year, date.month, date.day)
    cachefile = 'data/{}-nfl'.format(today)
    if os.path.exists(cachefile):
        # Serve today's cached schedule when we already fetched it.
        return json.load(open(cachefile))
    # A season starting before July belongs to the previous calendar year.
    nflseason = date.year
    if date.month < 7:
        nflseason -= 1
    games = []
    for stage in ['PRE', 'REG', 'PST']:
        url = schedule_pattern.format(your_api_key=apikey,
                                      access_level='ot',
                                      version=2,
                                      year=nflseason,
                                      nfl_season=stage,
                                      format='json')
        resp = get(url)
        if resp.status_code != 200:
            # Back off briefly on an API error and skip this stage.
            print resp
            time.sleep(3)
            continue
        sched = json.loads(resp.text)
        # Keep only games that are still in the 'scheduled' state.
        for week in sched['weeks']:
            for game in week['games']:
                if game.get('status') == 'scheduled':
                    games.append(game)
    if not games:
        return []
    # Cache the result for subsequent calls today.
    with open(cachefile, 'w') as cfp:
        cfp.write(json.dumps(games))
        cfp.flush()
    return games
コード例 #16
0
ファイル: crawler_cn.py プロジェクト: Peng-YM/LeetCode-Anki
    def fetch_solutionDetailArticle(self, slug):
        """Fetch one solution article by slug and upsert it into Solution.

        Posts the GraphQL `solutionDetailArticle` query to leetcode-cn and,
        when an article comes back, stores its content keyed by the related
        problem's title slug.
        """
        query_params = {
            "operationName":
            "solutionDetailArticle",
            "variables": {
                "slug": slug,
                "orderBy": "DEFAULT"
            },
            "query":
            "query solutionDetailArticle($slug: String!, $orderBy: SolutionArticleOrderBy!) {\n  solutionArticle(slug: $slug, orderBy: $orderBy) {\n    ...solutionArticle\n    content\n    question {\n      questionTitleSlug\n      __typename\n    }\n    position\n    next {\n      slug\n      title\n      __typename\n    }\n    prev {\n      slug\n      title\n      __typename\n    }\n    __typename\n  }\n}\n\nfragment solutionArticle on SolutionArticleNode {\n  rewardEnabled\n  canEditReward\n  uuid\n  title\n  slug\n  sunk\n  chargeType\n  status\n  identifier\n  canEdit\n  canSee\n  reactionType\n  reactionsV2 {\n    count\n    reactionType\n    __typename\n  }\n  tags {\n    name\n    nameTranslated\n    slug\n    tagType\n    __typename\n  }\n  createdAt\n  thumbnail\n  author {\n    username\n    profile {\n      userAvatar\n      userSlug\n      realName\n      __typename\n    }\n    __typename\n  }\n  summary\n  topic {\n    id\n    commentCount\n    viewCount\n    __typename\n  }\n  byLeetcode\n  isMyFavorite\n  isMostPopular\n  isEditorsPick\n  hitCount\n  videosInfo {\n    videoId\n    coverUrl\n    duration\n    __typename\n  }\n  __typename\n}\n"
        }

        resp = self.session.post("https://leetcode-cn.com/graphql",
                                 data=json.dumps(query_params).encode('utf8'),
                                 headers={
                                     "content-type": "application/json",
                                 })
        body = json.loads(resp.content)

        # parse data
        # Dotted-path lookup into the GraphQL response body.
        solution = get(body, "data.solutionArticle")
        if solution != None:
            questionTitleSlug = solution['question']["questionTitleSlug"]
            # Upsert (replace) the solution row for the matching problem.
            Solution.replace(
                problem=Problem.get(Problem.slug == questionTitleSlug),
                url=f"https://leetcode.com/articles/{questionTitleSlug}/",
                content=solution['content']).execute()
コード例 #17
0
    def solver(self, url: str):
        """Collect post titles/links from taifua.com (home page + AJAX page 2).

        NOTE(review): the *url* parameter is unused — the site address is
        hard-coded; confirm the intended contract with callers.
        """
        posts = []

        # Page 1 comes from the plain home page.
        res = get("https://taifua.com/")
        soup = BeautifulSoup(res, features="lxml")
        for item in soup.select(".list-title"):
            link = item.select_one("a")
            posts.append(Post(link.get_text(), link.get("href"), 0))

        # Page 2 is served by the theme's admin-ajax endpoint.
        res = post(
            "https://taifua.com/wp-admin/admin-ajax.php",
            {
                "append": "list-home",
                "paged": 2,
                "action": "ajax_load_posts",
                "query": "",
                "page": "home",
            },
        )
        soup = BeautifulSoup(res, features="lxml")
        for item in soup.select(".list-title"):
            link = item.select_one("a")
            posts.append(Post(link.get_text(), link.get("href"), 0))

        return posts
コード例 #18
0
ファイル: head.py プロジェクト: 872119925/SunFarm-Assist
def sign(is_print=None):
    """Call the daily sign-in endpoint and return the parsed response.

    When *is_print* is truthy, also print today's sign-in summary.
    (Fix: removed the unreachable `pass` that followed the return, and
    corrected the `respone` typo in the local name.)
    """
    response = utils.get(url=constant.sign_url.format(utils.get_wday()))
    print(response)
    if is_print:
        print_sign_today_situation(note=response.get('note'))
    return response
コード例 #19
0
 def present(self):
     """Look up this file in its DNAC namespace; cache id/sha1, return the id."""
     namespace_url = "dna/intent/api/v1/file/namespace/{nameSpace}".format(nameSpace=self.namespace)
     files = get(self.dnac, namespace_url)
     matches = [(entry['id'], entry['sha1Checksum'])
                for entry in files.json()['response'] if entry['name'] == self.name]
     if matches:
         self.fileid, self.sha1 = matches[0]
     else:
         self.fileid = None
         self.sha1 = None
     logger.debug("Looking for file {}, id found is {}".format(self.name, self.fileid))
     return self.fileid
コード例 #20
0
ファイル: utils_test.py プロジェクト: renormalist/autotest
 def testGetWithDir(self):
     """A directory given to utils.get() yields a trailing-slash path that exists."""
     dirpath = os.path.join(utils.get_tmp_dir(), 'testGetWithDir')
     os.mkdir(dirpath)
     fetched = utils.get(dirpath)
     self.assertTrue(fetched.endswith('/'))
     self.assertTrue(os.path.isdir(fetched))
コード例 #21
0
ファイル: domain.py プロジェクト: dotajin/haoku-open
	def parse_album(self, task, result):
		album_imgs = self.albums[task['album']]['imgs']
		album_pages = self.albums[task['album']]['pages']
		html = result['html']
		imgs = self.match(self.re_image, html)
		pages = self.match(self.re_page, html)

		title = self.albums[task['album']]['title']
		if not len(title):
			self.albums[task['album']]['title'] = self.search(self.re_title, html)

		if imgs:
			print imgs
			for img in imgs:
				if img not in album_imgs:
					album_imgs[img] = 'wait', ''
					content = get(img, allow_types='*/*', resp=True).content
					path = self.master.file.put(task['_id'], content, 'jpg')
					album_imgs[img] = 'done', path
		else:
			print 'imgs is None', imgs, task['_id']
			album_imgs[img] = 'wait'
		for page in pages:
			if page not in album_pages:
				album_pages[page] = 'wait'

		self.finish_album(task)
コード例 #22
0
def add_dataset(packageName):
    """Register an HDX dataset with the API, then poll until it leaves 'pending'.

    Skips (with a message) when the dataset is already present in
    apiHDXDatasetList.
    """
    if packageName in apiHDXDatasetList:
        print('Dataset ' + packageName + ' already exists, skipping...')
        return

    print('Adding dataset ' + packageName)

    result = post(payload={
        "name": packageName,
        "provider": "hdx",
        "connectorType": "rest",
        "tableName": packageName
    },
                  endpoint='v1/dataset',
                  api_url=api_url,
                  api_token=api_token)
    dataset_id = result['data']['id']

    status = 'pending'

    # Poll every 2 seconds until the backend finishes registering.
    # NOTE(review): no timeout — this loops forever if the status never
    # leaves 'pending'.
    while status == 'pending':
        get_result = get(payload={},
                         endpoint='v1/dataset/' + dataset_id,
                         api_url=api_url,
                         api_token=api_token)
        status = get_result['data']['attributes']['status']
        if status == 'pending':
            print('Sleeping...')
            time.sleep(2)

    print(packageName, result)
コード例 #23
0
ファイル: brave_new_words.py プロジェクト: rajmera3/TMSD
def _get_word_def(url):
	"""Scrape one Oxford Reference page for a headword and its definition.

	Returns {"word": ..., "definition": ...}, or None when scraping fails.
	"""
	try:
		resp = get(url)
		if not resp:
			print("Error while getting term information from: " + url)
			# NOTE(review): quit() exits the whole process on a bad
			# response — confirm this is intended in library code.
			quit()
		soup = BeautifulSoup(resp.content, 'html.parser')
		word = soup.find('span', class_='oxencycl-headword').text

		# Strip leading/trailing sense numbers from the definition text.
		definition = soup.find('div', class_='div1').find('p').get_text().strip()
		definition = definition.lstrip(string.digits).rstrip(string.digits).strip()

		print("{}\n{}".format(word, definition))
		return {
			"word": word,
			"definition": definition
		}
	except Exception as e:
		# Fix: include the exception so failures are diagnosable instead of
		# being silently reduced to the URL alone.
		print("Error while getting word from brave new words: " + url + " (" + repr(e) + ")")
		return None


# print(_get_word_def("http://www.oxfordreference.com/view/10.1093/acref/9780195305678.001.0001/acref-9780195305678-e-1"))

# generate text of all brave new words
# with open('brave_new_words_output.txt', 'a') as f:
# 	termsDefs = getWordsAndDefinition()
# 	for wordDef in termsDefs:
# 		f.write(wordDef['word'] + '\n')
# 		f.write(wordDef['definition'] + '\n')
コード例 #24
0
def playgame(date, feedId, provider, state):
    """Resolve a stream URL for the given date/feed and start Kodi playback.

    Tries the CDN-specific resolver URL first, then the CDN-less variant;
    plays with a salted mediaAuth cookie and falls back to the other CDN
    when the initial GET on the play URL fails.
    """
    def adjustQuality(masterUrl):
        # Map the addon's quality setting to a variant playlist; the
        # default ("720p 60fps" or unset) keeps the master playlist.
        _720p60fps = "720p 60fps"
        qualityUrlDict = {
            "360p": "1200K/1200_{0}.m3u8",
            "540p": "2500K/2500_{0}.m3u8",
            "720p": "3500K/3500_{0}.m3u8"
        }
        current = addon.getSetting("quality")
        if current is None or current == _720p60fps or current == "":
            return masterUrl
        else:
            # Live streams use the 'slide' playlist; finished games use the
            # trimmed 'complete' one.
            m3u8Path = qualityUrlDict.get(
                current, "3500K/3500_{0}.m3u8").format(
                    'slide' if state == 'In Progress' else 'complete-trimmed')
            xbmc.log(
                "Quality adjusted to '{0}', adjusting to {1}.".format(
                    current, m3u8Path), xbmc.LOGNOTICE)
            return masterUrl.rsplit('/', 1)[0] + "/" + m3u8Path

    def xbmcPlayer(url, mediaAuth):
        # Play through Kodi, passing the auth cookie via URL options.
        xbmc.log("XBMC trying to play URL [%s]" % (url), xbmc.LOGNOTICE)
        # NOTE(review): completeUrl is built but never used — the play call
        # below rebuilds the string from adjustQuality(url) instead.
        completeUrl = url + ("|Cookie=mediaAuth%%3D%%22%s%%22" % (mediaAuth))
        xbmc.Player().play(
            adjustQuality(url) + ("|Cookie=mediaAuth%%3D%%22%s%%22" %
                                  (mediaAuth)))
        #player.LazyManPlayer().play(adjustQuality(url) + ("|Cookie=mediaAuth%%3D%%22%s%%22" % (mediaAuth)))

    cdn = 'akc' if addon.getSetting("cdn") == "Akamai" else 'l3c'

    def getContentUrl(withCdn=True):
        # NHL and MLB feeds live under different paths on the resolver host.
        actualCdn = cdn if withCdn else ""
        if provider == "NHL.tv":
            return "http://freegamez.ga/m3u8/%s/%s%s" % (date, feedId,
                                                         actualCdn)
        else:
            return "http://freegamez.ga/mlb/m3u8/%s/%s%s" % (date, feedId,
                                                             actualCdn)

    contentUrl = getContentUrl()
    xbmc.log("Trying to resolve from content-url: '" + contentUrl + "'",
             xbmc.LOGNOTICE)
    if not utils.head(contentUrl):
        # Retry without the CDN suffix before giving up.
        contentUrl = getContentUrl(False)
        if not utils.head(contentUrl):
            xbmc.log("Cannot resolve content-url '" + contentUrl + "'",
                     xbmc.LOGERROR)
            raise ValueError("Invalid content-url '" + contentUrl + "'")
    response = urllib.urlopen(contentUrl)
    # The resolver returns an l3c URL; rewrite for the configured CDN.
    playUrl = response.read().replace('l3c', cdn)
    xbmc.log("Play URL resolved to : '" + playUrl + "'", xbmc.LOGNOTICE)
    mediaAuthSalt = utils.salt()
    if utils.get(playUrl, dict(mediaAuth=mediaAuthSalt)):
        xbmcPlayer(playUrl, mediaAuthSalt)
    else:
        # GET failed — switch CDNs and try the alternate host.
        otherCdn = 'akc' if cdn == 'l3c' else 'l3c'
        xbmc.log(
            "URL [%s] failed on GET, switching CDN from %s to %s" %
            (playUrl, cdn, otherCdn), xbmc.LOGNOTICE)
        xbmcPlayer(playUrl.replace(cdn, otherCdn), mediaAuthSalt)
コード例 #25
0
    def parse_album(self, task, result):
        album_imgs = self.albums[task['album']]['imgs']
        album_pages = self.albums[task['album']]['pages']
        html = result['html']
        imgs = self.match(self.re_image, html)
        pages = self.match(self.re_page, html)

        title = self.albums[task['album']]['title']
        if not len(title):
            self.albums[task['album']]['title'] = self.search(
                self.re_title, html)

        if imgs:
            print imgs
            for img in imgs:
                if img not in album_imgs:
                    album_imgs[img] = 'wait', ''
                    content = get(img, allow_types='*/*', resp=True).content
                    path = self.master.file.put(task['_id'], content, 'jpg')
                    album_imgs[img] = 'done', path
        else:
            print 'imgs is None', imgs, task['_id']
            album_imgs[img] = 'wait'
        for page in pages:
            if page not in album_pages:
                album_pages[page] = 'wait'

        self.finish_album(task)
コード例 #26
0
def get_status(dnac, serial):
    """Return the PnP onboarding state for *serial*, or None when no record exists."""
    response = get(dnac, "onboarding/pnp-device?serialNumber={}".format(serial))
    try:
        device = response.json()[0]
    except IndexError:
        # Empty result set: no PnP record for this serial number.
        return None
    return device['deviceInfo']['onbState']
コード例 #27
0
ファイル: crawler_cn.py プロジェクト: Peng-YM/LeetCode-Anki
    def fetch_questionSolutionArticles(self, slug):
        """Find the first official (byLeetcode) solution for a problem slug.

        Queries up to 10 solution articles via GraphQL and delegates to
        fetch_solutionDetailArticle for the first LeetCode-authored one.
        Returns None implicitly when no such article exists.
        """
        print(f"🤖 Fetching solution for problem: {slug}")
        query_params = {
            "operationName":
            "questionSolutionArticles",
            "variables": {
                "questionSlug": slug,
                "first": 10,
                "skip": 0,
                "orderBy": "DEFAULT"
            },
            "query":
            "query questionSolutionArticles($questionSlug: String!, $skip: Int, $first: Int, $orderBy: SolutionArticleOrderBy, $userInput: String, $tagSlugs: [String!]) {\n  questionSolutionArticles(questionSlug: $questionSlug, skip: $skip, first: $first, orderBy: $orderBy, userInput: $userInput, tagSlugs: $tagSlugs) {\n    totalNum\n    edges {\n      node {\n        ...solutionArticle\n        __typename\n      }\n      __typename\n    }\n    __typename\n  }\n}\n\nfragment solutionArticle on SolutionArticleNode {\n  rewardEnabled\n  canEditReward\n  uuid\n  title\n  slug\n  sunk\n  chargeType\n  status\n  identifier\n  canEdit\n  canSee\n  reactionType\n  reactionsV2 {\n    count\n    reactionType\n    __typename\n  }\n  tags {\n    name\n    nameTranslated\n    slug\n    tagType\n    __typename\n  }\n  createdAt\n  thumbnail\n  author {\n    username\n    profile {\n      userAvatar\n      userSlug\n      realName\n      __typename\n    }\n    __typename\n  }\n  summary\n  topic {\n    id\n    commentCount\n    viewCount\n    __typename\n  }\n  byLeetcode\n  isMyFavorite\n  isMostPopular\n  isEditorsPick\n  hitCount\n  videosInfo {\n    videoId\n    coverUrl\n    duration\n    __typename\n  }\n  __typename\n}\n"
        }
        resp = self.session.post("https://leetcode-cn.com/graphql",
                                 data=json.dumps(query_params).encode('utf8'),
                                 headers={
                                     "content-type": "application/json",
                                 })
        body = json.loads(resp.content)

        # parse data
        # Dotted-path lookup into the GraphQL response body.
        edges = get(body, "data.questionSolutionArticles.edges")
        if edges != None and len(edges) > 0:
            for edge in edges:
                # if Solution.get_or_none(Solution.problemid == edge['uuid']) is not None:
                #     continue
                # Only official, properly-slugged articles are fetched.
                if edge != None and edge["node"] != None and edge["node"][
                        "byLeetcode"] and edge["node"]["slug"] != None:
                    return self.fetch_solutionDetailArticle(
                        edge["node"]["slug"])
コード例 #28
0
ファイル: utils_test.py プロジェクト: ceph/autotest
 def testGetWithHTTP(self):
     """utils.get() on an HTTP URL should download the file locally."""
     # Yeah, this test is a bad idea, oh well
     # NOTE(review): depends on live network access and on kernel.org's
     # README content; `file()` is the Python 2 builtin.
     url = 'http://www.kernel.org/pub/linux/kernel/README'
     tmppath = utils.get(url)
     f = file(tmppath)
     f.readline()
     # The second line of the README is expected to contain "Linux".
     self.assertTrue('Linux' in f.readline().split())
コード例 #29
0
ファイル: crawler_cn.py プロジェクト: Peng-YM/LeetCode-Anki
    def fetch_mySubmissionDetail(self, solutionid, slug):
        """Fetch one of the user's submissions by id and insert it locally.

        Posts the GraphQL `mySubmissionDetail` query to leetcode-cn and
        records id, language, timestamp and source code in Submission.
        """
        query_params = {
            "operationName":
            "mySubmissionDetail",
            "variables": {
                "id": solutionid
            },
            "query":
            "query mySubmissionDetail($id: ID!) {\n  submissionDetail(submissionId: $id) {\n    id\n    code\n    runtime\n    memory\n    rawMemory\n    statusDisplay\n    timestamp\n    lang\n    passedTestCaseCnt\n    totalTestCaseCnt\n    sourceUrl\n    question {\n      titleSlug\n      title\n      translatedTitle\n      questionId\n      __typename\n    }\n    ... on GeneralSubmissionNode {\n      outputDetail {\n        codeOutput\n        expectedOutput\n        input\n        compileError\n        runtimeError\n        lastTestcase\n        __typename\n      }\n      __typename\n    }\n    submissionComment {\n      comment\n      flagType\n      __typename\n    }\n    __typename\n  }\n}\n"
        }

        resp = self.session.post("https://leetcode-cn.com/graphql",
                                 data=json.dumps(query_params).encode('utf8'),
                                 headers={
                                     "content-type": "application/json",
                                 })
        body = json.loads(resp.content)

        # parse data
        # Dotted-path lookup into the GraphQL response body.
        solution = get(body, "data.submissionDetail")
        # Solution.replace(
        #         problem=solution['id'],
        #         url=f"https://leetcode-cn.com/articles/{slug}/",
        #         content=solution['code']
        #     ).execute()

        # NOTE(review): plain insert — re-fetching the same submission id
        # will raise on the primary key; confirm whether upsert was meant.
        Submission.insert(id=solution['id'],
                          slug=slug,
                          language=solution['lang'],
                          created=solution['timestamp'],
                          source=solution['code']).execute()
コード例 #30
0
ファイル: client.py プロジェクト: lism/GateIOTools
def order_book(currency_pair: str):
    """Fetch market depth (open orders) for *currency_pair*.

    Returns an (asks, bids) tuple as delivered by the API: asks are the
    sell-side [price, amount] levels, bids the buy-side levels, e.g.::

        asks = [[29500, 4.07172355], [29499, 0.00203397], ...]
        bids = [[28001, 0.0477], [28000, 0.35714018], ...]
    """
    book = get(DATA_URL, "/orderBook/" + currency_pair)
    return book['asks'], book['bids']
コード例 #31
0
def declare(address, user, password, meta):
    """Send a declare request to the tag service at *address* and return its result."""
    endpoint = 'http://' + address + '/tag/declare'
    payload = {'username': user, 'password': password, 'meta': meta}
    form_headers = {
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
    }
    return utils.get(endpoint, payload, form_headers)
コード例 #32
0
    def __init__(self, num_classes=50, training=True, _all=False):
        """
        Reads in the necessary data from disk and prepares data for training.
        """
        # Fixed seed so any random splits are reproducible across runs.
        np.random.seed(0)
        self.num_classes = num_classes
        # Per-channel normalisation statistics (zero until computed elsewhere).
        self.mean_vec = np.zeros(3)
        self.std_vec = np.zeros(3)
        # Load in all the data we need from disk
        self.metadata = pd.read_csv(get('csv_file'))
        # Maps each attribute value to its category label.
        self.semantic_labels = dict(
            zip(self.metadata['attributes'], self.metadata['category']))

        if _all:
            self.trainX, self.trainY = self._load_data('train')
            self.validX, self.validY = self._load_data('valid')
            # NOTE(review): 'all' is assigned without unpacking labels,
            # unlike 'train'/'valid' — confirm _load_data('all') returns
            # features only.
            self.testX = self._load_data('all')
            self.all_index = np.arange(len(self.trainX) + len(self.testX))
            self.all_count = 0
            self.valid_count = 0
        else:
            self.trainX, self.trainY = self._load_data('train')
            self.train_count = 0

            if training:
                self.validX, self.validY = self._load_data('valid')
                self.valid_count = 0
            else:
                self.testX = self._load_data('test')
                self.test_count = 0
コード例 #33
0
ファイル: stock_if.py プロジェクト: riotbew/Docs
def get_data_to_redis():
	"""Spawn threaded fetches of stock data starting from the saved cursor.

	Reads an 'index' cursor via utils, wraps it when close to the end of
	the stock list, queues one MYThread per remaining stock and starts the
	first batch of 50.
	"""
	index = int(utils.get('index'))
	# Restart from the beginning when fewer than 50 stocks remain.
	if len(get_stock_list())-index<50:
		utils.set('index',0)
	stocks = get_stock_list()
	start_time = time.time()
	print index 
	try:
		if int(index) == len(stocks)-1:
			index = 0
			utils.set('index',0)
	except Exception as e:
		# NOTE(review): broad catch silently resets the cursor on any error.
		index = 0
		utils.set('index',0)
	
	_index = 0
	# Queue one worker thread per remaining stock (global `threads` list).
	for x in range(int(index),len(stocks)):
		item = stocks[x]

		t = MYThread(target=save_stock_data, args=(item,x,), callback=CallBack)
		t.setDaemon(False)
		threads.append(t)


	# Move up to 50 queued threads into the running set, then start them.
	# NOTE(review): pops without checking `threads` is non-empty — raises
	# IndexError when fewer than 50 threads were queued.
	while len(threadings.keys())<50:
		item = threads.pop(0)
		threadings[item.getName()] = item
		# start_time = delay(start_time)
	for v in threadings.values():
		v.start()
コード例 #34
0
ファイル: browser.py プロジェクト: thomasbshop/chromium
    def _get(self, channel="nightly"):
        """Download the latest Servo nightly for this platform.

        Only the "nightly" channel exists; any other value raises ValueError.
        """
        if channel != "nightly":
            raise ValueError("Only nightly versions of Servo are available")

        platform, extension, _ = self.platform_components()
        download_url = "https://download.servo.org/nightly/%s/servo-latest%s" % (platform, extension)
        return get(download_url)
コード例 #35
0
def run_compile_file():
    """Launch compile.bat through the shell, streaming output to stdout.

    Fix: the Popen handle was bound to an unused local; the return value is
    now discarded explicitly.  NOTE(review): the process is not waited on —
    compilation runs asynchronously; confirm callers expect that.
    """
    utils.log('Starting compilation')
    subprocess.Popen(
        utils.get(r'compile.bat'),
        shell=True,
        stdout=sys.__stdout__,
        cwd=os.getcwd())
コード例 #36
0
ファイル: utils_test.py プロジェクト: renormalist/autotest
 def testGetWithHTTP(self):
     """utils.get() on an HTTP URL should fetch the remote file to a local path."""
     # Yeah, this test is a bad idea, oh well
     # NOTE(review): requires live network access and assumes the README's
     # second line contains "Linux"; `file()` is the Python 2 builtin.
     url = 'http://www.kernel.org/pub/linux/kernel/README'
     tmppath = utils.get(url)
     f = file(tmppath)
     f.readline()
     self.assertTrue('Linux' in f.readline().split())
コード例 #37
0
ファイル: google_ngram.py プロジェクト: rajmera3/TMSD
def retrieve_absolute_percentage_counts(token, corpus, smoothing, start_year,
                                        end_year):
    '''Retrieve absolute counts and percentages for a given token.

    Loads the token's relative frequencies from the Google Ngram viewer and
    the absolute yearly token totals for the corpus from Google's source
    data, then multiplies each year's total by the relative frequency.

    Returns (absolute_counts, percentages) or None when the viewer page
    cannot be fetched or contains no data.
    '''

    # dictionary maps from corpus name to corpus id
    corpora = {
        'english': 15,
        'american english': 17,
        'british english': 18,
        'english fiction': 16,
        'chinese': 23,
        'french': 19,
        'german': 20,
        'hebrew': 24,
        'italian': 22,
        'russian': 25,
        'spanish': 21,
    }

    corpus_id = corpora[corpus]

    # Step 1: Load the frequency data from the ngram view

    token = token.replace(' ', '+')
    # construct the url, i.e. place the token and other parameters where they belong
    url = 'https://books.google.com/ngrams/interactive_chart?content={}&year_start={}&year_end={}' \
       '&corpus={}&smoothing={}'.format(token, start_year, end_year, corpus_id, smoothing)

    # Load the data from the page.
    page = get(url)
    if page is None: return None
    page = page.text

    # Find the places in the html where the data starts and ends
    start = page.find('var data = ')
    end = page.find('];\n', start)

    # Extract the data dictionary.  'var data = ' is 11 chars, so start + 12
    # additionally skips the opening '[' of the JS array; `end` stops before
    # '];', leaving the object literal inside the array.
    result = page[start + 12:end]
    if result == '': return None
    # SECURITY: eval() on remotely fetched page content — the embedded data
    # is a JS literal, not strict JSON. Safe only while books.google.com is
    # trusted; consider a JSON-compatible parser instead.
    data = eval(result)
    frequencies = data['timeseries']

    # Step 2: load the total number of tokens per year from Google's source data
    total_counts = _load_total_counts(corpus_id, start_year, end_year)

    # Step 3: calculate the absolute number of appearances by multiplying the frequencies with the total
    #         number of tokens
    absolute_counts = [
        round(frequencies[i] * total_counts[i])
        for i in range(len(frequencies))
    ]

    return absolute_counts, [x * 100 for x in frequencies]
コード例 #38
0
ファイル: pubmed.py プロジェクト: rajmera3/TMSD
def _get_total_count():
    """Fetch per-year PubMed base counts; returns {year (int): count (int)}."""
    payload = json.loads(get("https://med-by-year.appspot.com/showbasecounts").text)
    raw_counts = payload['counts']
    return {int(year): int(count) for year, count in raw_counts.items()}
コード例 #39
0
ファイル: scrap.py プロジェクト: psmolak/ogame-scrap
def servers(community):
    """Scrape all server numbers for one community off the ogame login page."""
    page = utils.get(BASE.format(community))
    soup = BeautifulSoup(page.text, 'html.parser')
    options = soup.find(id="serverLogin").find_all('option')
    # Each option value presumably looks like "sNNN-cc.host..." — keep the
    # numeric id between the leading 's' and the trailing country code.
    return [int(opt.get('value').split('.')[0][1:-3]) for opt in options]
コード例 #40
0
ファイル: browser.py プロジェクト: bocoup/wpt-docs
    def get_profile_bundle_url(self, version, channel):
        """Return the hg.mozilla.org archive URL for the testing profiles
        that match *version* on the given release *channel*."""
        if channel == "stable":
            repo = "https://hg.mozilla.org/releases/mozilla-release"
            tag = "FIREFOX_%s_RELEASE" % version.replace(".", "_")
        elif channel == "beta":
            repo = "https://hg.mozilla.org/releases/mozilla-beta"
            major = version.split(".", 1)[0]
            # Betas that were promoted to a stable release carry an END tag;
            # current betas do not, so fall back to tip for those.
            known_tags = {entry["tag"] for entry in
                          get("https://hg.mozilla.org/releases/mozilla-beta/json-tags"
                              ).json()["tags"]}
            candidate = "FIREFOX_BETA_%s_END" % major
            tag = candidate if candidate in known_tags else "tip"
        else:
            repo = "https://hg.mozilla.org/mozilla-central"
            # Always use tip as the tag for nightly; this isn't quite right
            # but to do better we need the actual build revision, which we
            # can get if we have an application.ini file
            tag = "tip"

        return "%s/archive/%s.zip/testing/profiles/" % (repo, tag)
コード例 #41
0
ファイル: test.py プロジェクト: BackupTheBerlios/py-acqua-svn
	def on_plot (self, widget):
		"""Collect the selected tanks and measurement columns, pull matching
		rows from the `test` table, and hand the series to self.plot()."""
		vasche = []   # selected tank names
		to_plot = []  # indices of the checked measurement columns
		data = []     # alternating [column_index, rows, column_index, rows, ...]
		
		# Tank names come from the active entries of the tank menu.
		for i in self.menu.get_children():
			if i.active:
				vasche.append (i.get_children()[0].get_text())
		
		# Column indices come from the checked checkboxes.
		for i in self.checks:
			if i.get_active ():
				to_plot.append (self.checks.index (i))
		
		# And here is the most absurd thing ever done
		for i in to_plot:
			for n in vasche:
				temp = []
				
				for y in utils.get ("SELECT * FROM test WHERE vasca = '%s'" % n.replace("'", "''")):
					temp2 = []
					
					temp2.append (y[1]) # date
					temp2.append (y[2]) # name
					temp2.append (y[i+3]) # value (ugly +3 offset: +2 for the leading columns, +1 because indices count from 0)
					
					temp.append (temp2)
				
				if len (temp) != 0:
					data.append (i)
					data.append (temp)
		
		del to_plot
		
		if data:
			self.plot (data)
コード例 #42
0
    def fetch(entry):
        """Poll one resource until its timestamp advances past the cached one,
        sleeping out the endpoint's refresh window between attempts.

        Returns (resource, response, new_timestamp, seconds_slept); raises
        RuntimeError after Snapshot.ATTEMPTS unchanged fetches.
        """
        resource, cached_timestamp = entry
        slept = 0
        t0 = timer()

        for _ in range(Snapshot.ATTEMPTS):
            response = utils.get(resource.url)

            # https://github.com/requests/requests/issues/2359
            if response.encoding is None:
                response.encoding = 'utf-8'

            root = ElementTree.fromstring(response.text)
            fresh_timestamp = int(root.attrib['timestamp'])

            if cached_timestamp != fresh_timestamp:
                elapsed = timer() - t0
                label = '{}, {}'.format(resource.server,
                                        resource.endpoint.encode())
                if slept > 0:
                    logger.info('Slept %ss for (%s)', slept, label)

                logger.debug('Fetched (%s) in %.3fs', label, elapsed)
                return (resource, response, fresh_timestamp, slept)

            # Unchanged: wait until just past the endpoint's next refresh.
            ahead = math.ceil(fresh_timestamp + resource.endpoint.seconds -
                              time())
            if 0 < ahead < 300:
                sleep(ahead)
                slept += ahead

        raise RuntimeError('Exceeded attempt limit')
コード例 #43
0
ファイル: browser.py プロジェクト: veeg/servo
    def install_webdriver(self, dest=None):
        """Download the newest geckodriver release, unpack it into *dest*
        (defaults to the current directory), and return the executable path."""
        if dest is None:
            dest = os.getcwd()

        version = self._latest_geckodriver_version()
        logger.debug("Latest geckodriver release %s" % version)
        # Windows releases ship as zip archives; everything else as tarballs.
        archive = "zip" if uname[0] == "Windows" else "tar.gz"
        url = (
            "https://github.com/mozilla/geckodriver/releases/download/%s/geckodriver-%s-%s.%s"
            % (version, version, self.platform_string_geckodriver(), archive))
        extract = unzip if archive == "zip" else untar
        extract(get(url).raw, dest=dest)
        return find_executable(os.path.join(dest, "geckodriver"))
コード例 #44
0
def find_device(dnac,deviceSerial):
    """Return the PnP device id for *deviceSerial*, or None (after printing
    a message) when the serial is unknown. Python 2 module (print statement)."""
    response = get (dnac, "onboarding/pnp-device?serialNumber={}".format(deviceSerial))

    try:
        # The endpoint returns a JSON list; an empty list means no match.
        return response.json()[0]['id']
    except IndexError as e:
        print "Cannot find serial:{}".format(deviceSerial)
コード例 #45
0
ファイル: browser.py プロジェクト: Jayflux/servo
    def install_webdriver(self, dest=None):
        """Download the latest operachromiumdriver into *dest* (defaults to
        the current directory) and return the path of the executable binary."""
        if dest is None:
            # Bug fix: was `os.pwd`, which does not exist in the os module and
            # raised AttributeError whenever dest was omitted.
            dest = os.getcwd()
        latest = get("https://api.github.com/repos/operasoftware/operachromiumdriver/releases/latest").json()["tag_name"]
        url = "https://github.com/operasoftware/operachromiumdriver/releases/download/%s/operadriver_%s.zip" % (latest,
                                                                                                                self.platform_string())
        unzip(get(url).raw, dest)

        # The archive unpacks into a platform-named subdirectory; flatten it.
        operadriver_dir = os.path.join(dest, "operadriver_%s" % self.platform_string())
        shutil.move(os.path.join(operadriver_dir, "operadriver"), dest)
        shutil.rmtree(operadriver_dir)

        # Make sure the binary carries the owner-execute bit.
        path = find_executable("operadriver")
        st = os.stat(path)
        os.chmod(path, st.st_mode | stat.S_IEXEC)
        return path
コード例 #46
0
ファイル: browser.py プロジェクト: shaho1090/gecko-dev
    def install_webdriver(self, dest=None, channel=None):
        """Download the latest operachromiumdriver into *dest* (defaults to
        the current directory) and return the path of the executable binary.

        `channel` is accepted for interface parity but unused here.
        """
        if dest is None:
            # Bug fix: was `os.pwd`, which does not exist in the os module and
            # raised AttributeError whenever dest was omitted.
            dest = os.getcwd()
        latest = get("https://api.github.com/repos/operasoftware/operachromiumdriver/releases/latest").json()["tag_name"]
        url = "https://github.com/operasoftware/operachromiumdriver/releases/download/%s/operadriver_%s.zip" % (latest,
                                                                                                                self.platform_string())
        unzip(get(url).raw, dest)

        # The archive unpacks into a platform-named subdirectory; flatten it.
        operadriver_dir = os.path.join(dest, "operadriver_%s" % self.platform_string())
        shutil.move(os.path.join(operadriver_dir, "operadriver"), dest)
        shutil.rmtree(operadriver_dir)

        # Make sure the binary carries the owner-execute bit.
        path = find_executable("operadriver")
        st = os.stat(path)
        os.chmod(path, st.st_mode | stat.S_IEXEC)
        return path
コード例 #47
0
ファイル: browser.py プロジェクト: foolip/web-platform-tests
    def get_profile_bundle_url(self, version, channel):
        """Return the hg.mozilla.org archive URL for the testing profiles
        that match *version* on the given release *channel*."""
        if channel == "stable":
            repo = "https://hg.mozilla.org/releases/mozilla-release"
            tag = "FIREFOX_%s_RELEASE" % version.replace(".", "_")
        elif channel == "beta":
            repo = "https://hg.mozilla.org/releases/mozilla-beta"
            major_version = version.split(".", 1)[0]
            # For beta we have a different format for betas that are now in stable releases
            # vs those that are not
            tags = get("https://hg.mozilla.org/releases/mozilla-beta/json-tags").json()["tags"]
            tags = {item["tag"] for item in tags}
            end_tag = "FIREFOX_BETA_%s_END" % major_version
            if end_tag in tags:
                tag = end_tag
            else:
                tag = "tip"
        else:
            repo = "https://hg.mozilla.org/mozilla-central"
            # Dead `if channel == "beta"` branch removed: this else is only
            # reached when channel is neither "stable" nor "beta".
            # Always use tip as the tag for nightly; this isn't quite right
            # but to do better we need the actual build revision, which we
            # can get if we have an application.ini file
            tag = "tip"

        return "%s/archive/%s.zip/testing/profiles/" % (repo, tag)
コード例 #48
0
def change_conf(next_conf=None):
    """Rewrite the remote DB config file with recommended knob settings.

    Pulls the current config, truncates everything after the ottertune
    marker line, re-appends the base configuration plus the new
    recommendation, and pushes the file back, keeping a .ottertune.bak
    backup of the original on the remote host.

    next_conf may be a {knob: value} dict or a path to a JSON file whose
    'recommendation' entry holds that dict.
    """
    signal = "# configurations recommended by ottertune:\n"
    next_conf = next_conf or {}

    # Fetch the live config into TEMP_DIR (fabric-style get: remote -> local,
    # presumably — confirm against the module's get/put helpers).
    tmp_conf_in = os.path.join(dconf.TEMP_DIR,
                               os.path.basename(dconf.DB_CONF) + '.in')
    get(dconf.DB_CONF, tmp_conf_in)
    with open(tmp_conf_in, 'r') as f:
        lines = f.readlines()

    # Ensure the marker line exists, then drop everything previously
    # appended after it so settings are not duplicated across runs.
    if signal not in lines:
        lines += ['\n', signal]

    signal_idx = lines.index(signal)
    lines = lines[0:signal_idx + 1]
    if dconf.BASE_DB_CONF:
        assert isinstance(dconf.BASE_DB_CONF, dict), \
            (type(dconf.BASE_DB_CONF), dconf.BASE_DB_CONF)
        base_conf = [
            '{} = {}\n'.format(*c) for c in sorted(dconf.BASE_DB_CONF.items())
        ]
        lines.extend(base_conf)

    # Accept either a JSON file path or an in-memory dict.
    if isinstance(next_conf, str):
        with open(next_conf, 'r') as f:
            recommendation = json.load(
                f, encoding="UTF-8",
                object_pairs_hook=OrderedDict)['recommendation']
    else:
        recommendation = next_conf

    assert isinstance(recommendation, dict)

    for name, value in recommendation.items():
        if dconf.DB_TYPE == 'oracle' and isinstance(value, str):
            # Strip 'B' characters from both ends (unit suffix for Oracle
            # size values) — NOTE(review): strip() also removes a leading B.
            value = value.strip('B')
        lines.append('{} = {}\n'.format(name, value))
    lines.append('\n')

    # Write the rebuilt config locally, then back up and replace the remote
    # file and clean up both temp copies.
    tmp_conf_out = os.path.join(dconf.TEMP_DIR,
                                os.path.basename(dconf.DB_CONF) + '.out')
    with open(tmp_conf_out, 'w') as f:
        f.write(''.join(lines))

    sudo('cp {0} {0}.ottertune.bak'.format(dconf.DB_CONF))
    put(tmp_conf_out, dconf.DB_CONF, use_sudo=False)
    local('rm -f {} {}'.format(tmp_conf_in, tmp_conf_out))
コード例 #49
0
    def fetch_submission(self, slug):
        """Download and persist accepted LeetCode submissions for *slug*.

        Queries the GraphQL submission list for the problem, then for each
        accepted submission not already stored scrapes its detail page for
        the source code and inserts a Submission row.

        Raises Exception when an accepted submission's code cannot be parsed
        out of the page.
        """
        print(f"🤖 Fetching submission for problem: {slug}")
        query_params = {
            'operationName': "Submissions",
            'variables': {"offset": 0, "limit": 20, "lastKey": '', "questionSlug": slug},
            'query': '''query Submissions($offset: Int!, $limit: Int!, $lastKey: String, $questionSlug: String!) {
                                        submissionList(offset: $offset, limit: $limit, lastKey: $lastKey, questionSlug: $questionSlug) {
                                        lastKey
                                        hasNext
                                        submissions {
                                            id
                                            statusDisplay
                                            lang
                                            runtime
                                            timestamp
                                            url
                                            isPending
                                            __typename
                                        }
                                        __typename
                                    }
                                }'''
        }

        resp = self.session.post("https://leetcode.com/graphql",
                                 data=json.dumps(query_params).encode('utf8'),
                                 headers={
                                     "content-type": "application/json",
                                 })
        body = json.loads(resp.content)

        # parse data
        # NOTE(review): this `get` looks like a dotted-path dict lookup helper
        # (not HTTP) — confirm against the module's imports.
        submissions = get(body, "data.submissionList.submissions")
        if len(submissions) > 0:
            for sub in submissions:
                # Skip submissions already stored in the local DB.
                if Submission.get_or_none(Submission.id == sub['id']) is not None:
                    continue

                if sub['statusDisplay'] == 'Accepted':
                    url = sub['url']
                    html = self.session.get(f'https://leetcode.com{url}').text

                    # The source code is embedded in a JS assignment on the
                    # detail page; re.S lets `.*` span newlines.
                    pattern = re.compile(
                        r'submissionCode: \'(?P<code>.*)\',\n  editCodeUrl', re.S
                    )

                    matched = pattern.search(html)
                    code = matched.groupdict().get('code') if matched else None
                    if code:
                        Submission.insert(
                            id=sub['id'],
                            slug=slug,
                            language=sub['lang'],
                            created=sub['timestamp'],
                            source=code.encode('utf-8')
                        ).execute()
                    else:
                        raise Exception(f"Cannot get submission code for problem: {slug}")
        random_wait(10, 15)
コード例 #50
0
def get_template(dnac, configId, supplied_params):
    """Resolve the parameter list for a DNA Center template.

    Fetches the template definition identified by *configId* and pairs each
    declared parameter name with its value from *supplied_params*.

    Returns a list of {"key": name, "value": value} dicts; raises KeyError
    when supplied_params lacks a declared parameter.
    """
    params = []
    response = get(dnac, "template-programmer/template/{}".format(configId))
    # Fix: loop variable was named `vars`, shadowing the builtin.
    for template_param in response.json()['templateParams']:
        name = template_param['parameterName']
        params.append({"key": name, "value": supplied_params[name]})
    return params
コード例 #51
0
def find_device(dnac, deviceSerial):
    """Return the PnP device id for *deviceSerial*, or None (after printing
    a message) when the serial is unknown. Python 2 module (print statement)."""
    response = get(
        dnac, "onboarding/pnp-device?serialNumber={}".format(deviceSerial))

    try:
        # The endpoint returns a JSON list; an empty list means no match.
        return response.json()[0]['id']
    except IndexError as e:
        print "Cannot find serial:{}".format(deviceSerial)
コード例 #52
0
def findservers(serv):
    '''Returns a server name when given a server uuid.'''
    reply = utils.get('%s/servers/%s' % (auth_data['uri'], serv),
                      auth_data['token'])
    # 404 means the uuid is unknown.
    if reply.status_code == 404:
        return None
    # NOTE: .json accessed as an attribute (old requests API), not called.
    return reply.json['server']['name']
コード例 #53
0
 def _chromium_chromedriver_url(self, chrome_version):
     """Locate a Chromium-snapshot chromedriver URL for *chrome_version*,
     falling back to the tip-of-tree snapshot when no exact match exists."""
     try:
         # Prefer the snapshot whose base position matches this version.
         deps = get("https://omahaproxy.appspot.com/deps.json?version=" + chrome_version).json()
         candidate = "https://storage.googleapis.com/chromium-browser-snapshots/%s/%s/chromedriver_%s.zip" % (
             self.chromium_platform_string(), deps['chromium_base_position'], self.platform_string())
         # Check the status without downloading the content (this is a streaming request).
         get(candidate)
         return candidate
     except requests.RequestException:
         # No matching snapshot: use the most recent build instead.
         tip_revision = get("https://storage.googleapis.com/chromium-browser-snapshots/%s/LAST_CHANGE" % (
             self.chromium_platform_string())).text.strip()
         return "https://storage.googleapis.com/chromium-browser-snapshots/%s/%s/chromedriver_%s.zip" % (
             self.chromium_platform_string(), tip_revision, self.platform_string())
コード例 #54
0
ファイル: browser.py プロジェクト: dracular/wpt
 def _official_chromedriver_url(self, chrome_version):
     """Resolve the official chromedriver download URL for *chrome_version*,
     or None when neither lookup succeeds.

     See http://chromedriver.chromium.org/downloads/version-selection
     """
     parts = chrome_version.split(".")
     assert len(parts) == 4
     # Try MAJOR.MINOR.BUILD first, then fall back to MAJOR alone.
     lookups = (
         "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s.%s.%s" % tuple(parts[:-1]),
         "https://chromedriver.storage.googleapis.com/LATEST_RELEASE_%s" % parts[0],
     )
     for lookup_url in lookups:
         try:
             release = get(lookup_url).text.strip()
             break
         except requests.RequestException:
             continue
     else:
         return None
     return "https://chromedriver.storage.googleapis.com/%s/chromedriver_%s.zip" % (
         release, self.platform_string())
コード例 #55
0
ファイル: browser.py プロジェクト: larsbergstrom/servo
 def _chromium_chromedriver_url(self, chrome_version):
     """Pick a chromedriver URL from the Chromium snapshot bucket for
     *chrome_version*; falls back to tip-of-tree when the exact build is gone."""
     try:
         # Match the snapshot to this Chrome version's base revision.
         omaha_deps = get("https://omahaproxy.appspot.com/deps.json?version=" + chrome_version).json()
         snapshot_url = "https://storage.googleapis.com/chromium-browser-snapshots/%s/%s/chromedriver_%s.zip" % (
             self.chromium_platform_string(), omaha_deps['chromium_base_position'], self.platform_string())
         # Check the status without downloading the content (this is a streaming request).
         get(snapshot_url)
     except requests.RequestException:
         # Exact build unavailable: take the latest snapshot revision.
         last_change = get("https://storage.googleapis.com/chromium-browser-snapshots/%s/LAST_CHANGE" % (
             self.chromium_platform_string())).text.strip()
         snapshot_url = "https://storage.googleapis.com/chromium-browser-snapshots/%s/%s/chromedriver_%s.zip" % (
             self.chromium_platform_string(), last_change, self.platform_string())
     return snapshot_url
コード例 #56
0
ファイル: utils_test.py プロジェクト: ceph/autotest
 def testGetWithOpenFile(self):
     """utils.get() given an open file object should copy its contents to a
     new temp path. Python 2 module (`file` builtin, `print >>` syntax)."""
     tmpdir = utils.get_tmp_dir()
     tmppath = os.path.join(tmpdir, 'testfile')
     # Write a known line, close, then reopen for reading.
     tmpfile = file(tmppath, 'w')
     print >> tmpfile, 'Test string'
     tmpfile.close()
     tmpfile = file(tmppath)
     newtmppath = utils.get(tmpfile)
     self.assertEqual(file(newtmppath).read(), 'Test string\n')
コード例 #57
0
def run():
    """Exercise single and batched HTTP fetches against a status-code test
    service, with a pause between batches. Python 2 script (print statements)."""
    # Warm-up: two sequential fetches.
    get('http://httpstatuscodes.appspot.com/')
    get('http://httpstatuscodes.appspot.com/200')

    # A mix of redirect, error, and success responses.
    urls = [
        'http://httpstatuscodes.appspot.com/301',
        'http://httpstatuscodes.appspot.com/302',
        'http://httpstatuscodes.appspot.com/500',
        'http://httpstatuscodes.appspot.com/200'
    ]

    get_multiple(urls)

    print 'Falling asleep'
    time.sleep(1)
    print 'Waking up'

    # Second batch after the pause — presumably to exercise connection reuse
    # or caching; confirm against get_multiple's implementation.
    get_multiple(urls)
コード例 #58
0
	def __init__ (self):
		"""Build the expenses ("Spese") window: list model, input widgets,
		and rows loaded from the database."""
		
		# Columns: id, date, tank, type, quantity, name, money, image pixbuf,
		# and the image path backing the pixbuf.
		lst = gtk.ListStore (int, str, str, str, str, str, str, gtk.gdk.Pixbuf, str)
		self.col_lst = [_('Id'), _('Data'), _('Vasca'), _('Tipologia'), _('Quantita'), _('Nome'), _('Soldi'), _("Immagine")]
		
		dbwindow.DBWindow.__init__ (self, 2, 4, self.col_lst,
			[utils.DataButton (), utils.Combo (), utils.Combo (),
			 utils.IntEntry (), gtk.Entry (), gtk.Entry (), utils.ImgEntry ()], lst)
		
		# Populate rows from the spese table; the image path (y[7]) is both
		# rendered as a pixbuf and kept as the hidden last column.
		for y in utils.get ("select * from spese"):
			lst.append([y[0], y[1], y[2], y[3], y[4], y[5], y[6], utils.make_image(y[7]), y[7]])
		# Tank combo options come from the vasca table.
		for y in utils.get ("select * from vasca"):
			self.vars[1].append_text (y[3])
		# Fixed expense-type combo options.
		for y in [_("Vasca"), _("Pesce"), _("Pianta"), _("Invertebrato"), _("Fertilizzante"), _("Filtro"), _("Varie")]:
			self.vars[2].append_text (y)
		
		self.set_title (_("Spese"))
		self.set_size_request (600, 400)
		self.set_icon_from_file ("pixmaps/logopyacqua.jpg")
コード例 #59
0
ファイル: pypict.py プロジェクト: XuanShine/PySnifFile
def get_all_http_func(url):
    """Almost the same as get_all_http, but in a functional style.

    WARNING: the outputs sometimes differ.
    """
    body = get(url.strip()).text
    # For each fragment following an "http" occurrence: normalise double
    # quotes to single quotes, cut at the first quote, re-attach the prefix.
    return map(
        lambda frag: "http" + frag.replace('"', "'").split("'")[0],
        body.split("http")[1:],
    )
コード例 #60
0
ファイル: pypict.py プロジェクト: XuanShine/PySnifFile
def get_all_http(url):
    """Search url for all http(s) links.

    Yields every substring starting with "http" up to the next double or
    single quote in the fetched page.
    """
    start = "http"
    text = get(url.strip()).text
    i_start = text.find(start)
    while i_start != -1:
        # End at the first quote after the link (double preferred).
        dq = text.find('"', i_start + 1)
        i_end = dq if dq != -1 else text.find("'", i_start + 1)
        if i_end == -1:
            # Bug fix: previously an unterminated final link left i_end == -1,
            # which truncated the slice AND restarted find() from index 0,
            # producing an infinite generator. Yield the tail and stop.
            yield text[i_start:]
            return
        yield text[i_start:i_end]
        i_start = text.find(start, i_end + 1)