def makeChapters(self):
    """Build Page objects for every chapter, then start the download.

    For each chapter: fetch the chapter page, locate the "Last Page"
    anchor to learn the page count and base URL, and append one
    manga.Page per page number.
    """
    log.debug("makeChapters")
    for chapter in self.chapters:
        log.debug("make Pages for chapter %s" % chapter.number)
        # Skip chapters already present on disk when appending "all".
        c = utils.checkChapterDir(self.basedir, self.title, chapter.number)
        if c != 0 and self.action == "append" and \
           self.what_chapter == "all":
            continue
        try:
            soup = BS(utils.get_url(chapter.url, self.lookups).read())
        except KeyError as e:
            # Host missing from the DNS lookup cache: add it, retry once.
            log.debug("KeyError %s, adding to lookups" % e)
            self.addLookup(chapter.url)
            log.debug("Retrying...")
            soup = BS(utils.get_url(chapter.url, self.lookups).read())
        for page in soup.find_all('a'):
            if "Last Page" in page.get_text():
                log.debug(page.get('href'))
                # Anchor text looks like "Last Page (N)" — extract N.
                lastpage = page.get_text().rstrip(')').split('(')[-1]
                baseurl = page.get('href')
        # NOTE(review): lastpage/baseurl are unbound if no "Last Page"
        # anchor exists — assumes every chapter page has one; verify.
        for numPage in range(0, int(lastpage)):
            number = numPage + 1
            url = "%s/%d" % (baseurl, number)
            Page = manga.Page(utils.unifyNumber(number), url)
            chapter.Pages.append(Page)
    self.downloadBook()
def get_offices(self):
    """Return office Points: a hardcoded head office plus parsed branches.

    The head office is not listed on the scraped pages, so its data is
    embedded here; CBU and retail office pages are then parsed.
    """
    points = []
    point = Point()
    point.prov = self.uid
    point.type = TYPE_OFFICE
    point.name = u'Головное отделение'
    point.address = u'г. Минск, ул. Некрасова, 114'
    point.lat = 53.940182
    point.lng = 27.56712
    point.phones = [u'88011006000']
    point.time = u'пн-чт: 09.00-17.00, перерыв: 13.00-13.50, пт и предпраздничные дни: 09.00-16.00, перерыв: 13.00-13.40, сб, вс: выходные'
    point.check_coordinates = CHECK_OFFICIAL
    point.check_information = CHECK_OFFICIAL
    points.append(point)
    # Corporate banking (CBU) offices.
    page = PQ(get_url(self.__parse_data_office_cbu_url))
    for item in map(PQ, page('.itemFilial')):
        point = self.__parse_office(item)
        if point:
            points.append(point)
    # Retail offices.
    page = PQ(get_url(self.__parse_data_office_retail_url))
    for item in map(PQ, page('.itemFilial')):
        point = self.__parse_office(item)
        if point:
            points.append(point)
    return points
def get_offices(self):
    """Return office Points from the XML feed plus the regional HTML page.

    On the regional page an <h2> begins a new office (its name); the
    tags that follow fill in address/place/phone until the next <h2>.
    """
    points = []
    items_tree = ET.fromstring(get_url(self.__offices_xml_url))
    for item in items_tree.iter('item'):
        point = self.__parse_office(item)
        if point:
            points.append(point)
    page = PQ(get_url(self.__regional_offices_page_url))
    point = None
    for item in map(PQ, page('#content_internal span:eq(0)').children()):
        if item[0].tag not in self.__regional_offices_tags:
            continue
        if item[0].tag == 'h2':
            # A heading starts a new office entry.
            point = Point()
            point.prov = self.uid
            point.type = TYPE_OFFICE
            point.name = trim_spaces_and_commas(normalize_text(item.text()))
            point.check_information = CHECK_OFFICIAL
            continue
        if not point:
            # Detail tags before the first heading — nothing to attach to.
            continue
        # <br> separates the address from the other sub-items; replace it
        # with a marker string so the split survives PQ.text().
        item_html = replace_br(item.html(), ';;;')
        sub_items = PQ(item_html).text().split(';;;')
        point.address, point.place = split_address_place(sub_items[0])
        for sub_item in map(normalize_text, sub_items[1:]):
            if sub_item.startswith(u'т.ф.:'):
                point.phone = normalize_phones(sub_item[len(u'т.ф.:'):].split(','))
        warning_not_official_coordinates(point)
        points.append(point)
        point = None
    return points
def login():
    '''Handle user logins.

    Redirects to '/' before the launch time. On POST, validates the
    submitted credentials and redirects to the user's level URL; on GET,
    offers a resume link when the visitor is already authenticated.
    '''
    now = datetime.now()
    launch = datetime(2016, 3, 28, 19, 30, 0)
    # Site is closed until launch. (Removed a leftover debug print and
    # the `begin is False` comparison in favor of a direct check.)
    if now < launch:
        return redirect('/')
    if request.method == "POST":
        username = request.form.get('username').strip()
        password = request.form.get('password').strip()
        if validate_user(database, username, password):
            auth_user = User(username)
            login_user(auth_user)
            user_level = get_level(database, current_user.id)
            return redirect(get_url(database, user_level))
        else:
            # Bad credentials: re-render the form with an error flag.
            return render_template('login.html', force=False, error=True)
    else:
        if current_user.is_authenticated:
            # Already logged in: offer to resume at the current level.
            user_level = get_level(database, current_user.id)
            resume = get_url(database, user_level)
            return render_template('login.html', force=True,
                                   username=current_user.id, error=False,
                                   resume=resume)
        return render_template('login.html', force=False, error=None)
def get_offices(self):
    """Collect office points by following each office link on the list page."""
    results = []
    listing = PQ(get_url(self.__parse_data_offices_url))
    for anchor in map(PQ, listing('.office a')):
        office_url = self.site + anchor.attr('linkoffice')
        office_page = PQ(get_url(office_url))
        parsed = self.__parse_office(office_page('#body'))
        if parsed:
            results.append(parsed)
    return results
def test_get_url(self):
    """get_url builds infolist URLs for valid codes; falsy otherwise."""
    data = {
        '1-MMN-150': self.add_base_url('1-MMN-150.html'),
        '1-MAT-150': self.add_base_url('1-MAT-150.html')
    }
    for in_data, out_data in data.iteritems():  # Python 2 dict iteration
        self.assertEqual(utils.get_url(in_data, lang='sk'), out_data)
    # Unknown code or empty input yields a falsy result.
    self.assertFalse(utils.get_url('asdf', lang='es'))
    self.assertFalse(utils.get_url(''))
def test_get_url(self):
    """get_url builds the full sluzby.fmph.uniba.sk infolist URL for valid
    codes and is falsy for unknown or empty codes."""
    data = {
        '1-MMN-150': 'https://sluzby.fmph.uniba.sk/infolist/SK/1-MMN-150.html',
        '1-MAT-150': 'https://sluzby.fmph.uniba.sk/infolist/SK/1-MAT-150.html'
    }
    for in_data, out_data in data.iteritems():  # Python 2 dict iteration
        self.assertEqual(utils.get_url(in_data, lang='sk'), out_data)
    self.assertFalse(utils.get_url('asdf', lang='es'))
    self.assertFalse(utils.get_url(''))
def get_offices(self):
    """Return office Points for every known city.

    For each city, loads the city's office list, then fetches each
    office's detail page (identified by the numeric id embedded in the
    row's anchor id) and parses it.
    """
    points = []
    cities = self.__get_cities(self.__cities_offices_list_url)
    for city_id, param_iblock_id, param_balun_name, city_name in cities:
        city_page = PQ(get_url(self.__list_offices_url_template.format(city_id)))
        for item in map(PQ, city_page('.content_table table tbody tr.first_td')):
            # Anchor id looks like "office<NNN>" — strip the prefix.
            item_id = item('td:eq(0) a').attr('id').replace('office', '')
            item_data_url = self.__parse_data_office_url_template.format(item_id, param_iblock_id, param_balun_name)
            data_page = PQ(get_url(item_data_url).decode('utf8'))
            point = self.__parse_office(data_page, city_name)
            if point:
                points.append(point)
    return points
def get_terminals(self):
    """Return terminal Points by walking the region/city search form.

    Drills down country -> region -> city by re-issuing the search URL
    with cp1251 URL-encoded input2/input3 values, then parses each row
    of the city's result table.
    """
    points = []
    country_page = PQ(get_url(self.__terminal_search_url))
    # NOTE: `item` is deliberately reused at each nesting level.
    for item in map(PQ, country_page('.content select:eq(1) option:gt(0)')):
        input2_value = urllib.quote(item.attr('value').encode('cp1251'))
        region_url = self.__terminal_search_url + '&input2=' + input2_value
        region_page = PQ(get_url(region_url))
        for item in map(PQ, region_page('.content select:eq(2) option:gt(0)')):
            input3_value = urllib.quote(item.attr('value').encode('cp1251'))
            city_page = PQ(get_url(region_url + '&input3=' + input3_value))
            for item in map(PQ, city_page('.content .solid_table tr:gt(0)')):
                point = self.__parse_terminal(item)
                if point:
                    points.append(point)
    return points
def get_atms(self):
    """Return ATM points, joining the coordinates feed onto the items feed."""
    # Build terminal_id -> (lat, lng) from the coordinates XML.
    coords_by_terminal = {}
    coords_root = ET.fromstring(get_url(self.__atms_xml_coordinates_url))
    for node in coords_root.iter('item'):
        coords_by_terminal[node.find('terminal_id').text] = (
            node.find('lattitude').text,
            node.find('longitude').text,
        )
    # Parse each ATM item; the parser looks up its coordinates by id.
    results = []
    atms_root = ET.fromstring(get_url(self.__atms_xml_url))
    for node in atms_root.iter('item'):
        parsed = self.__parse_atm(node, coords_by_terminal)
        if parsed:
            results.append(parsed)
    return results
def make_programs_list(url):
    """Build the XBMC directory of programs for a series.

    `url` is a plugin query string; its series_id selects the series.
    Any failure is reported via utils.handle_error.
    """
    try:
        params = utils.get_url(url)
        programs = comm.get_series(params["series_id"])
        num_programs = len(programs)
        ok = True
        for p in programs:
            # Don't show any 'promo' shows. They don't get returned by Brightcove
            # NOTE(review): 1000 here vs "<19 mins" in the log message —
            # the units of p.duration aren't visible; confirm.
            if p.duration and p.duration < 1000:
                utils.log("Skipping program %s (duration <19 mins)" % p.get_list_title())
                num_programs -= 1
                continue
            listitem = xbmcgui.ListItem(label=p.get_list_title(),
                                        iconImage=p.get_thumbnail(),
                                        thumbnailImage=p.get_thumbnail())
            listitem.setInfo('video', p.get_xbmc_list_item())
            if hasattr(listitem, 'addStreamInfo'):
                listitem.addStreamInfo('audio', p.get_xbmc_audio_stream_info())
                listitem.addStreamInfo('video', p.get_xbmc_video_stream_info())
            # Build the URL for the program, including the list_info
            url = "%s?program_id=%s" % (sys.argv[0], p.id)
            # Add the program item to the list
            ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                             url=url, listitem=listitem,
                                             isFolder=False,
                                             totalItems=num_programs)
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=ok)
        xbmcplugin.setContent(handle=int(sys.argv[1]), content='episodes')
    except:
        utils.handle_error("Unable to fetch program listing")
def __get_regions_ids(self, url):
    """Extract the SECTION_ID of every region link on the given page."""
    page = PQ(get_url(url))
    region_links = map(PQ, page('.top_block_menu .region a'))
    return [link.attr('href').replace('section.php?SECTION_ID=', '')
            for link in region_links]
def parse_xbmc_url(self, string):
    """Populate this program's fields from a plugin URL query string.

    Only keys present in the query are set; url/thumbnail are
    URL-unquoted and date is parsed from 'YYYY-mm-dd HH:MM:SS'.
    """
    d = utils.get_url(string)
    if 'id' in d:
        self.id = d.get('id')
    if 'title' in d:
        self.title = d.get('title')
    if 'description' in d:
        self.description = d.get('description')
    if 'genre' in d:
        self.genre = d.get('genre')
    if 'season' in d:
        self.season = d.get('season')
    if 'duration' in d:
        self.duration = d.get('duration')
    if 'url' in d:
        self.url = urllib.unquote_plus(d.get('url'))
    if 'thumbnail' in d:
        self.thumbnail = urllib.unquote_plus(d.get('thumbnail'))
    if 'date' in d:
        # Convert the serialized timestamp back to a date object.
        ts = time.strptime(d.get('date'), '%Y-%m-%d %H:%M:%S')
        timestamp = time.mktime(ts)
        self.date = datetime.date.fromtimestamp(timestamp)
    if 'ooyalaid' in d:
        self.ooyalaid = d.get('ooyalaid')
    if 'isdummy' in d:
        self.isdummy = d.get('isdummy')
def make_series_list(url):
    """Build the XBMC directory of series for a category.

    `url` is a plugin query string; its category_id selects the
    programme listing. Errors show a dialog and are logged.
    """
    params = utils.get_url(url)
    try:
        iview_config = comm.get_config()
        series_list = comm.get_programme(iview_config, params["category_id"])
        series_list.sort()
        ok = True
        for s in series_list:
            url = "%s?series_id=%s" % (sys.argv[0], s.id)
            thumbnail = s.get_thumbnail()
            listitem = xbmcgui.ListItem(s.get_list_title(),
                                        iconImage=thumbnail,
                                        thumbnailImage=thumbnail)
            listitem.setInfo('video', { 'plot' : s.get_description() })
            # add the item to the media list
            ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                             url=url, listitem=listitem,
                                             isFolder=True)
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=ok)
        xbmcplugin.setContent(handle=int(sys.argv[1]), content='tvshows')
    except:
        d = xbmcgui.Dialog()
        message = utils.dialog_error("Unable to fetch listing")
        d.ok(*message)
        utils.log_error();
def make_list(url):
    """Build the XBMC directory of videos for a category (Python 2 code).

    `url` is a plugin query string; its optional `category` filters the
    video list. Errors go through utils.handle_error.
    """
    try:
        params = utils.get_url(url)
        category = params.get('category')
        videos = comm.get_videos(category)
        utils.log("Found %s videos" % len(videos))
        # fill media list
        ok = True
        for v in videos:
            listitem = xbmcgui.ListItem(label=v.get_title(),
                                        thumbnailImage=v.get_thumbnail())
            listitem.setInfo('video', v.get_xbmc_list_item())
            listitem.addStreamInfo('video', v.get_xbmc_stream_info())
            listitem.setProperty('IsPlayable', 'true')
            # Build the URL for the program, including the list_info
            url = "%s?%s" % (sys.argv[0], v.make_xbmc_url())
            # Add the program item to the list
            ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                             url=url, listitem=listitem,
                                             isFolder=False)
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=ok)
        xbmcplugin.setContent(handle=int(sys.argv[1]), content='episodes')
    except Exception, e:
        utils.handle_error('Unable to fetch video list', exc=e)
def make_programs_list(url):
    """Build the XBMC directory of episodes for an iView series.

    `url` is a plugin query string; its series_id selects the series.
    Errors show a dialog and are logged.
    """
    try:
        params = utils.get_url(url)
        iview_config = comm.get_config()
        programs = comm.get_series_items(iview_config, params["series_id"])
        ok = True
        for p in programs:
            listitem = xbmcgui.ListItem(label=p.get_list_title(),
                                        iconImage=p.get_thumbnail(),
                                        thumbnailImage=p.get_thumbnail())
            listitem.setInfo('video', p.get_xbmc_list_item())
            if hasattr(listitem, 'addStreamInfo'):
                listitem.addStreamInfo('audio', p.get_xbmc_audio_stream_info())
                listitem.addStreamInfo('video', p.get_xbmc_video_stream_info())
            # Build the URL for the program, including the list_info
            url = "%s?play=true&%s" % (sys.argv[0], p.make_xbmc_url())
            # Add the program item to the list
            ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                             url=url, listitem=listitem,
                                             isFolder=False,
                                             totalItems=len(programs))
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=ok)
        xbmcplugin.setContent(handle=int(sys.argv[1]), content='episodes')
    except:
        d = xbmcgui.Dialog()
        msg = utils.dialog_error("Unable to fetch listing")
        d.ok(*msg)
        utils.log_error();
def play(params):
    """Play an AFL Radio stream described by the plugin query string."""
    __addon__ = xbmcaddon.Addon()
    p = utils.get_url(params)
    # Show a dialog
    d = xbmcgui.DialogProgress()
    d.create(config.NAME, "Starting %s..." % p['name'])
    try:
        # Station artwork ships with the addon, keyed by station id.
        thumb = os.path.join(__addon__.getAddonInfo('path'), "resources",
                             "img", "%s.jpg" % p['id'])
        labels = {
            "title": p['name'],
            "artist": "AFL Radio",
            "genre": "Sport"
        }
        listitem = xbmcgui.ListItem(p['name'])
        listitem.setInfo(type='music', infoLabels=labels)
        listitem.setThumbnailImage(thumb)
        # PAPlayer or AUTO fails here for some absurd reason
        xbmc.Player(xbmc.PLAYER_CORE_DVDPLAYER).play(p['url'], listitem)
    except:
        # user cancelled dialog or an error occurred
        d = xbmcgui.Dialog()
        message = utils.dialog_error("Unable to play %s" % p['name'])
        d.ok(*message)
        utils.log_error();
def make_programs_list(url):
    """Build the XBMC directory of programs from a feed.

    `url` is a plugin query string supplying `series` and `category`.
    Errors go through utils.handle_error.
    """
    try:
        params = utils.get_url(url)
        programs = comm.get_series_from_feed(params['series'],
                                             category=params['category'])
        ok = True
        for p in programs:
            listitem = xbmcgui.ListItem(label=p.get_list_title(),
                                        iconImage=p.get_thumbnail(),
                                        thumbnailImage=p.get_thumbnail())
            listitem.setInfo('video', p.get_xbmc_list_item())
            listitem.setProperty('IsPlayable', 'true')
            if hasattr(listitem, 'addStreamInfo'):
                listitem.addStreamInfo('audio', p.get_xbmc_audio_stream_info())
                listitem.addStreamInfo('video', p.get_xbmc_video_stream_info())
            # Build the URL for the program, including the list_info
            url = "%s?play=true&%s" % (sys.argv[0], p.make_xbmc_url())
            # Add the program item to the list
            ok = xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                             url=url, listitem=listitem,
                                             isFolder=False,
                                             totalItems=len(programs))
        xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=ok)
        xbmcplugin.setContent(handle=int(sys.argv[1]), content='episodes')
    except:
        utils.handle_error('Unable to fetch program list')
def play(url):
    """Resolve and play a video from a plugin query string.

    Supports lookup by id, direct URL, or Ooyala id (which requires an
    AFL user token to build the m3u8 playlist URL).
    """
    try:
        params = utils.get_url(url)
        if 'id' in params:
            video_id = params['id']
            v = comm.get_video(video_id)
        elif 'url' in params or 'ooyalaid' in params:
            v = classes.Video()
            v.parse_xbmc_url(url)
        if 'ooyalaid' in params:
            # Ooyala streams need an authenticated playlist URL.
            loginToken = ooyalahelper.get_afl_user_token()
            stream_url = ooyalahelper.get_m3u8_playlist(params['ooyalaid'],
                                                        'true', loginToken,
                                                        'AFL')
        else:
            stream_url = v.get_url()
        listitem = xbmcgui.ListItem(label=v.get_title(),
                                    iconImage=v.get_thumbnail(),
                                    thumbnailImage=v.get_thumbnail(),
                                    path=stream_url)
        listitem.addStreamInfo('video', v.get_xbmc_stream_info())
        listitem.setInfo('video', v.get_xbmc_list_item())
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, listitem=listitem)
    except Exception as e:
        utils.handle_error('', e)
def __get_offices(self, url, city_name=''):
    """Parse the office table at `url` into Points.

    Rows with colspan=3 are section separators and are skipped. The
    working-hours column is sparse: a blank cell means "same as the
    previous row", so the last seen value is carried forward.
    """
    points = []
    page = PQ(get_url(url).decode('utf8'))
    time = None
    for item in map(PQ, page('#oo__content_value table tr:gt(0)')):
        if item('td').attr('colspan') == '3':
            continue
        point = Point()
        point.prov = self.uid
        point.type = TYPE_OFFICE
        point.name = normalize_text(item('td:eq(0)').text())
        point.address = normalize_address(city_name + item('td:eq(1) p:eq(0)').text())
        # The place may sit in the 2nd or 3rd paragraph of the cell.
        place = item('td:eq(1) p:eq(2)').text()
        if not place:
            place = item('td:eq(1) p:eq(1)').text()
        if place:
            point.place = normalize_text(place)
        new_time = item('td:eq(2)').text()
        if new_time:
            time = new_time
        point.time = normalize_time(time)
        point.check_information = CHECK_OFFICIAL
        if point.address in self.__addresses:
            # Known address: reuse its official coordinates.
            point.lat, point.lng = self.__addresses[point.address]
            point.check_coordinates = CHECK_OFFICIAL
        else:
            warning_not_official_coordinates(point)
        points.append(point)
    return points
def parse_xbmc_url(self, string):
    """Populate this program's fields from a plugin URL query string
    (Python 2 code: uses dict.has_key).

    thumbnail is URL-unquoted; date is parsed from 'dd/mm/YYYY HH:MM:SS'.
    """
    d = utils.get_url(string)
    if d.has_key("id"):
        self.id = d["id"]
    if d.has_key("title"):
        self.title = d["title"]
    if d.has_key("episode_title"):
        self.episode_title = d["episode_title"]
    if d.has_key("description"):
        self.description = d["description"]
    if d.has_key("duration"):
        self.duration = d["duration"]
    if d.has_key("category"):
        self.category = d["category"]
    if d.has_key("rating"):
        self.rating = d["rating"]
    if d.has_key("date"):
        # Convert the serialized timestamp back to a date object.
        timestamp = time.mktime(time.strptime(d["date"], "%d/%m/%Y %H:%M:%S"))
        self.date = datetime.date.fromtimestamp(timestamp)
    if d.has_key("thumbnail"):
        self.thumbnail = urllib.unquote_plus(d["thumbnail"])
    if d.has_key("url_path"):
        self.url_path = d["url_path"]
def make_list(url):
    """Build the XBMC match-video list (Python 2 code).

    `url` is a plugin query string; an optional `match` param selects a
    single match's videos, otherwise the generic videos feed is used.
    """
    params = utils.get_url(url)
    try:
        # Show a dialog
        pDialog = xbmcgui.DialogProgress()
        pDialog.create('Unofficial AFL Video', 'Fetching match list')
        # Temporary until we tag our match videos with the match id
        if params.has_key('match'):
            videos_url = config.MATCH_URL % params['match']
        else:
            videos_url = config.VIDEOS_URL + url + '&output=json'
        programs = comm.fetch_videos(videos_url)
        # fill media list
        ok = fill_media_list(programs)
    except:
        # oops print error message
        print "ERROR: %s (%d) - %s" % (sys.exc_info()[2].tb_frame.f_code.co_name, sys.exc_info()[2].tb_lineno, sys.exc_info()[1])
        ok = False
    # send notification we're finished, successfully or unsuccessfully
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=ok)
def __get_points(self, id, parse_point):
    """Parse catalog points for category `id`, following pagination.

    The first page (already fetched) is reused as an object; the
    remaining pagination URLs (deduplicated) are fetched lazily in the
    loop below.
    """
    points = []
    page = PQ(get_url(self.__points_data_url_template.format(id)))
    paging_urls = []
    for item in map(PQ, page("form[name=fcatalog] .nopt a")):
        url = item.attr("href")
        paging_urls.append(self.site + url)
    # Mixed list: the fetched first page plus URL strings.
    # NOTE(review): set() loses pagination order — assumed harmless.
    paging_urls = [page] + list(set(paging_urls))
    for page in paging_urls:
        if type(page) == str:
            # Still a URL string: fetch and parse it now.
            page = PQ(get_url(page))
        for item in map(PQ, page("form[name=fcatalog] #catalog tr:gt(0)")):
            point = parse_point(item)
            if point:
                points.append(point)
    return points
def makePages(self):
    """Build Image objects for every chapter's pages, then download.

    Fetches each chapter page, collects hosted image URLs (resolving
    the image host via the DNS lookup cache), appends a manga.Image per
    page, and keeps chapter.Images sorted by page number.
    """
    log.debug("makePages")
    for chapter in self.chapters:
        log.debug("make Pages for chapter %s" % chapter.number)
        # Skip chapters already present on disk when appending "all".
        c = utils.checkChapterDir(self.basedir, self.title, chapter.number)
        if c != 0 and self.action == "append" and \
           self.what_chapter == "all":
            continue
        soup = BS(utils.get_url("%s%s%s" % (self.baseurl, chapter.url,
                                            self.suffix),
                                self.lookups).read())
        for image in soup.find_all('img'):
            image_url = image.get('src')
            if "manga-img" in image_url:
                img_host = image_url.split('/', 3)[-2]
                if img_host not in self.lookups:
                    # Cache the host's A record so the URL can be
                    # rewritten to the resolved address.
                    self.lookups[img_host] = dnslookup(img_host, 'A')[0]
                image_url = image_url.replace(img_host,
                                              self.lookups[img_host])
                image_ext = image_url.rsplit('.', 1)[-1]
                last_page_number = 0
                if len(chapter.Images):
                    last_page_number = int(chapter.Images[-1].number
                                           .split('.')[0]) + 1
                i = manga.Image(utils.pageNumber(image_url,
                                                 last_page_number),
                                image_url, img_host, image_ext)
                chapter.Images.append(i)
        # BUG FIX: sorted() returns a new list which was being discarded;
        # sort the chapter's image list in place instead.
        chapter.Images.sort(key=lambda img: img.number)
    self.downloadBook()
def get_dep_files(self, build_deps=False, recommend_deps=False, source_deps=False, zip_runtime_deps=False):
    """Collect local file paths for this package's dependencies.

    Selects mono build/recommend/source deps per the flags, resolves
    each dep's files via the packaging module, and downloads distro zip
    (and optionally zip runtime) URLs into the external_zip_pkg cache.
    Returns the de-duplicated file list.
    """
    files = []
    url_dest = config.packaging_dir + os.sep + 'external_zip_pkg' + os.sep + self.package_env.name
    deps = []
    if build_deps:
        deps += self.get_mono_deps()
    if recommend_deps:
        deps += self.get_mono_recommend_deps()
    if source_deps:
        deps += self.get_mono_source_deps()
    for dep in deps:
        # Get files for mono deps
        # Woah, total cheat here, I imported packaging, and am using it!
        package = packaging.package(self.package_env, dep, HEAD_or_RELEASE=self.HEAD_or_RELEASE)
        # If this is a recommended package, don't fail if missing
        if self.get_mono_recommend_deps().count(package.name):
            fail_flag = False
        else:
            fail_flag = True
        files += package.get_files(fail_on_missing=fail_flag)
        # Get url files
        urls = package.get_distro_zip_deps()
        if zip_runtime_deps:
            urls += package.get_distro_zip_runtime_deps()
        for url in urls:
            files += [ url_dest + os.sep + os.path.basename(url) ]
            utils.get_url(url, url_dest)
    # Get rpm deps
    # NOTE(review): this append is immediately clobbered by the
    # reassignment below, so these rpm deps are never used here —
    # looks like dead code; confirm intent before changing.
    urls += self.get_rpm_deps()
    # Get url files
    urls = self.get_distro_zip_deps()
    if zip_runtime_deps:
        urls += self.get_distro_zip_runtime_deps()
    urls += self.get_rpm_deps()
    for url in urls:
        files += [ url_dest + os.sep + os.path.basename(url) ]
        utils.get_url(url, url_dest)
    return utils.remove_list_duplicates(files)
def test_get_url(self):
    """get_url builds the OAuth2 authorize URL for the given redirect URI."""
    redirect_uri = 'example.com'
    api = self.API
    url = utils.get_url(api, redirect_uri=redirect_uri)
    self.assertEqual(url,
                     'http://%s/oauth2/authorize?client_id=%s&redirect_uri=%s&response_type=code' % (api.host, self.DUOSHUO_SHORT_NAME, redirect_uri)
                     )
def get_exchanges(self):
    """Parse every exchange item on the exchanges page into points."""
    page = PQ(get_url(self.__parse_data_exchange_url))
    parsed = (self.__parse_exchange(PQ(node)) for node in page('.itemFilial'))
    return [p for p in parsed if p]
def __get_cities_ids(self):
    """Return cached (city_id, city_name) pairs, scraping on first use."""
    if self.__cities_ids:
        return self.__cities_ids
    listing = PQ(get_url(self.__cities_url))
    for node in map(PQ, listing('.b-cities-list a *')):
        city_id = node.attr('id').replace('city_', '')
        city_name = normalize_text(node.text())
        self.__cities_ids.append((city_id, city_name))
    return self.__cities_ids
def get_terminals(self):
    """Parse every row (minus the header) of the terminals table."""
    page = PQ(get_url(self.__parse_data_terminals_url))
    results = []
    for row in page('.office-list tr:gt(0)'):
        parsed = self.__parse_terminal(PQ(row))
        if parsed:
            results.append(parsed)
    return results
def get_exchanges(self):
    """Parse exchange points from the combined offices/exchanges table."""
    page = PQ(get_url(self.__parse_data_offices_and_exchanges_url))
    parsed_rows = (self.__parse_exchange(PQ(row))
                   for row in page('.office-list tr:gt(0)'))
    return [p for p in parsed_rows if p]
async def _inner(evt):
    """Forward a matched Telegram message to IFTTT.

    Fires only when the sender's id or username is in `people`; builds
    a short header/body preview plus the message URL and posts them.
    """
    msg = evt.message
    sender = evt.message.sender
    if sender and any(sender.id == p or sender.username == p for p in people):
        channel = await self._client.get_entity(msg.to_id)
        header = "{}在{}说了: ".format(
            ' '.join([msg.sender.first_name, msg.sender.last_name]),
            channel.title)
        # Truncate the message preview to 20 characters.
        body = evt.raw_text[:20] + ('...' if len(evt.raw_text) > 20 else '')
        url = get_url(channel, msg)
        await self._send_to_ifttt_async(event, key, header, body, url)
def get_active():
    """Show the current user's active cart and handle adding an item.

    Fetches the active cart from the API; on form submission, stores a
    new one-item cart and redirects back to this view.
    """
    form = ItemForm()
    cart = utils.get_url(API_URL + '/cart/active/' + current_user.id)['items']
    if cart is None:  # idiom fix: identity check for None, not ==
        cart = []
    if form.validate_on_submit():
        items = {}
        items[form.name.data] = form.amount.data
        cart = Cart(author=current_user.id, items=items)
        utils.put(API_URL + '/cart', cart.to_json())
        return redirect(url_for('.get_active'))
    return render_template('views/base/account/account_cart.html',
                           title='Cart', bg_img='side.jpg', form=form,
                           cart=cart)
def extract_resource_from_request():
    """Extracts and returns a python file type object from POST field data."""
    if not (request.form or request.files):
        raise ValueError("Received no data.")
    if request.form:
        # URL-based upload: fetch the remote resource and return it.
        return get_url(request.form["url"])
    if not isinstance(request.files["file"], FileStorage):
        raise ValueError("Invalid file type.")
    return request.files["file"]
def send_post(submission, r2t):
    """Send an image submission to Telegram; reject everything else.

    Returns r2t.send_img's result for images, False otherwise.
    """
    what, url, ext = get_url(submission)
    title = submission.title
    link = submission.shortlink
    text = '{}\n{}'.format(title, link)
    # BUG FIX: ('img') is just the string 'img', so the original
    # `what not in ('img')` was a substring test; use a real tuple.
    if what not in ('img',):
        return False
    # elif what == 'album':
    #     just_send_an_album(t_channel, url, bot)
    #     return True
    return r2t.send_img(url, ext, text)
def asm_news(dbo, update=False):
    """Return cached ASM news, refreshing from URL_NEWS when empty or forced.

    dbo: database connection; update: force a refresh even when cached.
    """
    s = cstring(dbo, "ASMNews")
    if s == "" or update:
        try:
            s = utils.get_url(URL_NEWS)["response"]
            al.debug("Updated ASM news, got %d bytes" % len(s), "configuration.asm_news", dbo)
            # News HTML is trusted content; skip XSS sanitisation on store.
            cset(dbo, "ASMNews", s, sanitiseXSS=False)
        except:
            # Best effort: on any failure keep the cached value and log.
            em = str(sys.exc_info()[0])
            al.error("Failed reading ASM news: %s" % em, "configuration.asm_news", dbo)
    return s
def assert_change_password_invalid(client, old_password, new_password):
    """Assert that changing the password with a wrong old password fails
    with HTTP 400 and the expected error payload."""
    with pytest.raises(StatusCodeException) as e:
        client.users.change_password(old_password, new_password)
    utils.assert_status_code_exception(
        exception=e.value,
        status_code=400,
        method="POST",
        url=utils.get_url("changepwd"),
        data={
            "error": "Unable to update password: Old password does not match."
        },
    )
def send_post(submission, r2t):
    """Send a submission to Telegram: albums as text followed by the
    album itself; everything else via send_gif_img."""
    what, url, ext = get_url(submission)
    title = submission.title
    link = submission.shortlink
    text = '{}\n\n/r/{}\n{}'.format(title, subreddit, link)
    if what == 'album':
        # For albums, also include the album's base URL in the text.
        base_url = submission.url
        text = '{}\n{}\n\n/r/{}\n{}'.format(title, base_url, subreddit, link)
        r2t.send_text(text)
        r2t.send_album(url)
        return True
    return r2t.send_gif_img(what, url, ext, text)
def historical_quotes(self, sym, is_compact):
    """Return a DataFrame of daily OHLCV history for `sym`.

    is_compact selects a 3-month chart, otherwise 2 years. The latest
    quote is appended when the chart data ends before the quote date.
    Returns None when the quote or chart data is unavailable.
    """
    sym = sym.replace('-', '.')
    qdate, qopen, qhigh, qlow, qclose, qvolume = self.quote(sym)
    if qdate is None:
        return None
    if is_compact is True:
        url = API_BASE_STOCK + sym + '/chart/3m'
    else:
        url = API_BASE_STOCK + sym + '/chart/2y'
    data = utils.get_url(url)
    if data is None:
        return None
    jdata = json.loads(data)
    dates = []
    # Renamed from open/high/low/close/volume to avoid shadowing the
    # builtin open() (and for symmetry).
    opens = []
    highs = []
    lows = []
    closes = []
    volumes = []
    for item in jdata:
        if 'open' not in item:
            return None
        dates.append(item['date'])
        opens.append(float(item['open']))
        highs.append(float(item['high']))
        lows.append(float(item['low']))
        closes.append(float(item['close']))
        volumes.append(int(item['volume']))
    # Append the live quote if the chart lags behind it. The `dates`
    # guard also fixes an IndexError on an empty chart response.
    if dates and dates[-1] < qdate:
        dates.append(qdate)
        opens.append(qopen)
        highs.append(qhigh)
        lows.append(qlow)
        closes.append(qclose)
        volumes.append(qvolume)
    df = pd.DataFrame(index=dates)
    df.index.name = 'date'
    df['open'] = opens
    df['high'] = highs
    df['low'] = lows
    df['close'] = closes
    df['volume'] = volumes
    return df
def main():
    """Interactive entry point: prompt for options, then download.

    Audio-only downloads a single stream; video quality 1 downloads a
    progressive stream; otherwise adaptive video+audio streams are
    downloaded separately and muxed.
    """
    url = utils.get_url()
    output_path = utils.get_output_path()
    audio_only = utils.get_audio_only()
    if not audio_only:
        video_quality = utils.get_stream_quality()
    if audio_only:
        stream_list = utils.list_streams(url, audio_only)
        if not stream_list:
            print(
                'There are no streams available for the options that you specified.'
            )
            return
        itag = utils.get_itag()
        utils.download_stream(url, itag, audio_only, output_path)
        utils.print_video_statistics(url)
    elif video_quality == 1:
        # Progressive: one stream containing both video and audio.
        progressive = True
        stream_list = utils.list_streams(url, audio_only, progressive)
        if not stream_list:
            print(
                'There are no streams available for the options that you specified.'
            )
            return
        itag = utils.get_itag()
        utils.download_stream(url, itag, audio_only, output_path)
        utils.print_video_statistics(url)
    else:
        # Adaptive: separate video and audio streams, muxed afterwards.
        progressive = False
        stream_lists = utils.list_streams(url, audio_only, progressive)
        if not stream_lists[0]:
            print(
                'There are no video streams available for the options that you specified.'
            )
            return
        elif not stream_lists[1]:
            print(
                'There are no audio streams available for the options that you specified.'
            )
            return
        elif not stream_lists:
            # NOTE(review): unreachable — a falsy stream_lists would have
            # already failed the indexing above.
            print(
                'There are no video or audio streams available for the options that you specified.'
            )
            return
        itags = utils.get_adaptive_itags()
        utils.download_and_mux(url, itags[0], itags[1], output_path)
def parse_mendeley_html(base_url): """ Takes a mendeley profile url. Returns the user's publications in bibjson format. """ # We know that the parsed_url's path starts with /profiles/ # and the scheme for mendeley urls is # www.mendeley.com/profiles/userid/other_things # so if we split the path by /, then user_id is split_path[2]. purl = urlparse.urlparse(base_url) split_path = purl.path.split("/") user_id = split_path[2] sanitized_path = "/profiles/" + user_id + "/publications/journal/" new_url_tuple = (purl.scheme, purl.netloc, sanitized_path, "", "", "") url = urlparse.urlunparse(new_url_tuple) page = get_url(url) soup = ET.fromstring(page.getvalue()) try: pagination = soup.get_element_by_id("user-publications").find_class( "right")[0] except KeyError: return [] num_pages = len(pagination.cssselect("div.pagemenu > ul > li")) if num_pages == 0: num_pages = 1 citation_dict = {} for i in range(num_pages): page_url = "/".join([url, str(i)]) page = get_url(page_url) soup = ET.fromstring(page.getvalue()) citation_dict.update(parse_citation_page(soup)) citation_list = [item for item in citation_dict.itervalues()] return citation_list
def revise_url(site, before, after):
    """Rewrite stored URLs for `site` across all subjects.

    When both `before` and `after` are None, each URL is replaced by its
    URL-quoted keyword; otherwise `before` is substituted with `after`.
    """
    for subject in SUBJECTS:
        url = utils.get_url(subject)
        for k in url[site].keys():
            link = url[site][k]
            if before is None and after is None:
                # Switch to keyword search: store only the quoted keyword.
                b_word = quote_plus(k)
                url[site][k] = b_word
            else:
                url[site][k] = link.replace(before, after)
            print(url[site][k])
        # Re-save the updated URL map.
        utils.save_url(subject, url)
def get_chapter(chapter_url):
    """Fetch a chapter page (GBK-encoded) and return its cleaned text.

    Exits the process on any fetch error after printing the traceback.
    """
    chtml = ""
    try:
        r = utils.get_url(chapter_url)
        r.encoding = 'gbk'  # site serves GBK; force it before .text
        chtml = r.text
    except Exception:
        print(chapter_url)
        traceback.print_exc()
        sys.exit(1)
    c = BeautifulSoup(chtml, "html.parser")
    main = c.find("div", id="main")
    # Chapter body lives in the 5th <div>'s 3rd <p> — position-based,
    # so this breaks if the site layout changes.
    raw = main.find_all("div")[4].find_all("p")[2].text
    return "\n".join([x.strip() for x in raw.split("\n")])
def send_post(submission, r2t):
    """Send a text submission (title + optional selftext) to Telegram.

    Returns False for non-text submissions, True after sending.
    """
    what, _, _ = get_url(submission)
    if what != 'text':
        # BUG FIX: the original evaluated the bare expression `False`
        # without returning it, so non-text posts fell through and were
        # sent anyway.
        return False
    texts = [submission.title]
    punchline = submission.selftext.strip()
    if len(punchline) > 0:
        texts.append(punchline)
    main_text = '\n\n'.join(texts)
    link = submission.shortlink
    text = '{main}\n\n{link}'.format(
        main=main_text,
        link=link)
    r2t.send_text(text, disable_web_page_preview=True)
    return True
def send_post(submission, r2t):
    """Send an image submission unless it is a duplicate.

    Non-images and already-seen URLs are rejected with
    SupplyResult.DO_NOT_WANT_THIS_SUBMISSION.
    """
    what, url, ext = get_url(submission)
    title = submission.title
    link = submission.shortlink
    text = '{title}\n\n{link}\n{channel}'.format(title=title, link=link,
                                                 channel=t_channel)
    if what == 'img':
        # Drop duplicates (dup_check_and_mark also records the URL).
        # Idiom fix: rely on truthiness instead of `is True`.
        if r2t.dup_check_and_mark(url):
            return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION
        return r2t.send_gif_img(what, url, ext, text)
    else:
        return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION
def send_post(submission, r2t):
    """Forward sufficiently-upvoted text submissions to Telegram."""
    what, _, _ = get_url(submission)
    if what != 'text':
        return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION
    # Wait for the post to collect enough karma before sending.
    if submission.score < 111:
        return SupplyResult.SKIP_FOR_NOW
    parts = [submission.title]
    punchline = submission.selftext.strip()
    if punchline:
        parts.append(punchline)
    message = '{main}\n\n{link}'.format(main='\n\n'.join(parts),
                                        link=submission.shortlink)
    return r2t.send_text(message, disable_web_page_preview=True)
def create(self, request, *args, **kwargs):
    """
    Adds a new url and returns the corresponding short url
    """
    serializer = self.get_serializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    self.perform_create(serializer)
    headers = self.get_success_headers(serializer.data)
    short = get_url(self.request, serializer.data['code'])
    payload = {'short_url': short}
    return response.Response(payload,
                             status=status.HTTP_201_CREATED,
                             headers=headers)
def analyse(url, GET, data=None, PATH=False, header=HEADER): param_msg = {} # 用于收集各个参数 对应的 context,position信息 # 输入:GET、PATH\url\data # 明确 要请求的参数data,以及请求的url if GET: if PATH: # 从url路径中取要替换的成分 data = get_valid_paths(url) else: # 从url参数中取要替换的成分 url_parse_result = urlparse( url ) # ParseResult(scheme='http', netloc='192.168.1.46', path='/dvwa/vulnerabilities/xss_r/', params='', query='name=hi', fragment='') query = url_parse_result.query if query == "": query = url_parse_result.path.split("/")[-1] data = get_query_dict(query) # 参数键值对 url = get_url(url, GET) # request库中url和参数是分开传参的,故得到没有参数的url # 对于post data就是data for param in data: scout_params = copy.deepcopy(data) scout_str = gen_scout_str() # 特征字符串 if PATH: # 对于url路径参数 repace_url = url # 防止一个url被替换多次,故不改变原始url repace_url = repace_url.replace(param, scout_str) resp = requester(repace_url, headers=header, data=None, GET=GET, delay=0, timeout=30) else: # 对于get ,post参数 scout_params[param] = scout_str resp = requester(url, data=scout_params, headers=header, GET=GET, delay=0, timeout=30) text = resp.text parser = HtmlParser(target=scout_str) parser.feed(text) logger.info("参数{}的上下文:{}".format(param, parser.context)) logger.red_line() param_msg.update({param: parser.context}) # logger.info("param_msg:%s" % str(param_msg)) get_effective_chars(url, data, GET, param_msg, header=header)
def parse_xbmc_url(self, string):
    """Populate this program's fields from a plugin URL query string
    (Python 2 code).

    NOTE(review): only 'duration' is guarded with has_key; every other
    field is indexed directly and will raise KeyError if absent —
    confirm all callers always supply them.
    """
    d = utils.get_url(string)
    self.id = d['id']
    self.title = d['title']
    self.description = d['description']
    if d.has_key('duration'):
        self.duration = d['duration']
    self.category = d['category']
    self.rating = d['rating']
    # Convert the serialized timestamp back to a date object.
    timestamp = time.mktime(time.strptime(d['date'], '%d/%m/%Y %H:%M:%S'))
    self.date = datetime.date.fromtimestamp(timestamp)
    self.thumbnail = d['thumbnail']
def send_post(submission, r2t):
    """Send gif/img submissions; NSFW ones go out as a text link instead."""
    what, url, ext = get_url(submission)
    if what not in ('gif', 'img'):
        return False
    title = submission.title
    link = submission.shortlink
    if submission.over_18:
        # NSFW: don't embed the media, send a labelled link post.
        nsfw_text = 'NSFW\n{}\n{}\n\n{}\n\nby @just_hmmm'.format(
            submission.url, title, link)
        return r2t.send_text(nsfw_text, disable_web_page_preview=True)
    caption = '{}\n{}\n\nby @just_hmmm'.format(title, link)
    return r2t.send_gif_img(what, url, ext, caption)
def quote(self, sym):
    """Return (date, open, high, low, close, volume) for `sym`.

    On fetch failure returns (None, 0, 0, 0, 0, 0).
    """
    sym = sym.replace('-', '.')
    data = utils.get_url(API_BASE_STOCK + sym + '/quote')
    if data is None:
        return None, 0, 0, 0, 0, 0
    jdata = json.loads(data)
    dt = parser.parse(jdata['latestTime'])
    return (dt.strftime('%Y-%m-%d'),
            float(jdata['open']),
            float(jdata['high']),
            float(jdata['low']),
            float(jdata['close']),
            int(jdata['latestVolume']))
def send_post(submission, r2t):
    """Send gif submissions; NSFW ones are posted as a labelled text link."""
    what, gif_url = get_url(submission)
    if what != 'gif':
        return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION
    title = submission.title
    link = submission.shortlink
    if submission.over_18:
        nsfw_text = '🛑NSFW🛑\n{}\n{}\n\n{}\n\nby @r_gifs'.format(
            submission.url, title, link)
        return r2t.send_text(nsfw_text, disable_web_page_preview=True)
    return r2t.send_gif(gif_url, '{}\n{}\n\nby @r_gifs'.format(title, link))
def send_post(submission, r2t):
    """Send a gif submission (with extension info); NSFW goes out as text."""
    kind, gif_url, extension = get_url(submission)
    if kind != 'gif':
        return False
    title, link = submission.title, submission.shortlink
    if submission.over_18:
        nsfw_text = 'NSFW\n{}\n{}\n\n{}\n\nby @r_gifs'.format(
            submission.url, title, link)
        return r2t.send_text(nsfw_text, disable_web_page_preview=True)
    caption = '{}\n{}\n\nby @r_gifs'.format(title, link)
    return r2t.send_gif(gif_url, extension, caption)
def login(account_number: str, pin: str):
    """Authenticate: fetch the shuffled keypad, translate the PIN into keypad
    positions, encrypt it, and POST a signed login request."""
    keypad, secret, key = download_keypad()
    # The server expects each PIN digit expressed as its position on the
    # freshly downloaded keypad, comma-separated.
    positions = [str(keypad.index(digit)) for digit in pin]
    auth_pin = encrypt_pin(",".join(positions), key)
    headers = {
        "x-authcif": account_number,
        "x-authpin": auth_pin,
        "x-authsecret": secret,
    }
    return signed_request("POST", get_url(login_url), token="", body={},
                          headers=headers)
def send_post(submission, bot):
    """Download a gif and upload it to the channel as a Telegram document.

    Skips non-gif submissions and files larger than 10 MB.
    """
    kind, gif_url = get_url(submission)
    if kind != "gif":
        return False
    # Download gif
    download_file(gif_url, 'r_gifs.gif')
    # Telegram will not autoplay big gifs
    max_bytes = 10 * 1024 * 1024
    if os.path.getsize('r_gifs.gif') > max_bytes:
        return False
    # NOTE(review): sibling bots use submission.shortlink; confirm that
    # short_link is the correct attribute for this submission type.
    caption = '%s\n%s\n\nby @r_gifs' % (submission.title, submission.short_link)
    with open('r_gifs.gif', 'rb') as gif_file:
        bot.sendDocument(t_channel, gif_file, caption=caption)
    return True
def send_post(submission, r2t):
    """Dispatch a submission to Telegram according to its media type.

    Return values follow the supplier contract:
      False – do not send this submission, move to the next one.
      True  – submission sent successfully.
      None  – send nothing this time; just sleep.
    """
    what, url, ext = get_url(submission)

    # Gather everything we need from the submission.
    title = submission.title          # headline
    punchline = submission.selftext   # text content (not always present)
    base_url = submission.url         # outbound link (not always present)
    link = submission.shortlink       # reddit link to the submission

    # Assemble the Telegram post text: title, then optional body text,
    # then optional link (only for 'other'), an extra blank line whenever
    # any optional part was added, and finally the reddit link.
    segments = [title]
    if punchline:
        segments.append(punchline)
    if base_url and what == 'other':
        segments.append(base_url)
    text = "\n\n".join(segments) + "\n\n"
    if len(segments) > 1:
        text += "\n"
    text += link

    # Route by media type.
    if what in ('text', 'other'):
        return r2t.send_text(text)  # returns True
    if what == 'album':
        r2t.send_text(text)
        r2t.send_album(url)
        return True
    if what in ('gif', 'img'):
        return r2t.send_gif_img(what, url, ext, text)  # returns True
    return False
def send_post(submission, r2t):
    """Forward gif/img submissions; NSFW content is sent as a text link."""
    kind, media_url, extension = get_url(submission)
    if kind not in ('gif', 'img'):
        return SupplyResult.DO_NOT_WANT_THIS_SUBMISSION
    title, link = submission.title, submission.shortlink
    if submission.over_18:
        nsfw_text = '🙈NSFW\n{}\n{}\n\n{}\n\nby @just_hmmm'.format(
            submission.url, title, link)
        return r2t.send_text(nsfw_text, disable_web_page_preview=True)
    caption = '{}\n{}\n\nby @just_hmmm'.format(title, link)
    return r2t.send_gif_img(kind, media_url, extension, caption)
def list(self, request, *args, **kwargs):
    """
    Outputs the list of links, annotating each serialized link with its
    resolvable short_url.

    Fix: previously short_url was only added on the non-paginated path, so
    paginated responses silently omitted it. Enrich both paths identically.
    """
    queryset = self.filter_queryset(self.get_queryset())
    page = self.paginate_queryset(queryset)
    if page is not None:
        serializer = self.get_serializer(page, many=True)
        data = serializer.data
        for link in data:
            link['short_url'] = get_url(self.request, link['code'])
        return self.get_paginated_response(data)
    serializer = self.get_serializer(queryset, many=True)
    data = serializer.data
    for link in data:
        link['short_url'] = get_url(self.request, link['code'])
    return response.Response(data)
def get_novelinfo(self, novel_url):
    """Scrape a novel page for its title, author and summary.

    Returns a dict with keys 'title', 'author' and 'summary'.
    """
    soup = utils.get_url(novel_url)
    title = soup.select('span[itemprop="articleSection"]')[0].text
    author = soup.select('span[itemprop = "author"]')[0].text
    # Round-trip tag tree -> text -> tag tree after stripping <b>, so the
    # bold tag does not introduce a spurious line break in the summary.
    novel_intro = bs(
        str(soup.select('div#novelintro')[0]).replace('<b>', ''), 'lxml')
    summary = utils.loop_tag(novel_intro)
    novel_info = {'title': title, 'author': author, 'summary': summary}
    # Fix: the dict above always holds three keys, so the old
    # `if novel_info: ... else: print('get novelinfo failed.')` branch could
    # never report failure — report success unconditionally instead.
    print('get novelinfo successfully.')
    return novel_info
def send_post(submission, r2t):
    """Send albums as text + album, gifs/images as media; skip the rest."""
    kind, media_url, extension = get_url(submission)
    title = submission.title
    link = submission.shortlink
    caption = '{}\n{}'.format(title, link)
    if kind == 'album':
        caption = '{}\n{}\n\n{}'.format(title, submission.url, link)
        r2t.send_text(caption)
        r2t.send_album(media_url)
        return True
    if kind in ('gif', 'img'):
        return r2t.send_gif_img(kind, media_url, extension, caption)
    return False
def send_post(submission, r2t):
    """Format and send a submission with its vote count and subreddit tag."""
    what, url, ext = get_url(submission)
    # Fields shared by every message template.
    common = dict(
        title=submission.title,
        link=submission.shortlink,
        subreddit=submission.subreddit,
        votes=submission.score,
    )
    if what == 'text':
        text = ('{title}\n\n{main_text}\n\n{votes} upvotes\n/r/{subreddit}\n{link}'
                .format(main_text=submission.selftext, **common))
        # Refuse messages that exceed the length budget.
        if len(text) > 2345:
            return False
        return r2t.send_text(text)
    if what in ('other', 'album'):
        text = ('{title}\n{base_url}\n\n{votes} upvotes\n/r/{subreddit}\n{link}'
                .format(base_url=submission.url, **common))
        if what == 'other':
            return r2t.send_text(text)
        r2t.send_text(text)
        r2t.send_album(url)
        return True
    if what in ('gif', 'img'):
        text = ('{title}\n\n{votes} upvotes\n/r/{subreddit}\n{link}'
                .format(**common))
        return r2t.send_gif_img(what, url, ext, text)
    return False