def open_map(self, map_id: int) -> None:
    """Ensure the .osu file for `map_id` exists on disk, downloading it if missing."""
    filepath = f'pp/maps/{map_id}.osu'

    if not path.exists(filepath):
        # Not cached locally - fetch the map from the osu! website.
        resp = req_get(f'https://old.ppy.sh/osu/{map_id}')
        if not resp:
            raise Exception(f'Could not find map {filepath}!')

        with open(filepath, 'wb+') as f:
            f.write(resp.content)
def ensure_config_ready() -> bool:
    """Ensure a usable config is present, fetching the sample config when absent."""
    if path.exists('config.py'):
        return True

    if not path.exists('config.sample.py'):
        # No sample on disk either - download the default one.
        resp = req_get('http://tiny.cc/l7wzpz')
        if not resp:
            log('Failed to fetch default config.', Ansi.LRED)
            return False

        with open('config.sample.py', 'w+') as f:
            f.write(resp.text)
def get_phrase(url) -> str:
    """Transcribe a user's voice message into text.

    Parameters
    ----------
    url : str
        link to the user's voice message

    Returns
    -------
    str
        the recognized text, or a fixed Russian fallback string when
        recognition fails
    """
    # pipe:0 is ffmpeg's stdin, pipe:1 its stdout - convert the downloaded
    # audio to WAV entirely in memory.
    converter = Popen(
        'ffmpeg -v fatal -hide_banner -i pipe:0 -f wav pipe:1',
        shell=True, stdin=PIPE, stdout=PIPE)
    wav_bytes, _ = converter.communicate(req_get(url).content)

    audio_buffer = BytesIO(wav_bytes)
    audio_buffer.seek(0)

    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_buffer) as source:
        audio = recognizer.record(source)

    try:
        return recognizer.recognize_google(audio, language="ru-RU")
    except sr.UnknownValueError:
        return 'непонятное сообщение'
def get_online_streamers(streamer_list=None, full_verbose=False, verbose=True):
    """Return the streamers from `streamer_list` currently live on Twitch.

    Parameters:
        streamer_list: iterable of Twitch channel names, or None.
        full_verbose: print per-streamer diagnostics.
        verbose: print summary progress messages.

    Returns:
        list of streamer names the kraken API reports as online.
    """
    # BUG FIX: previously only initialised inside the `else` branch, so
    # calling with streamer_list=None raised NameError at the final return.
    online_streamers = []
    if verbose:
        print("[+] Getting list of online streamers")
    if streamer_list is None:
        if full_verbose:
            print("[-] No streamer list passed")
    else:
        for streamer in streamer_list:
            try:
                data = req_get("https://api.twitch.tv/kraken/streams/" + streamer).json()
            except Exception as e:
                if full_verbose:
                    print("[-] JSON error: {}".format(e))
            else:
                try:
                    stream_data = data["stream"]
                except KeyError:
                    print("[-] Unable to get streamer data from API")
                else:
                    # A null "stream" entry means the channel is offline.
                    if stream_data is not None:
                        if full_verbose:
                            print("[+] {} is online!".format(streamer))
                        online_streamers.append(streamer)
                    else:
                        if full_verbose:
                            print("[-] {} is offline!".format(streamer))
        if len(online_streamers) > 0:
            if verbose:
                print("[+] Current online streamers: {}".format(online_streamers))
        else:
            if verbose:
                print("[-] None of the passed streamers are currently online.")
    return online_streamers
def get(self, url, timeout=60, headers={}, verify=False, type='python'):
    """Fetch `url` via the chosen backend: 'python' (requests, falling back
    to curl on any failure), 'curl', 'php', or 'src' (version-specific
    source implementation)."""
    url = self.quote(url)
    if type == 'python':
        try:
            import requests
            from requests.packages.urllib3.exceptions import InsecureRequestWarning
            requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
            from requests import get as req_get
            return req_get(url, timeout=timeout,
                           headers=get_headers(headers), verify=verify)
        except:
            # requests unavailable or the call failed - fall back to curl.
            response = self._get_curl(url, timeout, headers, verify)
    elif type == 'curl':
        response = self._get_curl(url, timeout, headers, verify)
    elif type == 'php':
        response = self._get_php(url, timeout, headers, verify)
    elif type == 'src':
        if sys.version_info[0] == 2:
            response = self._get_py2(url, timeout, headers, verify)
        else:
            response = self._get_py3(url, timeout, headers, verify)
    return response
# NOTE(review): this function arrived with all indentation flattened onto one
# physical line, so the original nesting cannot be recovered with certainty
# (e.g. whether `duration = ""` was meant to overwrite the video "N/A" value),
# and the inline '# Make sure...' comment now truncates the rest of the line.
# Code left byte-identical; only this comment is added.
# Purpose (from visible calls): validate POSTed name/uri/mimetype, fetch the
# URI (GET for images, HEAD otherwise), and on HTTP 200 insert a row into the
# sqlite `assets` table, rendering a status-message template either way.
def process_asset(): conn = sqlite3.connect(database, detect_types=sqlite3.PARSE_DECLTYPES) c = conn.cursor() if (request.POST.get('name', '').strip() and request.POST.get('uri', '').strip() and request.POST.get('mimetype', '').strip()): name = request.POST.get('name', '').decode('UTF-8') uri = request.POST.get('uri', '').strip() mimetype = request.POST.get('mimetype', '').strip() # Make sure it's a valid resource uri_check = urlparse(uri) if not (uri_check.scheme == "http" or uri_check.scheme == "https"): header = "Ops!" message = "URL must be HTTP or HTTPS." return template('message', header=header, message=message) if "image" in mimetype: file = req_get(uri) else: file = req_head(uri) # Only proceed if fetch was successful. if file.status_code == 200: asset_id = md5(name + uri).hexdigest() strict_uri = uri_check.scheme + "://" + uri_check.netloc + uri_check.path if "image" in mimetype: resolution = Image.open(StringIO(file.content)).size else: resolution = "N/A" if "video" in mimetype: duration = "N/A" start_date = "" end_date = "" duration = "" c.execute( "INSERT INTO assets (asset_id, name, uri, start_date, end_date, duration, mimetype) VALUES (?,?,?,?,?,?,?)", (asset_id, name, uri, start_date, end_date, duration, mimetype)) conn.commit() header = "Yay!" message = "Added asset (" + asset_id + ") to the database." return template('message', header=header, message=message) else: header = "Ops!" message = "Unable to fetch file." return template('message', header=header, message=message) else: header = "Ops!" message = "Invalid input." return template('message', header=header, message=message)
# NOTE(review): indentation was flattened onto one line; the exact placement of
# the final "Not Logged In" return (inside or outside `if token:`) cannot be
# recovered, and mid-line '#' comments now truncate code. Left byte-identical.
# Purpose (from visible calls): decode a JWT from the 'jwtsess' cookie, fetch
# the signing secret from the host named in the token's unverified 'sig' claim
# (the in-code comment itself notes this check is easily bypassed - this looks
# like an intentionally vulnerable CTF view), then render `flag` only for
# user 'therock' with role 'admin'.
def validateToken(request, template, flag): token = request.COOKIES.get('jwtsess') if token: # check if token is valid (if not state invalid token) try: split = token.split('.')[1] split = b64decode(split + '=' * (4 - len(split) % 4)) unverfied_values = json.loads( split) # jwt is headers.keys.signature - have to fix padding sign_host = unverfied_values.get('sig') decoded = {} if any( approved_host in sign_host for approved_host in APPROVED_SIGNER ): # this is easily bypassed to a remote host by including the string anywhere in the url. secret = b64encode(req_get(sign_host).content) decoded = jwt.decode(token, secret) except Exception as e: print(e) return render(request, template, {'output': e}) # cant be bothered to implement this legit so hardcoded cases try: if decoded.get('user') == 'therock': if decoded.get('role') == 'admin': return render(request, template, {'output': flag}) except: return render(request, template, {'output': "Not valid user/role"}) return render(request, template, {'output': "Not Logged In"})
def create_message(self, url):
    """Fetch `url` and serialise [url, fetch-timestamp, page html] as a JSON string."""
    page = req_get(url)
    html_body = page.text
    fetched_at = time()
    return json_dumps([url, fetched_at, html_body])
def send_message(self, message, keyboard=None, image_url=None) -> None:
    """Send a message to the user, optionally with a keyboard and/or an image.

    Parameters
    ----------
    message: str
        text to send to the user
    keyboard: VkKeyboard
        keyboard made available to the user
    image_url: str
        link to an image to attach
    """
    payload = {
        'user_id': self._user_id,
        'message': message,
        'random_id': get_random_id(),
    }
    if keyboard:
        payload['keyboard'] = keyboard
    if image_url:
        # Download the image and re-upload it to VK as a photo attachment.
        image_data = BytesIO(req_get(image_url).content)
        image_data.seek(0)
        photo = VkUpload(self._vk_session).photo_messages(image_data)
        payload['attachment'] = "photo{}_{}".format(photo[0]["owner_id"],
                                                    photo[0]["id"])
    self._vk_session.method('messages.send', payload)
def check_update():
    """
    Check if there is a later version of Screenly-OSE
    available. Only do this update once per day.
    Return True if up to date was written to disk,
    False if no update needed and None if unable to check.
    """
    sha_file = path.join(settings.get_configdir(), 'latest_screenly_sha')

    last_update = None
    if path.isfile(sha_file):
        last_update = datetime.fromtimestamp(path.getmtime(sha_file))

    logging.debug('Last update: %s' % str(last_update))

    # Already checked within the last day - nothing to do.
    if last_update is not None and last_update >= datetime.now() - timedelta(days=1):
        return False

    if url_fails('http://lynda.banglardamal.org'):
        logging.debug('Unable to retreive latest SHA')
        return

    latest_sha = req_get('http://lynda.banglardamal.org/latest')
    if latest_sha.status_code != 200:
        logging.debug('Received non 200-status')
        return

    with open(sha_file, 'w') as f:
        f.write(latest_sha.content.strip())
    return True
def setup_user(oauth_provider, oauth_id, display_name, image_url):
    """Create (or update) the User for an OAuth identity, store their avatar
    as a 128x128 JPEG thumbnail, log them in, and redirect to the auth page."""
    key = dict(oauth_id=unicode(oauth_id), oauth_provider=oauth_provider)
    taken_slugs = set(u.display_slug for u in User.query.all())

    user = User.query.filter_by(**key).first()
    if not user:
        user = User(**key)
        db.session.add(user)

    user.display_slug = slugify_unique(display_name, taken_slugs)
    user.display_name = display_name
    user.oauth_token, user.oauth_secret = session['oauth']

    # Fetch the remote avatar and store a thumbnail on the user row.
    buf = StringIO()
    avatar = im_open(StringIO(req_get(image_url).content))
    avatar.thumbnail((128, 128), ANTIALIAS)
    avatar.save(buf, 'JPEG')
    user.image = buf.getvalue()

    db.session.commit()
    login_user(user, remember=True)

    # Why do i need this?
    del session['oauth']
    return redirect('/static/auth_recv.html')
def check_update():
    """
    Check if there is a later version of Screenly-OSE
    available. Only do this update once per day.
    Return True if up to date was written to disk,
    False if no update needed and None if unable to check.
    """
    sha_file = path.join(settings.get_configdir(), 'latest_screenly_sha')

    last_update = None
    if path.isfile(sha_file):
        last_update = datetime.fromtimestamp(path.getmtime(sha_file))

    logging.debug('Last update: %s' % str(last_update))

    # Already checked within the last day - nothing to do.
    if last_update is not None and last_update >= datetime.now() - timedelta(days=1):
        return False

    if url_fails('http://stats.screenlyapp.com'):
        logging.debug('Unable to retreive latest SHA')
        return

    latest_sha = req_get('http://stats.screenlyapp.com/latest')
    if latest_sha.status_code != 200:
        logging.debug('Received non 200-status')
        return

    with open(sha_file, 'w') as f:
        f.write(latest_sha.content.strip())
    return True
# NOTE(review): duplicate of the other process_asset variant; indentation was
# flattened onto one physical line, so the original nesting cannot be recovered
# with certainty (e.g. whether `duration = ""` overwrites the video "N/A"),
# and the inline '# Make sure...' comment truncates the rest of the line.
# Code left byte-identical; only this comment is added.
# Purpose (from visible calls): validate POSTed name/uri/mimetype, fetch the
# URI (GET for images, HEAD otherwise), and on HTTP 200 insert a row into the
# sqlite `assets` table, rendering a status-message template either way.
def process_asset(): conn = sqlite3.connect(database, detect_types=sqlite3.PARSE_DECLTYPES) c = conn.cursor() if (request.POST.get('name','').strip() and request.POST.get('uri','').strip() and request.POST.get('mimetype','').strip() ): name = request.POST.get('name','').decode('UTF-8') uri = request.POST.get('uri','').strip() mimetype = request.POST.get('mimetype','').strip() # Make sure it's a valid resource uri_check = urlparse(uri) if not (uri_check.scheme == "http" or uri_check.scheme == "https"): header = "Ops!" message = "URL must be HTTP or HTTPS." return template('message', header=header, message=message) if "image" in mimetype: file = req_get(uri) else: file = req_head(uri) # Only proceed if fetch was successful. if file.status_code == 200: asset_id = md5(name+uri).hexdigest() strict_uri = uri_check.scheme + "://" + uri_check.netloc + uri_check.path if "image" in mimetype: resolution = Image.open(StringIO(file.content)).size else: resolution = "N/A" if "video" in mimetype: duration = "N/A" start_date = "" end_date = "" duration = "" c.execute("INSERT INTO assets (asset_id, name, uri, start_date, end_date, duration, mimetype) VALUES (?,?,?,?,?,?,?)", (asset_id, name, uri, start_date, end_date, duration, mimetype)) conn.commit() header = "Yay!" message = "Added asset (" + asset_id + ") to the database." return template('message', header=header, message=message) else: header = "Ops!" message = "Unable to fetch file." return template('message', header=header, message=message) else: header = "Ops!" message = "Invalid input." return template('message', header=header, message=message)
def download_file(self, download_url, filename):
    """Stream `download_url` into `filename`; return the number of bytes written."""
    res = req_get(download_url, stream=True)  # using requests because of unknown error
    try:
        with open(filename, 'wb') as f:
            for chunk in res.iter_content(chunk_size=1024):
                if chunk:  # skip keep-alive chunks
                    f.write(chunk)
            size = f.tell()
    finally:
        # FIX: a streamed response holds its connection open until it is
        # fully consumed or closed; close it so the pool slot is released.
        res.close()
    return size
def photos(self, id_, timestamp, soup):
    """Collect the carousel image URLs (and raw image bytes) for property
    `id_` into redis."""
    carousel = soup.find(id='pbxl_carousel')
    for img in carousel.find_all('img'):
        image_url = 'http:' + img.attrs['data-original']
        self.redis.rpush('daftpunk:%s:image_urls' % id_, image_url)

        response = req_get(image_url, stream=True)
        if response.status_code == 200:
            # Store the raw image bytes alongside the URL list.
            self.redis.rpush('daftpunk:%s:images' % id_, response.raw.read())
def get_html_text(url='http://www.weather.com.cn/weather/101270401.shtml', encoding='utf-8'):
    """Fetch `url`, force `encoding`, and return the page text.

    Returns the literal string 'err!' on any failure (request error,
    non-2xx status, decode problem).
    """
    try:
        robj = req_get(url)
        robj.raise_for_status()
        robj.encoding = encoding
        return robj.text
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return 'err!'
def ted(url: str) -> str:
    """Scrape a TED.com talk page and write {author, body, title, type, url}
    to articles/<slug>.json; return the slug."""
    print("Extrair dados no TED.com")
    page = req_get(url=url).text
    tree = html.fromstring(page)

    title, author = tree.xpath('//meta[@itemprop="name"]/@content')

    # Join the transcript paragraphs, collapsing tabs/newlines left by markup.
    paragraphs = tree.xpath('//div[contains(@class, "Grid__cell")]/p/text()')
    body = "".join(re_sub(r"\t", " ", re_sub(r"\n|\t{2}", "", part)).strip()
                   for part in paragraphs)

    slug = url.split("/")[4]
    with open(f'articles/{slug}.json', "w") as out:
        json.dump({"author": author, "body": body, "title": title,
                   "type": "video", "url": url},
                  out, ensure_ascii=False, indent=4)
    return slug
def google_user_info(remote_app, resp):
    """Return (id, name, picture) for the Google account behind `resp`'s
    OAuth access token."""
    headers = {'Authorization': 'OAuth ' + resp['access_token']}
    r = req_get(
        'https://www.googleapis.com/oauth2/v1/userinfo?alt=json',
        headers=headers
    )
    data = r.json
    # Newer requests exposes .json as a method rather than a property.
    if callable(data):
        data = data()
    return data['id'], data['name'], data['picture']
def checkUpdates(req: Request) -> Optional[bytes]:
    """Serve an osu! check-updates request, returning the cached upstream
    result for the stream when it is still fresh."""
    if req.args['action'] != 'check':
        # TODO: handle more?
        print('Received a request to update with an invalid action.')
        return

    if req.args['stream'] not in valid_osu_streams:
        return

    current_time = int(time())

    # If possible, use cached result.
    cache = glob.cache['update'][req.args['stream']]
    if cache['timeout'] > current_time:
        return cache['result']

    query = '&'.join(f'{k}={v}' for k, v in req.args.items())
    res = req_get('https://old.ppy.sh/web/check-updates.php?{p}'.format(p=query))
    if not res:
        return
def startse(url: str) -> str:
    """Scrape a startse.com article and write its metadata/body to
    articles/<slug>.json; return the slug."""
    print("Extrair dados no startse.com")
    page = req_get(url=url).text
    tree = html.fromstring(page)

    # Article metadata is embedded as JSON-LD; strip stray whitespace first.
    meta = json.loads(re_sub(r"\n|\t|\r", '',
                             tree.xpath('string(//script[@type="application/ld+json"])')))

    body = "".join(re_sub(r"\t", " ", re_sub(r"\n|\t{2}", "", part)).strip()
                   for part in tree.xpath('//span[@style="font-weight: 400;"]/text()'))

    slug = url.split("/")[-1]
    with open(f'articles/{slug}.json', "w") as out:
        json.dump({"author": meta['@graph'][5]['name'], "body": body,
                   "title": meta['@graph'][4]['headline'],
                   "type": meta['@graph'][4]['@type'], "url": url},
                  out, ensure_ascii=False, indent=4)
    return slug
# NOTE(review): constructor for an imgur album downloader. Indentation was
# flattened and the NetworkError message uses a backslash line-continuation
# *inside* a string literal that was garbled across these two lines, so the
# exact runtime message text cannot be reconstructed - code left byte-identical.
# Purpose (from visible calls): extract the album id from `link` via regex
# (raising ValueError for non-imgur links), fetch the /layout/blog page for the
# album, raise NetworkError on non-200, then collect unique (hash, ext) pairs
# from the page into self._uniqlist / self._supp_len.
def __init__(self, link, verbose=False): self.files = [] self._link = link self.verbose = verbose print('Getting video id') if self.verbose else None video_id_search = re.match( r"(?:https)?\:\/{2}(?:www/.)?(?:m\.)?imgur\.com/(?:a|gallery)?/([a-zA-Z0-9]+)(?:#[0-9]+)?", self._link) print('Video id search finished,\nlink: {}'.format( video_id_search.group(0))) if self.verbose else None try: # Get the link to the actual video self._albumextension = video_id_search.group(1) print('Video id get successful!\nVideo id: {}'.format( self._albumextension)) if self.verbose else None except AttributeError: print( 'Video id is not in expected imgur format.\nNo file is expected' ) if self.verbose else None # Raised when the user did not enter an imgur link raise ValueError('Please input a proper link!') from AttributeError print('Getting blog link from video id {}\nConnecting to website'. format('<' + self._albumextension + '>')) if self.verbose else None # get the longer link self.__actuallink = req_get("http://imgur.com/a/" + self._albumextension + "/layout/blog") print( 'Connecting to blog link:\nBlog link: {url}\nStatus code: {status}' .format(url=self.__actuallink.url, status=self.__actuallink.status_code )) if self.verbose else None if self.__actuallink.status_code != 200: raise NetworkError( 'The link is unresponsive, imgur returned status code {}\ \n{}'.format( self.__actuallink.status_code, 'Is the photo you are looking for deleted?' 
if self.__actuallink.status_code == 404 else '')) print('Connected to blog...\nLooking for references to images/videos' ) if self.verbose else None # find hash and extension self._reflist = re.findall( r'.*?{"hash":"([a-zA-Z0-9]+)".*?"ext":"(\.[a-zA-Z0-9]+)".*?', self.__actuallink.text) print( 'Found {length} matched cases, shrinking duplicated lists'.format( length=len(self._reflist))) if self.verbose else None # Make the list unique self._uniqlist = list(set(self._reflist)) # Set the supposed length for the imgur link self._supp_len = len(self._uniqlist) print('Getting unique values was successful, {} items in total'.format( self._supp_len)) if self.verbose else None
def get(self, url, timeout=60, headers={}, verify=False, type='python'):
    """Fetch `url` with requests when type='python' (falling back to curl on
    any failure), or via the curl/php helper backends."""
    url = self.quote(url)
    if type == 'python':
        try:
            from requests import get as req_get
            return req_get(url, timeout=timeout,
                           headers=get_headers(headers), verify=verify)
        except:
            # requests missing or the request failed - fall back to curl.
            response = self._get_curl(url, timeout, headers, verify)
    elif type == 'curl':
        response = self._get_curl(url, timeout, headers, verify)
    elif type == 'php':
        response = self._get_php(url, timeout, headers, verify)
    return response
def get_weather_dict(sadt, nx=63, ny=111):
    """Query the KMA short-term forecast API and return the decoded JSON.

    sadt: (base_date, base_time) pair; nx/ny: forecast grid coordinates.
    """
    base_url = 'http://newsky2.kma.go.kr/service/SecndSrtpdFrcstInfoService2/ForecastSpaceData'
    # NOTE: the key is already percent-encoded, so it is concatenated rather
    # than passed through a params dict (which would double-encode it).
    service_key = 'v9ArtjlS3sC0ObVFeayEo6hMm9lTDc0Yizsquek9IFEadCQvSkMonOBeklG2EJjmcWrIwPnNyokV6Nsn3faApA%3D%3D'
    url = (base_url + '?serviceKey=' + service_key
           + '&base_date=' + sadt[0]
           + '&base_time=' + sadt[1]
           + '&nx=' + str(nx)
           + '&ny=' + str(ny)
           + '&numOfRows=20&_type=json')
    response = req_get(url)
    return json.loads(response.text)
def geocode(self, id_, timestamp, soup):
    """Resolve the stored address of property `id_` to lat/long via the
    geocoding API and cache the coordinates in redis."""
    if self.is_geocoded(id_):
        return

    address = self.redis.get('daftpunk:%s:address' % id_)
    response = req_get(GEOCODE_API, params={'address': address})
    results = response.json()['results']

    # Not sure how often google returns multiple results
    if len(results) > 1:
        with open('./daft.%s.log' % self.prop_id, 'a') as outp:
            json_dump(results, outp)

    location = results[0]['geometry']['location']
    self.redis.set('daftpunk:%s:lat' % id_, location['lat'])
    self.redis.set('daftpunk:%s:long' % id_, location['lng'])
def olhardigital(url: str) -> str:
    """Scrape an olhardigital.com.br article and write it to
    articles/<slug>.json; return the slug."""
    print("Extrair dados no olhardigital.com.br")
    page = req_get(url=url).text
    tree = html.fromstring(page)

    meta = json.loads(re_sub(r"\n|\t|\r", '',
                             tree.xpath('string(//script[@type="application/ld+json"])')))

    # The slug position in the URL path depends on the section of the site.
    section = url.split("/")[3]
    if section == "colunistas":
        slug = url.split("/")[6]
    elif section == "noticia":
        slug = url.split("/")[4]
    else:
        slug = url.split("/")[5]

    body = tree.xpath('string(//div[@class="mat-txt"])')[:-70].strip().replace('"', "'")

    # Column pieces credit the columnist in the page header rather than JSON-LD.
    if meta['author']['name'] == 'Redação Olhar Digital':
        author = tree.xpath('string(//h1[@class="cln-nom"])')
    else:
        author = meta['author']['name']

    with open(f'articles/{slug}.json', "w") as out:
        json.dump({"author": author, "body": body, "title": meta['headline'],
                   "type": meta['@type'], "url": url},
                  out, ensure_ascii=False, indent=4)
    return slug
def __init__(self, api_url, api_params):
    """Fetch current exchange rates from the web API.

    Raises ConnectionError on a non-200 response and ConnectionRefusedError
    when the API reports success != True (bad access key).
    """
    self.req = req_get(api_url, params=api_params)
    # FIX: was `status_code is not 200` - identity comparison with an int
    # literal is implementation-defined (and a SyntaxWarning on CPython 3.8+);
    # use value inequality instead.
    if self.req.status_code != 200:
        raise ConnectionError({
            'error_message': "Problems with connection to web API",
            'status': 410
        })
    if self.req.json()['success'] is not True:
        raise ConnectionRefusedError({
            'error_message': "Bad access key to web API",
            'status': 401
        })
    self.r_data = self.req.json()
    self.rates = self.r_data.get('rates')
    self.base = self.r_data.get('base')
def geocode(self, id_, timestamp, soup):
    """Resolve the stored address of property `id_` to lat/long via the
    geocoding API, logging ambiguous results, and cache them in redis."""
    if self.is_geocoded(id_):
        return

    address = self.redis.get('daftpunk:%s:address' % id_)
    response = req_get(GEOCODE_API, params={'address': address})
    results = response.json()['results']

    # Not sure how often google returns multiple results
    if len(results) > 1:
        logging.debug("Found multiple location results for property %s:" % id_)
        logging.debug(json_dump(results, indent=4))

    location = results[0]['geometry']['location']
    self.redis.set('daftpunk:%s:lat' % id_, location['lat'])
    self.redis.set('daftpunk:%s:long' % id_, location['lng'])
def find_properties(self, search_url):
    """Walk the paginated search results starting at `search_url` and return
    every scraped property."""
    self.host = urlsplit(search_url).netloc
    next_page = search_url
    properties = []
    while True:
        req = req_get(next_page)
        # NOTE(review): no explicit parser passed to BeautifulSoup - bs4 will
        # pick one and warn; confirm which parser the scraper expects.
        soup = BeautifulSoup(req.text)
        properties.extend(self.scrape_properties(soup))

        # Find next page of results
        try:
            href = soup.find(**{'class': "next_page"}).a['href']
        except Exception:
            # FIX: narrowed from a bare `except:`; a missing "next page" link
            # makes .find() return None (AttributeError) - stop paging, but
            # no longer swallow SystemExit/KeyboardInterrupt.
            break
        next_page = self.reletive_url(href)
    return properties
def view_web(url, duration):
    """Show `url` in the browser for `duration` seconds, if it is reachable.

    Local pages (inside html_folder) are checked on disk; remote URLs are
    probed with an HTTP request.
    """
    if html_folder in url and path.exists(url):
        status = 200
    else:
        status = req_get(url).status_code

    if status == 200:
        logging.debug('Web content appears to be available. Proceeding.')
        logging.debug('Displaying url %s for %s seconds.' % (url, duration))
        browser_url(url)
        sleep(int(duration))
        browser_url(url)
    else:
        logging.debug('Received non-200 status (or file not found if local) from %s. Skipping.' % (url))
async def thicc(ctx, *args):
    """ args either nothing or :\n "x=0.5 y=0.5 strength=0.5 radius=0.5"\n """
    embed = discord.Embed(
        title=":gear: Working...",
        description="This may take a while depending on your image size.",
        colour=discord.Colour.green())
    working_message = await ctx.send(embed=embed)

    # Bulge parameters, overridable via "key=value" arguments.
    x = 0.5
    y = 0.5
    strength = 0.5
    radius = 0.5
    for arg in args:
        parts = arg.split("=")
        if parts[0] == "x":
            x = float(parts[1])
        elif parts[0] == "y":
            y = float(parts[1])
        elif parts[0] == "strength":
            strength = float(parts[1])
        elif parts[0] == "radius":
            radius = float(parts[1])

    response = req_get(ctx.message.attachments[0].url)
    img = Image.open(BytesIO(response.content))

    # Run the (blocking) bulge computation off the event loop.
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)
    loop = asyncio.get_event_loop()
    out = await loop.run_in_executor(executor, img.copy)
    await loop.run_in_executor(executor, bulge_compute, img, x, y, strength,
                               radius, out)

    output = BytesIO()
    out.save(output, format="png")
    output.seek(0)
    file = discord.File(output, "thicc.png")

    await working_message.delete()
    await ctx.message.delete()
    await ctx.send(file=file)
def check_update():
    """
    Check if there is a later version of Screenly-OSE
    available. Only do this update once per day.
    """
    sha_file = path.join(getenv('HOME'), '.screenly', 'latest_screenly_sha')
    try:
        sha_file_mtime = path.getmtime(sha_file)
        last_update = datetime.fromtimestamp(sha_file_mtime)
    except OSError:
        # FIX: narrowed from a bare `except:` - only a missing/unreadable
        # file should reset the timestamp, not e.g. KeyboardInterrupt.
        last_update = None

    if last_update is None or last_update < (datetime.now() - timedelta(days=1)):
        latest_sha = req_get('http://stats.screenlyapp.com/latest')
        if latest_sha.status_code == 200:
            with open(sha_file, 'w') as f:
                f.write(latest_sha.content.strip())
def download(url) -> S3File:
    """ Downloads the file at url to a temporary file & returns the temporary file """
    source_path = urlparse(url).path
    orig_file = basename(source_path)
    _, orig_ext = splitext(orig_file)
    output_file = Utils.get_tmp_file(orig_ext)

    logger.debug("Downloading {}...".format(url))
    with req_get(url, stream=True) as resp, open(output_file, mode='wb') as temp_file:
        resp.raise_for_status()
        # Stream the body to disk in fixed-size chunks.
        for req_chunk in resp.iter_content(READ_CHUNK_SZ):
            temp_file.write(req_chunk)
    logger.debug("{} downloaded to {}".format(url, output_file))

    return S3File(output_file, tags={"original_file": orig_file})
# NOTE(review): constructor for a single imgur image/video download. Both
# exception messages used backslash line-continuations inside string literals
# that were garbled by the one-line flattening, so their exact runtime text
# cannot be reconstructed - code left byte-identical, only this comment added.
# Purpose (from visible calls): GET `link`, raise NetworkError on non-200,
# validate the file extension against DATATYPES (TypeError otherwise), classify
# as IMAGE/VIDEO via IMAGETYPES, and derive self.default_name from the URL.
def __init__(self, link, verbose): self.request = req_get(link) self.verbose = verbose self.success = self.request.status_code == 200 # When connection returned anything but 200<success> if not self.success: raise NetworkError( 'Warning: Fallback on link {}, connection returned status code {}, \ will now skip image'.format(link, self.request.status_code)) self.extension = link.split('.')[-1] if self.extension not in DATATYPES: raise TypeError( "Unknown/Unavaliable extension {} is given. Please contact https://github.com/feimaomiao/imgur-downloader use the -v flag" .format(self.extension)) self.type = "IMAGE" if self.extension in IMAGETYPES else 'VIDEO' self.default_name = re_match(r".+com/([a-zA-Z0-9]+)\.\w+", link).group(1) self.file_path = '' self._name = ''
def currency_date_value():
    """Scrape dailyfx.com for GBP cross rates and return a dict mapping the
    counter currency (e.g. 'USD') to its averaged value as a string."""
    baseurl = 'https://www.dailyfx.com/'
    rates = {}
    for currency in ['gbp-usd', 'gbp-jpy', 'gbp-chf', 'gbp-cad']:
        page = req_get(url=baseurl + currency)

        # Keep only lines carrying real data-value attributes ("--" means no data).
        candidates = [line for line in page.text.split('\n')
                      if 'data-value=' in line and 'data-value="--' not in line]
        last_line = candidates[-1]

        values = [
            float(x)
            for x in last_line.split('=')[1].split('"')[1].split(',')
        ]
        average = sum(values) / len(values)
        rates[currency.replace('gbp-', '').upper()] = "%.7f" % round(average, 7)
    return rates
def fetch_master_assets():
    """Replace the local asset table with the asset list served by the
    configured master node's /api/assets endpoint."""
    logging.debug("trying to fetch assets from Master " + settings["remote_host"] + ":" + settings["remote_port"])
    try:
        r = req_get("http://" + settings["remote_host"] + ":" + settings["remote_port"] + "/api/assets", timeout=10)
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed as "network errors".
        logging.warn("Network error getting remote assets")
    else:
        if r.status_code != 200:
            logging.warn("Unable to get remote assets " + str(r.status_code))
        else:
            logging.debug("Got " + str(r.text))
            data = json_loads(r.text)
            assets_helper.clear_table(db_conn)
            for asset in data:
                try:
                    a = json_to_asset(asset)
                    assets_helper.create(db_conn, a)
                    logging.info("Added remote asset " + str(a["uri"]))
                except Exception as e:
                    # FIX: was the Python-2-only `except Exception, e` syntax;
                    # `as e` works on Python 2.6+ and 3.
                    logging.warn("Can not add remote asset: " + str(e))
def check_update():
    """
    Check if there is a later version of Screenly-OSE
    available. Only do this update once per day.
    Return True if up to date was written to disk,
    False if no update needed and None if unable to check.
    """
    # No update check in FSG edition
    return True

    # --- Everything below is unreachable; preserved unchanged. ---
    sha_file = path.join(settings.get_configdir(), "latest_screenly_sha")
    if path.isfile(sha_file):
        sha_file_mtime = path.getmtime(sha_file)
        last_update = datetime.fromtimestamp(sha_file_mtime)
    else:
        last_update = None

    logging.debug("Last update: %s" % str(last_update))

    git_branch = sh.git("rev-parse", "--abbrev-ref", "HEAD")

    if last_update is None or last_update < (datetime.now() - timedelta(days=1)):
        if not url_fails("http://stats.screenlyapp.com"):
            latest_sha = req_get("http://stats.screenlyapp.com/latest/{}".format(git_branch))
            if latest_sha.status_code == 200:
                with open(sha_file, "w") as f:
                    f.write(latest_sha.content.strip())
                return True
            else:
                logging.debug("Received non 200-status")
                return
        else:
            logging.debug("Unable to retrieve latest SHA")
            return
    else:
        return False
def get_online_streamers(streamer_list=None, full_verbose=False, verbose=True):
    """Return the streamers from `streamer_list` currently live on Twitch.

    Parameters:
        streamer_list: iterable of Twitch channel names, or None.
        full_verbose: print per-streamer diagnostics.
        verbose: print summary progress messages.

    Returns:
        list of streamer names the kraken API reports as online.
    """
    # BUG FIX: previously only initialised inside the `else` branch, so
    # calling with streamer_list=None raised NameError at the final return.
    online_streamers = []
    if verbose:
        print('[+] Getting list of online streamers')
    if streamer_list is None:
        if full_verbose:
            print('[-] No streamer list passed')
    else:
        for streamer in streamer_list:
            try:
                data = req_get('https://api.twitch.tv/kraken/streams/' + streamer).json()
            except Exception as e:
                if full_verbose:
                    print('[-] JSON error: {}'.format(e))
            else:
                try:
                    stream_data = data['stream']
                except KeyError:
                    print('[-] Unable to get streamer data from API')
                else:
                    # A null "stream" entry means the channel is offline.
                    if stream_data is not None:
                        if full_verbose:
                            print('[+] {} is online!'.format(streamer))
                        online_streamers.append(streamer)
                    else:
                        if full_verbose:
                            print('[-] {} is offline!'.format(streamer))
        if len(online_streamers) > 0:
            if verbose:
                print('[+] Current online streamers: {}'.format(
                    online_streamers))
        else:
            if verbose:
                print('[-] None of the passed streamers are currently online.')
    return online_streamers
def download_replay(replay, path):
    """Download `replay.replay_url` to `path`.

    Returns `path` on success, None on connection/HTTP errors.
    Raises FileExistsError when `path` already exists and InvalidURL when
    the replay carries no URL.
    """
    if path.is_file():
        print("{} already exists.".format(path))
        raise FileExistsError
    if replay.replay_url is None:
        print("Url in replay is none.")
        raise InvalidURL
    print("Downloading: {}".format(replay.replay_url))
    try:
        dl_stream = req_get(replay.replay_url, stream=True)
    except ConnectionError:
        print("Connection error!")
        return
    except HTTPError as err:
        # FIX: the old handler formatted dl_stream.status_code, but dl_stream
        # is unbound when req_get itself raised - that was a NameError.
        print("Starting connection to {} failed with {}.".format(
            replay.replay_url, err))
        return
    with open(path, 'wb') as file:
        for data in tqdm(dl_stream.iter_content(chunk_size=10000)):
            file.write(data)
    return path
def annotate(alleles_handle, reference, report_handle, minimum):
    """Convert a csv files containing alleles and counts to HGVS descriptions
    of alleles, single variants and variant types.

    :arg stream alleles_handle: Open handle to the alleles file.
    :arg str reference: The reference sequence.
    :arg stream report_handle: Open writeable handle to the report file.
    :arg int minimum: Minimum count.
    """
    alleles = defaultdict(lambda: [0, 0, 0])
    raw_vars = defaultdict(lambda: [0, 0, 0])
    classification = defaultdict(lambda: [0, 0, 0])

    # Skip the header line; rows are tab-separated: allele, then three counts.
    rows = [line.strip('\n').split('\t')
            for line in alleles_handle.readlines()[1:]]
    for row in rows:
        allele_description = req_get(
            'https://mutalyzer.nl/json/descriptionExtract?' +
            'reference={}&observed={}'.format(reference, row[0])).json()
        encountered = list(map(int, row[1:]))

        key = allele_description['description']
        alleles[key] = [a + b for a, b in zip(alleles[key], encountered)]

        for variant in allele_description['allele']:
            desc = variant['description']
            raw_vars[desc] = [a + b for a, b in zip(raw_vars[desc], encountered)]
            vtype = variant['type']
            classification[vtype] = [
                a + b for a, b in zip(classification[vtype], encountered)]

    write_table(alleles, 'allele', report_handle, minimum)
    report_handle.write('\n')
    write_table(raw_vars, 'variant', report_handle, minimum)
    report_handle.write('\n')
    write_table(classification, 'class', report_handle, minimum)
def updateBeatmap(req: Request) -> Optional[bytes]:
    """Serve an osu! beatmap update request, reading the .osu file from the
    on-disk cache or fetching (and caching) it from the osu! website."""
    # XXX: This currently works in updating the map, but seems to get the
    # checksum something like that wrong? Will have to look into it :P
    match = _map_regex.match(unquote(req.uri[10:]))
    if not match:
        printlog(f'Requested invalid map update {req.uri}.', Ansi.RED)
        return b''

    res = glob.db.fetch(
        'SELECT id, md5 FROM maps WHERE '
        'artist = %s AND title = %s '
        'AND creator = %s AND version = %s',
        [match['artist'], match['title'], match['creator'], match['version']])
    if not res:
        return b''  # no map found

    filepath = f"pp/maps/{res['id']}.osu"
    if exists(filepath):
        # Map found on disk.
        with open(filepath, 'rb') as f:
            content = f.read()
    else:
        # We don't have map, get from osu!
        r = req_get(f"https://old.ppy.sh/osu/{res['id']}")
        if not r:
            raise Exception(f'Could not find map {filepath}!')

        content = r.content
        with open(filepath, 'wb+') as f:
            f.write(content)

    return content
# NOTE(review): indentation was flattened onto these lines and the function is
# split mid-body (the trailing '# crashes...' comment swallows the rest of its
# physical line), so the original nesting of the upload/URI branches cannot be
# recovered with certainty - code left byte-identical, only this comment added.
# Purpose (from visible calls): validate an asset form (name/uri-or-upload/
# mimetype), fetch remote URIs (GET for images, HEAD otherwise), store uploads
# under the asset folder, parse start/end dates, and return the asset dict.
def prepare_asset(request): data = request.POST or request.FORM or {} if "model" in data: data = json.loads(data["model"]) def get(key): val = data.get(key, "") return val.strip() if isinstance(val, basestring) else val if all([get("name"), get("uri") or (request.files.file_upload != ""), get("mimetype")]): asset = {"name": get("name").decode("UTF-8"), "mimetype": get("mimetype"), "asset_id": get("asset_id")} uri = get("uri") or False if not asset["asset_id"]: asset["asset_id"] = uuid.uuid4().hex try: file_upload = request.files.file_upload filename = file_upload.filename except AttributeError: file_upload = None filename = None if filename and "web" in asset["mimetype"]: raise Exception("Invalid combination. Can't upload a web resource.") if uri and filename: raise Exception("Invalid combination. Can't select both URI and a file.") if uri and not uri.startswith("/"): if not validate_uri(uri): raise Exception("Invalid URL. Failed to add asset.") if "image" in asset["mimetype"]: file = req_get(uri, allow_redirects=True) else: file = req_head(uri, allow_redirects=True) if file.status_code == 200: asset["uri"] = uri # strict_uri = file.url else: raise Exception("Could not retrieve file. Check the asset URL.") else: asset["uri"] = uri if filename: asset["uri"] = path.join(settings.get_asset_folder(), asset["asset_id"]) with open(asset["uri"], "w") as f: while True: chunk = file_upload.file.read(1024) if not chunk: break f.write(chunk) if "video" in asset["mimetype"]: asset["duration"] = "N/A" else: # crashes if it's not an int. we want that. 
asset["duration"] = int(get("duration")) if get("start_date"): asset["start_date"] = datetime.strptime(get("start_date").split(".")[0], "%Y-%m-%dT%H:%M:%S") else: asset["start_date"] = "" if get("end_date"): asset["end_date"] = datetime.strptime(get("end_date").split(".")[0], "%Y-%m-%dT%H:%M:%S") else: asset["end_date"] = "" if not asset["asset_id"]: raise Exception if not asset["uri"]: raise Exception return asset else: raise Exception("Not enough information provided. Please specify 'name', 'uri', and 'mimetype'.")
# NOTE(review): top-level Python 2 script fragment, not a function. It
# references names defined outside this chunk (SERVER, CHANNEL, last_update,
# sha_file, url_fails, system), uses py2 `print` statements, contains blocks of
# commented-out file-writing code, and ends on a dangling
# `if path.isfile(sha_file):` whose body lies beyond this view.
# Left byte-identical; the flattened indentation and the truncation make any
# restructuring unsafe. Visible intent: verify the playlist server is
# reachable (showing an on-screen OSD error and exiting otherwise), then fetch
# the current playlist hash for CHANNEL and log failures.
if not url_fails('http://' + SERVER): #can connect to server print "Connected" else: print "Unable to connect to server " + SERVER system("echo Failed to update playlist | DISPLAY=:0 osd_cat --pos=bottom --align=right --color=white -f '-*-*-bold-*-*-*-22-*' --offset=-100 --outline=2 --delay=30") exit() if last_update is None or last_update < (datetime.now() - timedelta(seconds=1)): if not url_fails('http://' + SERVER): latest_sha = req_get('http://' + SERVER + '/playlist/' + CHANNEL + '/hash') if latest_sha.status_code == 200: print "hash valid" #with open(sha_file, 'w') as f: # f.write(latest_sha.content.strip()) #return True else: logging.debug('Received non 200-status') #return else: logging.debug('Unable to retreive latest SHA') if path.isfile(sha_file):
def prepare_asset(request):
    """Validate a form/JSON submission and return a dict describing the asset.

    Variant that expands tokens in the URI (via tok_replace) before
    validating/fetching it, while storing the *un*-expanded URI in the asset.
    Raises Exception (with a user-facing message) on any invalid input.
    """
    data = request.POST or request.FORM or {}

    # Backbone-style clients post the whole model as one JSON string.
    if 'model' in data:
        data = json.loads(data['model'])

    def get(key):
        # Strip whitespace from string values; pass other types through.
        val = data.get(key, '')
        return val.strip() if isinstance(val, basestring) else val

    if not all([
            get('name'),
            get('uri') or (request.files.file_upload != ""),
            get('mimetype')]):
        raise Exception("Not enough information provided. Please specify 'name', 'uri', and 'mimetype'.")

    asset = {
        'name': get('name').decode('UTF-8'),
        'mimetype': get('mimetype'),
        'asset_id': get('asset_id'),
    }
    uri = get('uri') or False

    if not asset['asset_id']:
        asset['asset_id'] = uuid.uuid4().hex

    try:
        file_upload = request.files.file_upload
        filename = file_upload.filename
    except AttributeError:
        file_upload = None
        filename = None

    if filename and 'web' in asset['mimetype']:
        raise Exception("Invalid combination. Can't upload a web resource.")

    if uri and filename:
        raise Exception("Invalid combination. Can't select both URI and a file.")

    if uri and not uri.startswith('/'):
        # [bknittel] Expand tokens and use actual_uri in the tests below;
        # the stored asset keeps the original (token-bearing) uri.
        actual_uri = tok_replace(uri)

        if not validate_uri(actual_uri):
            raise Exception("Invalid URL. Failed to add asset.")

        if "image" in asset['mimetype']:
            file = req_get(actual_uri, allow_redirects=True)
        else:
            # A HEAD request is enough to check reachability of non-images.
            file = req_head(actual_uri, allow_redirects=True)

        if file.status_code != 200:
            raise Exception("Could not retrieve file. Check the asset URL.")
        asset['uri'] = uri
    else:
        asset['uri'] = uri

    if filename:
        asset['uri'] = path.join(settings.get_asset_folder(), asset['asset_id'])

        # FIX: write the upload in binary mode ('wb', was 'w'); text mode
        # corrupts binary uploads on platforms that translate line endings.
        with open(asset['uri'], 'wb') as f:
            while True:
                chunk = file_upload.file.read(1024)
                if not chunk:
                    break
                f.write(chunk)

    if "video" in asset['mimetype']:
        asset['duration'] = "N/A"
    else:
        # Crashes if it's not an int. We want that.
        asset['duration'] = int(get('duration'))

    # Dates arrive as ISO strings with an optional fractional part to drop.
    for key in ('start_date', 'end_date'):
        if get(key):
            asset[key] = datetime.strptime(get(key).split(".")[0], "%Y-%m-%dT%H:%M:%S")
        else:
            asset[key] = ""

    # FIX: give these sanity checks a message instead of a bare
    # 'raise Exception' so the failure is diagnosable.
    if not asset['asset_id']:
        raise Exception("Internal error: asset_id missing.")
    if not asset['uri']:
        raise Exception("Internal error: asset URI missing.")

    return asset
def process_asset():
    """Validate the submitted form, fetch or store the asset, and insert it
    into the `assets` table.  Always returns a rendered 'message' template.
    """
    c = connection.cursor()

    if (request.POST.get('name', '').strip() and
            (request.POST.get('uri', '').strip() or request.files.file_upload.file) and
            request.POST.get('mimetype', '').strip()):
        name = request.POST.get('name', '').decode('UTF-8')
        mimetype = request.POST.get('mimetype', '').strip()

        # FIX: narrowed the bare 'except:' clauses (which also swallow
        # KeyboardInterrupt/SystemExit) to 'except Exception:' while
        # keeping the same best-effort fallbacks.
        try:
            uri = request.POST.get('uri', '').strip()
        except Exception:
            uri = False

        try:
            file_upload = request.files.file_upload.file
        except Exception:
            file_upload = False

        # Make sure it is a valid combination.
        if file_upload and 'web' in mimetype:
            return template('message', header="Ops!",
                            message="Invalid combination. Can't upload web resource.")

        if uri and file_upload:
            return template('message', header="Ops!",
                            message="Invalid combination. Can't select both URI and a file.")

        if uri:
            if not validate_uri(uri):
                return template('message', header="Ops!",
                                message="Invalid URL. Failed to add asset.")

            if "image" in mimetype:
                file = req_get(uri, allow_redirects=True)
            else:
                file = req_head(uri, allow_redirects=True)

            # Only proceed if fetch was successful.
            if file.status_code == 200:
                asset_id = md5(name + uri).hexdigest()
                strict_uri = file.url
                if "image" in mimetype:
                    # Parsing the image also sanity-checks the downloaded
                    # bytes; the value itself is currently unused.
                    resolution = Image.open(StringIO(file.content)).size
                else:
                    resolution = "N/A"
                if "video" in mimetype:
                    duration = "N/A"
            else:
                return template('message', header="Ops!",
                                message="Unable to fetch file.")

        if file_upload:
            # FIX: the original hashed file_upload.read() and then called
            # read() again for the write; the second read() returned an
            # empty string, so a zero-byte asset file was written.
            # Read once and reuse the bytes for both the hash and the write.
            contents = file_upload.read()
            asset_id = md5(contents).hexdigest()
            local_uri = path.join(asset_folder, asset_id)
            # FIX: binary mode + context manager (was text-mode open/close).
            with open(local_uri, 'wb') as f:
                f.write(contents)
            uri = local_uri

        start_date = ""
        end_date = ""
        duration = ""

        c.execute(
            "INSERT INTO assets (asset_id, name, uri, start_date, end_date, duration, mimetype) VALUES (?,?,?,?,?,?,?)",
            (asset_id, name, uri, start_date, end_date, duration, mimetype))
        connection.commit()

        return template('message', header="Yay!",
                        message="Added asset (" + asset_id + ") to the database.")
    else:
        return template('message', header="Ops!", message="Invalid input.")