Example #1
def bench_requests_futures_async(number_reqs, nb_worker):
    # https://pypi.python.org/pypi/requests-futures
    l=[]

    start = datetime.datetime.now()
    print('Start : ', start)

    def bg_cb(sess, resp):
        # resp.text
        if resp.status_code != requests.codes.ok:
            print(resp.status_code)
            resp.raise_for_status()
        #print(dir(resp))
        l.append(1)
        l_size = len(l)
        print(l_size)
        #print(len(response.body))
        if l_size == number_reqs:
            tornado.ioloop.IOLoop.instance().stop()
        if datetime.datetime.now() - start >= datetime.timedelta(seconds=60):
            tornado.ioloop.IOLoop.instance().stop()

    session = FuturesSession( max_workers=10 )
    for elem in range(int(number_reqs/nb_worker)):
        for e in range(nb_worker):
            session.get(
                        "http://www.leboncoin.fr/",
                        background_callback = bg_cb
                        )
        time.sleep(1)
    print('[Rq TURFU] Done :', datetime.datetime.now() - start)
Example #2
def fetchReviews(unique_id):

	s = FuturesSession()
	
	# Hand shake proc. to figure out how many calls we send to server
	api_format = 'https://watcha.net/comment/list?unique_id={unique_id}&start_index={start_index}&count=10&type=like'
	handshake = api_format.format(unique_id=unique_id, start_index=str(0))
	hs = s.get(handshake).result().content
	json_hs = json.loads(hs)
	total_count = int(json_hs['meta']['total_count'])
	how_many_queries = total_count / 10 + 1

	query_urls = [api_format.format(unique_id=unique_id, start_index=str(i * 10)) for i in xrange(0, how_many_queries, 1)]
	reviews = [
		{
			'movie_title': r['movie_title'],
			'rating': r['rating'],
			'text': r['text'],
			'updated_at': time.mktime(dateutil.parser.parse(r['updated_at']).timetuple()),
			'comment_id': r['comment_id']
		}
		for qu in query_urls
		for r in json.loads(s.get(qu).result().content)['data']
	]
	return reviews
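
In the example above, `.result()` is called inside the list comprehension, so the page requests run one at a time. A minimal sketch (assuming the same `s`, `api_format`, `unique_id` and `how_many_queries` as above) that creates all futures first so the downloads overlap:

futures = [s.get(api_format.format(unique_id=unique_id, start_index=str(i * 10)))
           for i in xrange(0, how_many_queries, 1)]
# the requests are now in flight; resolving the futures just collects the results
pages = [json.loads(f.result().content)['data'] for f in futures]
reviews_raw = [r for page in pages for r in page]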
Example #3
def home(request, album_key):
    response = cache.get(album_key)

    if response is None:
        session = FuturesSession(max_workers=5)
        session.auth = AlchemyApiAuth(settings.ALCHEMYAPI_KEY)

        futures = []
        source_item, reviews, comment_by_comment_key = get_rdio_comments(album_key)
        for comment_key, comment_text in comment_by_comment_key.iteritems():
            futures.append(start_request(session, comment_key, comment_text))

        sentiment_by_comment_key = complete_requests(futures)
        total_sentiment, per_item_sentiment = aggregate_sentiment(reviews, sentiment_by_comment_key)

        response = {
            'item': source_item,
            'total_sentiment': total_sentiment,
            'per_item_sentiment': per_item_sentiment,
            'sentiment_by_comment_key': sentiment_by_comment_key,
            'comment_by_comment_key': comment_by_comment_key,
        }

        response = json.dumps(response, indent=2)
        cache.set(album_key, response)

    return http.HttpResponse(response,
                             content_type='application/json')
Example #4
    def search(self, q='', cat='', indexer='all', **kwargs):
        self.logger.debug("Searching for %s category %s on indexer %s" % (q, cat, indexer))
        if cat:
            cat = '&cat=' + cat

        sess = FuturesSession(max_workers=8)
        job_list = []

        if indexer == 'all':
            for i in NewznabIndexers.select():
                cmd = 'search&q=' + urllib2.quote(q.encode(encoding="UTF-8")) + cat + '&extended=1'
                u = i.apiurl
                u += cmd
                u = u.replace('o=json', 'o=xml')
                job_list.append(u)
        else:
            for i in NewznabIndexers.select():
                if i.name == indexer:
                    cmd = 'search&q=' + urllib2.quote(q.encode(encoding="UTF-8")) + cat + '&extended=1'
                    u = i.apiurl
                    u += cmd
                    u = u.replace('o=json', 'o=xml')
                    job_list.append(u)

        result = []
        futures = []

        for url in job_list:
            try:
                self.logger.debug('Fetching search results from %s' % url)
                t = sess.get(url, timeout=60, headers=self.headers)
            except Exception as e:
                self.logger.error('%s when fetching %s' % (e, url))
                continue

            futures.append(t)

        for future in cf.as_completed(futures):
            if future.exception() is not None:
                self.logger.error('Failed to fetch results %s' % (future.exception()))
            else:
                f = []
                res = future.result()
                try:
                    provider_res = xmltodict.parse(res.content, attr_prefix='')
                    if provider_res:
                        if 'rss' in provider_res:
                            if 'channel' in provider_res['rss']:
                                if 'item' in provider_res['rss']['channel']:
                                    f.append(provider_res['rss']['channel'])

                        if 'error' in provider_res:
                            self.logger.debug('%s %s' % (provider_res['rss']['channel']['title'], provider_res['error']['description']))

                except Exception as e:
                    self.logger.error('%s %s' % (res.url, e), exc_info=True)

                result.append(f)

        return result
Example #5
def check_result(request):
	"""
		This is NOT A VIEW.
		Returns the job status after querying asynchronously. If finished, returns result.
	"""
	API_KEY = gimmeapikey(request)
	jobid = request.session['jobid']
	payload = {'apikey':API_KEY}
	session = FuturesSession()
	
	try:
		future = session.post('https://api.havenondemand.com/1/job/status/'+jobid, data = payload)
		r = future.result()
	except Exception as e:
		return 0
		
	# check if the response is valid, else return an error.
	
	# r.content is a bytes object, so decode it as UTF-8.
	
	response = r.content.decode('utf-8')
	json_data = json.loads(response)
	
	if 'status' in json_data:
		if json_data['status'] == 'finished':
			request.session['extracted_text'] = json_data['actions'][0]['result']['document'][0]['content']
		return json_data['status']
	else:
		return 0
Example #6
class HttpClient(ClientBase):

    def __init__(self, host='localhost', port=8094, tags=None):
        # only import HttpClient's dependencies if using HttpClient
        # if they're not found, inform the user how to install them
        try:
            from requests_futures.sessions import FuturesSession
        except ImportError:
            raise ImportError('pytelegraf[http] must be installed to use HTTP transport')

        super(HttpClient, self).__init__(host, port, tags)

        # the default url path for writing metrics to Telegraf is /write
        self.url = 'http://{host}:{port}/write'.format(host=self.host, port=self.port)

        # create a session to reuse the TCP socket when possible
        self.future_session = FuturesSession()

    def send(self, data):
        """
        Send the data in a separate thread via HTTP POST.
        HTTP introduces some overhead, so to avoid blocking the main thread,
        this issues the request in the background.
        """
        self.future_session.post(url=self.url, data=data)
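
A minimal usage sketch for the client above (assumed, not from the original source; the line-protocol payload is only illustrative):

client = HttpClient(host='localhost', port=8094)
# send() returns immediately; the POST to /write runs on a background thread
client.send('weather,city=paris temperature=21.3')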
Example #7
def get_blocks(*heights):
    urls = [get_block_coinsecrets_url(h) for h in heights]
    session = FuturesSession()
    reqs = [session.get(url) for url in urls]
    responses = [r.result() for r in reqs]
    resps_json = [json.loads(r.content.decode()) for r in responses]
    return resps_json
Example #8
    def get_frames(self, count):
        """Get a list of images from Environment Canada."""
        soup = BeautifulSoup(requests.get(self.IMAGES_URL.format(self.station_code)).text, 'html.parser')
        image_links = [tag['href'] for tag in soup.find_all('a') if '.gif' in tag['href']]

        if len([i for i in image_links[:8] if 'COMP' in i]) > 4:
            image_string = '_'.join([self.station_code, 'COMP_PRECIPET', self.get_precip_type() + '.gif'])
        else:
            image_string = '_'.join([self.station_code, 'PRECIPET', self.get_precip_type() + '.gif'])

        images = [tag['href'] for tag in soup.find_all('a') if image_string in tag['href']]

        futures = []
        session = FuturesSession(max_workers=count)

        for i in reversed(images[:count]):
            url = self.FRAME_URL.format(self.station_code, i)
            futures.append(session.get(url=url).result().content)

        def add_layers(frame):
            frame_bytesio = BytesIO()
            base = Image.open(BytesIO(frame)).convert('RGBA')
            base.alpha_composite(self.roads)
            base.alpha_composite(self.cities)
            base.save(frame_bytesio, 'GIF')
            frame_bytesio.seek(0)
            return frame_bytesio.read()

        frames = [add_layers(f) for f in futures if f[0:3] == b'GIF']

        """Repeat last frame."""
        for i in range(0, 2):  # pylint: disable=unused-variable
            frames.append(frames[count - 1])

        return frames
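
Note that the loop above calls `.result()` on each future right away, so the frame downloads happen sequentially. A sketch of the same fetch with all futures created up front (a drop-in for that loop, assuming the same `session`, `images` and `count`):

        frame_futures = [session.get(url=self.FRAME_URL.format(self.station_code, i))
                         for i in reversed(images[:count])]
        futures = [f.result().content for f in frame_futures]  # raw bytes of each frame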
Example #9
    def _chapter_pages(self, soup, html):

        # a <select> tag has options that each points to a page
        neighbour = soup.find('select', id='combobox').find_next_sibling('select')
        opts = neighbour.find_all('option')
        urls = [opt['value'] for opt in opts]

        # Page 1 has already been fetched (stored in this html param, duh!)
        # so let's save ourselves an http request
        pages_htmls = [html]
        urls = urls[1:]
        session = FuturesSession()

        for order, url in enumerate(urls):
            uri = self.netlocs[2] + url
            print(uri)
            res = session.get(uri).result()
            if res.status_code != 200:
                raise HtmlError('cannot fetch')
            pages_htmls.append(res.content)

        returns = []
        for page_html in pages_htmls:
            soup = BeautifulSoup(page_html)
            img_url = soup.find('img', id='mainImg')['src']
            returns.append(img_url)
        return returns
Example #10
class APNsClient(object):
    def __init__(self, cert_file, use_sandbox=False, use_alternative_port=False):
        server = 'api.development.push.apple.com' if use_sandbox else 'api.push.apple.com'
        port = 2197 if use_alternative_port else 443
        self.cert = cert_file
        self.server = "https://{}:{}".format(server, port)
        self.__connection = FuturesSession()
        self.__connection.mount('https://', HTTP20Adapter())

    def send_notification(self, tokens, notification, priority=NotificationPriority.Immediate, topic=None):
        # print(notification.dict())
        json_payload = json.dumps(notification.dict(), ensure_ascii=False, separators=(',', ':')).encode('utf-8')

        headers = {
            'apns-priority': priority.value
        }
        if topic:
            headers['apns-topic'] = topic

        if not isinstance(tokens, list):
            tokens = [tokens]

        for token in tokens:
            url = '{}/3/device/{}'.format(self.server, token)
            self.__connection.post(url, json_payload, headers=headers, cert=self.cert, background_callback=req_callback)
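
`req_callback` is referenced above but not defined in this example. A hypothetical sketch of such a background callback (the name, signature and error handling here are assumptions, not the original implementation):

def req_callback(session, response):
    # a background_callback receives the FuturesSession and the completed Response
    if response.status_code != 200:
        print('APNs push failed: {} {}'.format(response.status_code, response.text))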
Example #11
    def test_futures_session(self):
        # basic futures get
        sess = FuturesSession()
        future = sess.get(httpbin('get'))
        self.assertIsInstance(future, Future)
        resp = future.result()
        self.assertIsInstance(resp, Response)
        self.assertEqual(200, resp.status_code)

        # non-200, 404
        future = sess.get(httpbin('status/404'))
        resp = future.result()
        self.assertEqual(404, resp.status_code)

        def cb(s, r):
            self.assertIsInstance(s, FuturesSession)
            self.assertIsInstance(r, Response)
            # add the parsed json data to the response
            r.data = r.json()

        future = sess.get(httpbin('get'), background_callback=cb)
        # this should block until complete
        resp = future.result()
        self.assertEqual(200, resp.status_code)
        # make sure the callback was invoked
        self.assertTrue(hasattr(resp, 'data'))

        def raising_cb(s, r):
            raise Exception('boom')

        future = sess.get(httpbin('get'), background_callback=raising_cb)
        with self.assertRaises(Exception) as cm:
            resp = future.result()
        self.assertEqual('boom', cm.exception.args[0])
Example #12
 def get_usgs_nearby_cities(self, earthquake):
     """
     performs request on local earthquake nearby cities url and returns the data
     """
     try:
         nearest_cities_object = earthquake[
             "properties"]["products"]["nearby-cities"]
         nearest_cities_url = nearest_cities_object[0][
             "contents"]["nearby-cities.json"]["url"]
     except:
         nearest_cities_url = None
     if nearest_cities_url:
         session = FuturesSession(max_workers=1)
         nearest_cities_response = session.get(
             nearest_cities_url, headers=app.config["API_MANAGER_HEADERS"])
         nearest_cities_details = nearest_cities_response.result().json()
         list_of_nearby_cities = []
         for item in nearest_cities_details:
             city = NearestCity(
                 id=None,
                 distance=item["distance"],
                 direction=item["direction"],
                 name=item["name"],
                 latitude=item["latitude"],
                 longitude=item["longitude"],
                 population=item["population"],
                 earthquake_id=None
             )
             list_of_nearby_cities.append(city)
         earthquake["properties"]["nearest_cities_url"] = nearest_cities_url
         earthquake["properties"]["nearest_cities"] = list_of_nearby_cities
     else:
         earthquake["properties"]["nearest_cities_url"] = None
         earthquake["properties"]["nearest_cities"] = []
     return earthquake
Example #13
def add_list_new() -> None:
    requester = FuturesSession(executor=ProcessPoolExecutor(30), session=requests.session())
    api_key = settings.TBA_API_HEADERS

    team_list_get = lambda p: requester.get(team_by_page_url_template(page=p), headers=api_key)
    team_participation_get = lambda tn: requester.get(team_participation_url_template(team=tn), headers=api_key)

    page_range = get_page_range()

    print("\nStarting %d HTTP requests for team lists, split between %d processes..." % (
        page_range[1] - page_range[0], requester.executor._max_workers))
    team_list_futures = [team_list_get(p) for p in range(*page_range)]
    print("Waiting...")
    wait(team_list_futures)
    print("Done!\n")

    teams_lists = map(lambda f: f.result().json(), team_list_futures)
    teams_data = [item for page_data in teams_lists for item in page_data]
    team_numbers = [*map(lambda t: t['team_number'], teams_data)]

    print("Starting %d HTTP requests for team participation data, split between %d processes..." % (
        len(team_numbers), requester.executor._max_workers))
    team_participation_futures = [team_participation_get(tn) for tn in team_numbers]
    print("Waiting...")
    wait(team_participation_futures)
    print("Done!\n")

    team_participations = map(lambda f: f.result().json(), team_participation_futures)
    arg_list = zip(team_numbers, teams_data, team_participations)

    for args in arg_list:
        add_team(*args)
Example #14
def crowdsource_undetected(related_list, files_path, instructions, data_for):
    # if no files found then return zero
    if not os.listdir(files_path):
        return 0

    # Remove trailing slashes
    files_path = os.path.normpath(files_path)
    # Get an api crowd user
    api_user = get_api_user()
    crowd_user_id = 0
    if api_user and 'Id' in api_user:
        crowd_user_id = api_user['Id']

    # get a crowd job
    crowd_job_id = 0
    if crowd_user_id > 0:
        crowd_job_id = create_api_job(crowd_user_id, os.path.basename(files_path), instructions)
    zip_path = None
    if crowd_job_id > 0:
        # save json object to json file
        if related_list is not None and len(related_list) > 0:
            sio = StringIO()
            json.dump(related_list, sio)
            with open(os.path.join(files_path,'%s.json'%data_for), "w") as fjson:
                fjson.write(sio.getvalue())
        # compress all files in files_path directory
        zip_path = os.path.join(files_path, '%s.zip'%data_for)
        buff = StringIO()
        with zipfile.ZipFile(buff, 'w', zipfile.ZIP_DEFLATED) as zipf:
            print 'zipping ' + zip_path
            zipdir(files_path, zipf)
            print 'zipped ' + zip_path

        session = FuturesSession()
        # api_uri = 'http://api.opescode.com/api/UserData?id=%s' %str(job_api_id)
        api_uri = '{0}/api/UserData?id={1}'.format(service_base_uri, str(crowd_job_id))
        logger.info('Calling web api {0} for {1}'.format(api_uri, zip_path))

        def bg_cb(sess, resp):
            print zip_path, resp.status_code
            # if failed then save the files to the recording physical folder
            if resp.status_code != 200:
                print 'Post file {0} failed with stc={1}'.format(zip_path, str(resp.status_code))
                # For now, I will not log this until I find a better way to pass logger to the callback method. Note: callback method has no access to self
                logger.error('Post file {0} failed with stc={1}'.format(zip_path, str(resp.status_code)))
            else:
                logger.info('%s posted successfully'%zip_path)
        try:
            with open(zip_path, "wb") as f: # use `wb` mode
                print 'saving zip ' + zip_path
                f.write(buff.getvalue())
                print 'zip saved ' + zip_path
            if not archive_only:
                print 'posting ' + zip_path
                session.post(api_uri, files={"archive": buff.getvalue()}, background_callback=bg_cb)
                print 'posted ' + zip_path
            logger.info('posted %s and awaiting api response.'%zip_path)
        except Exception as ex:
            logger.error('Exception occurred while calling web api: %s' % ex)
    return crowd_job_id
Example #15
def send_ga_event(event, user):
    session = FuturesSession()
    payload = {
        'v': 1,
        'tid': settings.GOOGLE_TRACKING_ID,
        'uid': google_user_id(user),
        't': 'event',
        'ec': 'email',
        'ea': event.event_type,
        'cm': 'email',
    }
    if event.esp_event:
        payload['ua'] = event.esp_event.get('user-agent')
        payload['dt'] = event.esp_event.get('subject', [None])[0]
        payload['cn'] = event.esp_event.get('campaign_name', None)
        payload['cs'] = event.esp_event.get('campaign_source', None)
        payload['cc'] = payload['el'] = event.esp_event.get(
            'email_id', None)
        payload['dp'] = "%s/%s" % (
            payload['cc'], event.event_type)
    else:
        logger.warn("No ESP event found for event: %s" % event.__dict__)
    logger.info("Sending mail event data Analytics: %s" % payload)
    session.post(
        'https://www.google-analytics.com/collect', data=payload)
Example #16
    def _chapter_pages(self, soup, html):
        # For webtoons, all pages are shown in a single page.
        # When that's the case, there's this element that asks if you want to
        # view page-by-page instead. Let's use this element to check if we're
        # parsing a webtoon chapter.
        webtoon = soup.find("a", href="?supress_webtoon=t")
        if webtoon is not None:
            img_tags = soup.find_all(_page_img_tag)
            return [tag["src"] for tag in img_tags]

        # a <select> tag has options that each points to a page
        opts = soup.find("select", id="page_select").find_all("option")
        urls = [opt["value"] for opt in opts]

        # Page 1 has already been fetched (stored in this html param, duh!)
        # so let's save ourselves an http request
        pages_htmls = [html]
        urls = urls[1:]
        session = FuturesSession()

        for order, url in enumerate(urls):
            res = session.get(url).result()
            if res.status_code != 200:
                raise HtmlError("cannot fetch")
            pages_htmls.append(res.content)

        returns = []
        for page_html in pages_htmls:
            soup = BeautifulSoup(page_html)
            img_url = soup.find("img", id="comic_page")["src"]
            returns.append(img_url)
        return returns
Example #17
	def requestPool(parameters, url):
		"""
		Generator that asynchronously processes profile requests and yields profile futures.
		"""
		session = FuturesSession(max_workers=10)
		for parameter in parameters:
			future = session.get(url, params=parameter)
			yield future
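
An illustrative way to consume the generator above (assumed usage, not part of the original code): iterate over the yielded futures and resolve each one as it is needed.

for future in requestPool(parameters, url):
    profile = future.result()  # blocks only until this particular request finishes
    print(profile.status_code, profile.url)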
Example #18
 def async_next(self, list_url):
     '''utility to dowload like async.io multiple url
     and send them to extract_nexts
     '''
     session = FuturesSession(max_workers=5)
     for url in list_url:
         future = session.get(url)
         future.add_done_callback(self.extract_nexts)
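
Unlike `background_callback`, a callback registered with `add_done_callback` receives the Future itself rather than the Response, so `extract_nexts` presumably unwraps it. A hypothetical sketch (the real implementation is not shown here):

 def extract_nexts(self, future):
     response = future.result()  # unwrap the completed Response
     print(response.status_code, response.url)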
Example #19
def async_requests(locations, site=None):
    session = FuturesSession()
    check_date = datetime.now() + timedelta(hours=-4)
    for location in locations:
        gig = Gigs.select().where(Gigs.location.contains(location)).order_by(Gigs.datetime.desc()).first()
        if (gig is None) or ((datetime.strptime(gig.datetime, '%Y-%m-%d %H:%M') < check_date)):
            url = "https://{}.craigslist.org/search/{}/".format(location, (site or CRAIGSLIST_SITE))
            future = session.get(url, background_callback=insert_callback)
Example #20
def get(request):
    session = FuturesSession(max_workers=1)
    future = next(as_completed([session.get(
        request.url, headers=request.headers, timeout=request.timeout)]))
    if future.exception() is not None:
        return DownloadError(request, future.exception())
    else:
        resp = future.result()
        return HtmlDocument(resp.url, resp.content)
Example #21
    def send_single(receiver, body):
        self = agent_repo.get_byname('~self')

        url = 'http://{0}:8888/incoming'.format(receiver.hostname)
        session = FuturesSession()
        response_future = session.post(url, data={'sender': self.hostname, 'content': body})

        # wait for the response to come in
        response_future.result()
Example #22
def retrieve_users_status(contest_id, handles):
    session = FuturesSession(max_workers=1)
    futures = {}
    for handle in handles:
        futures[handle] = session.get("http://codeforces.com/api/contest.status?contestId=%d&handle=%s" % (contest_id, handle))
    ret = {}
    for handle, future in futures.items():
        response = future.result()
        ret[handle] = response.json()['result']
    return ret
Example #23
def load_formats():
    session = FuturesSession()

    def bg_cb(sess, resp):
        resp.data = utils.load_js_obj_literal(resp.text)

    future = session.get('https://raw.githubusercontent.com/Zarel/Pokemon-Showdown/master/data/formats-data.js',
                         background_callback=bg_cb)
    r = future.result()
    return r.data
Example #24
 def test_supplied_session(self):
     """ Tests the `session` keyword argument. """
     requests_session = session()
     requests_session.headers['Foo'] = 'bar'
     sess = FuturesSession(session=requests_session)
     future = sess.get(httpbin('headers'))
     self.assertIsInstance(future, Future)
     resp = future.result()
     self.assertIsInstance(resp, Response)
     self.assertEqual(200, resp.status_code)
     self.assertEqual(resp.json()['headers']['Foo'], 'bar')
Example #25
class Scraper(object):
    def __init__(self, url):
        self.url = url
        self.session = FuturesSession(max_workers=100)
        adapter = requests.adapters.HTTPAdapter(pool_connections=100, pool_maxsize=100)
        self.session.mount('http://', adapter)
        self.session.mount('https://', adapter)

    def _extract_image_urls(self, soup):
        for img in soup.findAll("img", src=True):
            yield urljoin(self.url, img["src"])

    def _find_thumbnail_image(self):
        content_type, content = _fetch_url(self.url, session=self.session)
        soup = BeautifulSoup(content)
        image_urls = self._extract_image_urls(soup)
        image_urls = [u for u in image_urls] # turn to list
        image_urls = list(set(image_urls)) # lose duplicates
        image_sizes = _parallel_get_sizes(image_urls, self.session)
        logging.debug('got sizes for {} images'.format(len(image_sizes)))
        # find biggest
        max_area = 0
        max_url = None
        for image_url in image_urls:
            size = image_sizes[image_url]
            if not size:
                continue

            # ignore little images
            area = size[0] * size[1]
            if area < 5000:
                logging.debug('ignore little {}'.format(image_url))
                continue

            # ignore excessively long/wide images
            if max(size) / min(size) > 1.5:
                logging.debug('ignore dimensions {}'.format(image_url))
                continue

            # penalize images with "sprite" in their name
            if 'sprite' in image_url.lower():
                logging.debug('penalizing sprite {}'.format(image_url))
                area /= 10

            if area > max_area:
                max_area = area
                max_url = image_url
        return max_url


    def scrape(self):
        thumbnail_url = self._find_thumbnail_image()
        #thumbnail = _make_thumbnail_from_url(thumbnail_url, referer=self.url)
        return thumbnail_url
Example #26
 def testMakePosts(self):
     s = FuturesSession()
     new_post = json.dumps({'content': 'testing'})
     print new_post
     p = s.post('https://cs242project.herokuapp.com/submitPost', data=new_post)
     res = p.result()
     print res
     print res.content
     r = s.get('https://cs242project.herokuapp.com/getPosts')
     res2 = r.result()
     print res2.content
     self.assertEqual("test", "test")
Example #27
def _async_requests(urls):
    """
    Sends multiple non-blocking requests. Returns
    a list of responses.

    :param urls:
        List of urls
    """

    session = FuturesSession(max_workers=30)
    futures = [session.get(url) for url in urls]
    return [future.result() for future in futures]
Example #28
    def test_redirect(self):
        """ Tests for the ability to cleanly handle redirects. """
        sess = FuturesSession()
        future = sess.get(httpbin('redirect-to?url=get'))
        self.assertIsInstance(future, Future)
        resp = future.result()
        self.assertIsInstance(resp, Response)
        self.assertEqual(200, resp.status_code)

        future = sess.get(httpbin('redirect-to?url=status/404'))
        resp = future.result()
        self.assertEqual(404, resp.status_code)
Example #29
File: scrape.py Project: Pync/Pync
def listings(base_url, needles):
    """ takes the needles as a || seperated list of needles and
    returns a map of neeldes to a list of dictionaries for matches """
    needles = [kw.strip() for kw in needles.split("||")]

    # Prepare the URL for requests
    url = base_url + "/tv/getProgInfo?major={}"
    session = FuturesSession(max_workers=30)

    # initialize our matches
    matches = {}
    for needle in needles:
        matches[needle] = []

    # Check each channel concurrently
    responses = {}
    for i in SCAN_RANGE:
        responses[i] = session.get(url.format(i))

    # Wait on all responses
    for i in SCAN_RANGE:
        responses[i] = responses[i].result()
        log.debug("channel {} has responded".format(i))

    # Filter out non-200 responses
    responses_200 = []
    for i in SCAN_RANGE:
        if responses[i].status_code == 200:
            responses_200.append(responses[i].text)

    # Make nice JSON of listings
    listings = []
    for response in responses_200:
        tmp = json.loads(response)
        tmp = {
            "title": tmp["title"],
            "major": tmp["major"],
            "callsign": tmp["callsign"],
            "duration": tmp["duration"],
            "startTime": tmp["startTime"],
            "isRecording": tmp["isRecording"],
        }
        listings.append(tmp)

    # Map listings to matching needles
    for listing in listings:
        for needle in needles:
            if needle.lower() in listing["title"].lower():
                log.info("Match for {} with {}".format(needle, listing["title"]))
                matches[needle].append(listing)

    return matches
Example #30
 def get_usgs_details_response(self, url):
     """
     performs request on local earthquake details url and returns the data
     """
     session = FuturesSession(max_workers=1)
     usgs_api_details = session.get(
         url, headers=app.config["API_MANAGER_HEADERS"])
     try:
         earthquake_details = usgs_api_details.result().json()
         return earthquake_details
     except requests.exceptions.RequestException as exception:
         logger.error("%s" % exception)
         return False
Example #31
import plistlib
import importlib
from importlib import util
from threading import Thread
from platform import system
from urllib.parse import urlparse
import utils.config
import modules.consumerServer
import unicodedata
import pandas
from unidecode import unidecode
from itertools import tee, islice, chain
# asynchronous session.post requests to log server, used by multiple modules
from requests_futures.sessions import FuturesSession

session = FuturesSession()

# ************
# Constants
# ************

WINDOWS = (system() == "Windows")
MAC = (system() == "Darwin")
LINUX = (system() == "Linux")

if WINDOWS:
    import winreg
    from win32com.shell import shell, shellcon
    import win32gui

    RECENT_ITEMS_PATH_WIN = shell.SHGetFolderPath(0, shellcon.CSIDL_RECENT,
Example #32
async def lot_updater():
    global db, limiter, update_id
    while True:
        try:
            await asyncio.sleep(1)
            printer('start')
            g_actives = google('col_values', 1)
            stamp2 = datetime.now().timestamp()
            session = FuturesSession()
            temp_db = copy.copy(db)
            update_array = []
            update_id += 1
            futures = []

            for i in temp_db:
                lot = db.get(i)
                if lot['action'] == 'Add':
                    google('insert', [i, lot['@cw3auction'][0]])
                    g_actives.insert(2, str(i))
                    db[i]['action'] = 'None'

            for i in temp_db:
                if db[i]['action'] != 'deleted':
                    lot = db.get(i)
                    if lot['update_id'] + 1 < update_id:
                        update_array = []
                        update_id -= 1
                        limiter = 1
                    if lot['update_id'] + 1 == update_id and limiter <= 300:
                        db[i]['update_id'] = update_id
                        update_array.append(i)
                        limiter += 1

            for i in update_array:
                url = 'https://t.me/chatwars3/' + str(i) + '?embed=1'
                futures.append(session.get(url))

            for future in concurrent.futures.as_completed(futures):
                result = former(future.result().content)
                last_time_request()
                if result[0] != 'False':
                    db[result[0]]['@chatwars3'] = result
                    lot_cw3 = db[result[0]]['@cw3auction']
                    if result[2] != lot_cw3[1]:
                        db[result[0]]['action'] = 'Update'
                    if result[3] != '#активен':
                        db[result[0]]['action'] = 'Delete'

            for i in temp_db:
                lot = db.get(i)
                if lot['action'] in ['Update', 'Delete']:
                    try:
                        post = await bot.edit_message_text(
                            lot['@chatwars3'][1],
                            idChannel,
                            lot['@cw3auction'][0],
                            parse_mode='HTML')
                        print_text = 'Post updated'
                        if lot['action'] == 'Update':
                            db[i]['action'] = 'None'
                            db[i]['@cw3auction'] = form_mash(
                                post['message_id'], post['text'])
                        else:
                            google('delete', g_actives.index(str(i)) + 1)
                            g_actives.pop(g_actives.index(str(i)))
                            db[i]['action'] = 'deleted'
                            print_text += ' (ended) and removed from updates'
                    except Exception as e:
                        print_text = 'Post not changed'
                        search = re.search(
                            'exactly the same as a current content', str(e))
                        if search:
                            print_text += ', because it is exactly the same'
                            if lot['action'] == 'Update':
                                db[i]['@cw3auction'][1] = db[i]['@chatwars3'][
                                    2]
                                db[i]['@cw3auction'][2] = db[i]['@chatwars3'][
                                    3]
                                db[i]['action'] = 'None'
                            else:
                                print_text += ', and it has also ended and was removed from updates'
                                google('delete', g_actives.index(str(i)) + 1)
                                g_actives.pop(g_actives.index(str(i)))
                                db[i]['action'] = 'deleted'
                        else:
                            print_text += ' ' + str(e)
                    printer(
                        str(i) + '-' + str(lot['@cw3auction'][0]) + ' ' +
                        print_text)
            limiter = 1
            printer('end ' + str(datetime.now().timestamp() - stamp2))
            delay = 60 - (time_now() - last_requested)
            if delay >= 0:
                await asyncio.sleep(delay)
        except Exception:
            await executive()
Example #33
class ResourceSyncPuSH(object):
    """
    The base class for the publisher, hub and resource. Contains
    methods for reading config files, making http requests, error handling,
    etc.
    """
    def __init__(self):
        """
        Inititalizes the Futures-Requests session with the
        max number of workers and retires.
        """

        # max workers and retries should be configurable?
        self.session = FuturesSession(max_workers=10)
        adapter = HTTPAdapter(max_retries=3)
        self.session.mount("http://", adapter)
        self._start_response = None

        # config parameters
        self.config = {}
        self.config['log_mode'] = ""
        self.config['mimetypes'] = []
        self.config['trusted_publishers'] = []
        self.config['trusted_topics'] = []
        self.config['my_url'] = ""
        self.config['hub_url'] = ""
        self.config['topic_url'] = ""
        self.config['subscribers_file'] = ""
        self.config['server_path'] = ""

        # logging messages
        self.log_msg = {}
        self.log_msg['payload'] = ""
        self.log_msg['msg'] = []
        self.log_msg['link_header'] = ""
        self.log_msg['module'] = ""

    def get_config(self, classname=None):
        """
        Finds and reads the config file. Reads the appropriate config values
        for the classname provided. For eg: if the classname is hub, it will
        read from the [hub] section in the config file.
        """

        if not classname:
            classname = self.__class__.__name__.lower()

        self.log_msg['module'] = classname

        # NOTE: more paths can be added to look for the config files.
        # order of files matter, the config in the first file
        # will be overwritten by the values in the next file.
        cnf_file = []
        cnf_file.extend([
            os.path.join(os.path.dirname(__file__),
                         "../conf/resourcesync_push.ini"),
            "/etc/resourcesync_push.ini",
            "/etc/resourcesync_push/resourcesync_push.ini",
        ])

        # loading values from configuration file
        conf = ConfigParser.ConfigParser()
        conf.read(cnf_file)
        if not conf:
            raise IOError("Unable to read config file")

        if classname == "hub":
            self.get_hub_config(conf)
        elif classname == "publisher":
            self.get_publisher_config(conf)
        elif classname == "subscriber":
            try:
                self.config['my_url'] = conf.get("subscriber", "url")
            except (NoSectionError, NoOptionError):
                print("The url value for subscriber is required \
                      in the config file.")
                raise

        self.get_demo_config(conf)

    def get_demo_config(self, conf):
        """
        Reads the [demo_hub] section from the config file if the
        log mode is set to 'demo'.
        """
        try:
            self.config['log_mode'] = conf.get("general", "log_mode")
        except (NoSectionError, NoOptionError):
            pass

        if not self.config['log_mode'] == "demo":
            return

        try:
            self.config['demo_hub_url'] = conf.get("demo_mode", "hub_url")
        except (NoSectionError, NoOptionError):
            print("Demo log mode requires a hub_url in the \
                  [demo_mode] section")
            raise

        try:
            self.config['demo_topic_url'] = conf.get("demo_mode", "topic_url")
        except (NoSectionError, NoOptionError):
            print("Demo log mode requires a topic_url in the \
                  [demo_mode] section")
            raise
        return

    def get_hub_config(self, conf):
        """
        Reads the [hub] section from the config file.
        """

        try:
            self.config['mimetypes'] = conf.get("hub", "mimetypes")
        except (NoSectionError, NoOptionError):
            # resourcesync hub by default
            self.config['mimetypes'] = "application/xml"

        try:
            self.config['trusted_publishers'] = conf.get(
                "hub", "trusted_publishers")
        except (NoSectionError, NoOptionError):
            # will allow any publisher
            self.config['trusted_publishers'] = []

        try:
            self.config['trusted_topics'] = conf.get("hub", "trusted_topics")
        except (NoSectionError, NoOptionError):
            # will accept any topic
            self.config['trusted_topics'] = []

        try:
            self.config['my_url'] = conf.get("hub", "url")
        except (NoSectionError, NoOptionError):
            print("The url value for hub is required in the config file.")
            raise

        self.config['subscribers_file'] = os.path.join(
            os.path.dirname(__file__), "../db/subscriptions.pk")
        try:
            self.config['subscribers_file'] = conf.get("hub",
                                                       "subscribers_file")
        except (NoSectionError, NoOptionError):
            pass

        if not os.path.isfile(self.config['subscribers_file']):
            open(self.config['subscribers_file'], 'a').close()

        return

    def get_publisher_config(self, conf):
        """
        Reads the [publisher] section in the config file.
        """

        try:
            self.config['my_url'] = conf.get("publisher", "url")
        except (NoSectionError, NoOptionError):
            print("The url value for publisher is required \
                  in the config file.")
            raise

        try:
            self.config['server_path'] = conf.get("publisher", "server_path")
        except (NoSectionError, NoOptionError):
            pass

        try:
            self.config['hub_url'] = conf.get("publisher", "hub_url")
        except (NoSectionError, NoOptionError):
            print("The hub_url value for publisher is required \
                  in the config file.")
            raise

        try:
            self.config['topic_url'] = conf.get("publisher", "topic_url")
        except (NoSectionError, NoOptionError):
            print("The topic_url value for publisher is required \
                  in the config file.")
            raise

    def send(self, url, method='POST', data=None, callback=None, headers=None):
        """
        Performs http POST and GET requests. Uses requests-futures
        to make (threaded) async requests.
        """

        if method == 'POST':
            return self.session.post(url,
                                     data=data,
                                     background_callback=callback,
                                     headers=headers)
        elif method == 'GET':
            return self.session.get(url, headers=headers)
        elif method == 'HEAD':
            return self.session.head(url, headers=headers)
        else:
            return

    def respond(self, code=200, msg="OK", headers=None):
        """
        Sends the appropriate http status code with an
        error message.
        """

        print("HTTP %s: %s" % (code, msg))

        if not headers:
            headers = []
        if not str(code) == "204":
            headers.append(("Content-Type", "text/html"))

        code = str(code) + " " + HTTP_STATUS_CODE[code]

        self._start_response(code, headers)
        return [msg]

    @staticmethod
    def get_topic_hub_url(link_header):
        """
        Uses the parse_header_links method in requests to parse link
        headers and return the topic and hub urls.
        """

        links = parse_header_links(link_header)
        topic = ""
        hub_url = ""
        for link in links:
            if link.get('rel') == 'self':
                topic = link.get('url')
            elif link.get('rel') == 'hub':
                hub_url = link.get('url')
        return (topic, hub_url)

    def make_link_header(self, hub_url=None, topic_url=None):
        """
        Constructs the resourcesync link header.
        """

        if not hub_url and not topic_url:
            return self.respond(code=400,
                                msg="hub and topic urls are not set \
                                in config file.")
        link_header = []
        link_header.extend(["<", topic_url, ">;rel=", "self", ","])
        link_header.extend([" <", hub_url, ">;rel=", "hub"])
        return "".join(link_header)

    def log(self):
        """
        Log handler. Will send the log info as json to the
        demo hub if log_mode value is set to demo in the config file.
        """
        if self.config['log_mode'] == 'demo':
            headers = {}
            headers['Link'] = self.make_link_header(
                hub_url=self.config['demo_hub_url'],
                topic_url=self.config['demo_topic_url'])
            self.send(self.config['demo_hub_url'],
                      data=json.dumps(self.log_msg),
                      headers=headers)
        else:
            print(self.log_msg)
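
A minimal usage sketch for the `send` helper defined above (assumed, not from the original project; the hub URL and payload are illustrative):

def on_response(session, response):
    print("hub replied with HTTP %s" % response.status_code)

pusher = ResourceSyncPuSH()
pusher.send("http://example.org/hub", data="payload", callback=on_response)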
Example #34
from bs4 import BeautifulSoup
from flask import jsonify
from requests_futures.sessions import FuturesSession
import requests
import sys
import os

num_workers = os.cpu_count() * 2
session = FuturesSession(max_workers=num_workers)


def valid_protocol(url):
    # Validate argument starts with http or https
    return url.startswith('http://') or url.startswith('https://')


def link_check(url):
    try:
        r = requests.get(url)
        soup = BeautifulSoup(r.text, 'html.parser')
        jsonData = []
        urls = []
        futures = []
        for link in soup.find_all('a'):
            link = str(link.get('href'))
            link = link.replace("'", '"')
            if valid_protocol(link):
                urls.append(link)
                futures.append(session.get(urls.pop(0)))
        for future in futures:
            # Wait for results
Example #35
class MsgStore(object):
    """Client library for message store server"""
    def __init__(self, baseurl=_baseurl):
        self.baseurl = baseurl
        self.headers = []
        self.cache_dirty = True
        self.last_sync = time.time()
        self.servertime = 0
        self.session = requests.session()
        self.futures_session = FuturesSession(session=self.session)
        self._get_queue = []
        self._post_queue = []
        self._insert_lock = Lock()
        self._gq_lock = Lock()

    def _sync_headers(self):
        now = time.time()
        if not self.cache_dirty:
            delay = now - self.last_sync
            if (now - self.last_sync) < _cache_expire_time:
                return True
        #print('request headers from ' + self.baseurl)
        r = None
        try:
            r = requests.get(self.baseurl + _server_time,
                             timeout=_default_timeout)
        except (Timeout, ConnectionError, HTTPError):
            return False
        if r.status_code != 200:
            return False
        servertime = json.loads(r.text)['time']
        for h in self.headers:
            if servertime > h.expire:
                self._insert_lock.acquire()
                #print('expiring ' + h.Iraw().decode())
                self.headers.remove(h)
                self._insert_lock.release()
        self.last_sync = time.time()
        try:
            r = self.session.get(self.baseurl + _headers_since +
                                 str(self.servertime))
        except (Timeout, ConnectionError, HTTPError):
            return False
        if r.status_code != 200:
            return False
        self.servertime = servertime
        self.cache_dirty = False
        #remote = sorted(json.loads(r.text)['header_list'],
        #                key=lambda k: int(k[6:14],16), reverse=True)
        remote = json.loads(r.text)['header_list']
        for rstr in reversed(remote):
            rhdr = RawMessageHeader()
            if rhdr._deserialize_header(rstr.encode()):
                self._insert_lock.acquire()
                if rhdr not in self.headers:
                    self.headers.insert(0, rhdr)
                self._insert_lock.release()
        self._insert_lock.acquire()
        self.headers.sort(reverse=True)
        self._insert_lock.release()
        return True

    def get_headers(self):
        self._sync_headers()
        return self.headers

    def get_message(self, hdr, callback=None):
        self._sync_headers()
        if hdr not in self.headers:
            return None
        r = None
        try:
            r = self.session.get(self.baseurl + _download_message +
                                 hdr.Iraw().decode(),
                                 stream=True)
        except (Timeout, ConnectionError, HTTPError):
            return None
        if r.status_code != 200:
            return None
        raw = b''
        for chunk in r:
            raw += chunk
        msg = Message.deserialize(raw)
        if callback:
            return callback(msg)
        return msg

    def _cb_get_async(self, s, r):
        qen = [qe for qe in self._get_queue if qe[0] == r.url]
        cb = qen[0][1]
        self._gq_lock.acquire()
        self._get_queue.remove(qen[0])
        self._gq_lock.release()
        if cb is None:
            return None
        if r.status_code != 200:
            print('Async Reply Error ' + str(r.status_code) + ' ' + r.url)
            return cb(None)
        msg = Message.deserialize(r.text.encode())
        return cb(msg)

    def get_message_async(self, hdr, callback):
        self._sync_headers()
        if hdr not in self.headers:
            return False
        url = self.baseurl + _download_message + hdr.Iraw().decode()
        qentry = (url, callback)
        if qentry in self._get_queue:
            return False
        if len(self._get_queue) > _high_water:
            while len(self._get_queue) > _low_water:
                time.sleep(1)
        self._gq_lock.acquire()
        self._get_queue.append(qentry)
        self._gq_lock.release()
        r = self.futures_session.get(url,
                                     background_callback=self._cb_get_async)
        return True

    def post_message(self, msg):
        if msg in self.headers:
            return
        raw = msg.serialize()
        nhdr = RawMessageHeader.deserialize(raw)
        f = io.StringIO(raw.decode())
        files = {'message': ('message', f)}
        try:
            r = self.session.post(self.baseurl + _upload_message, files=files)
        except (Timeout, ConnectionError, HTTPError):
            return
        if r.status_code != 200:
            return
        self._insert_lock.acquire()
        if nhdr not in self.headers:
            self.headers.insert(0, nhdr)
        self._insert_lock.release()
        self.cache_dirty = True
Example #36
#Esi calling 1.2

import json
import time
import base64
import random
import sys
import webbrowser

from datetime import datetime
from datetime import timedelta

from requests_futures.sessions import FuturesSession

session = FuturesSession(max_workers=90)

scopes = ''
user_agent = 'ESI calling script by Hirmuolio'
config = {}


def load_config(loaded_config):
    global config
    try:
        client_id = loaded_config['client_id']
        client_secret = loaded_config['client_secret']
        config = loaded_config
    except KeyError:
        #Config found but no wanted content
        print(
            '  no client ID or secret found. \nRegister at https://developers.eveonline.com/applications to get them'
Example #37
class SteamData:
    R_URL = "http://store.steampowered.com/appreviews/%d?json=1&filter=recent&start_offset=%d"
    GL_URL = "http://api.steampowered.com/ISteamApps/GetAppList/v0002/?key=STEAMKEY&format=json"
    G_URL = "http://store.steampowered.com/api/appdetails?appids=%d"
    U_URL = "http://api.steampowered.com/ISteamUser/GetPlayerSummaries/v0002/?key=%s&steamids=%s"
    UA_URL = "http://api.steampowered.com/ISteamUserStats/GetPlayerAchievements/v0001/?appid=%d&key=%s&steamid=%s"
    US_URL = "http://api.steampowered.com/ISteamUserStats/GetUserStatsForGame/v0002/?appid=%d&key=%s&steamid=%s"
    UO_URL = " http://api.steampowered.com/IPlayerService/GetOwnedGames/v0001/?key=%s&steamid=%s&format=json"
    UB_URL = "http://api.steampowered.com/ISteamUser/GetPlayerBans/v1/?key=%s&steamids=%s"
    APIKEY = "9F041FB9B406DCC0FD036440C6BC459C"
    authors = set()
    authorsList = []
    currentAppId = -1

    def __init__(self):
        self.client = pymongo.MongoClient()
        self.db = self.client.steam
        self.session = FuturesSession(max_workers=100)
        self.torOff = False

    def offTorBrowser(self):
        self.torOff = True

    def pysocksSetting(self):
        if self.torOff == True:
            return

        socks.set_default_proxy(socks.SOCKS5, "localhost", 9150)
        socket.socket = socks.socksocket
        ipch = 'http://icanhazip.com'
        print(urlopen(ipch).read())

    def newAppList(self):
        appList = requests.get(self.GL_URL).json()["applist"]["apps"]
        return appList

    def getAppids(self):
        return self.db.appList.distinct("appid")

    def updateAppList(self):
        newApplist = self.newAppList()
        newAppset = set([i["appid"] for i in newApplist])
        curAppset = set(self.getAppids())
        addtionalAppids = list(newAppset - curAppset)
        appid_name = {i["appid"]: i["name"] for i in newApplist}
        additionalDoc = [{
            "appid": i,
            "name": appid_name[i]
        } for i in addtionalAppids]
        if len(additionalDoc) == 0:
            return
        self.db.appList.insert_many(additionalDoc)
        print("add AppList below...")
        print("total app count: %d" % self.db.appList.count())

    def __getGameDetailMapper(self, x):
        try:
            return x.json()[list(x.json().keys())[0]]["data"]
        except:
            return

    def getAppDetails(self):
        curAppidsSet = set(self.getAppids())
        updatedDetailsSet = set(self.db.appDetail.distinct('steam_appid'))
        ids2Add = curAppidsSet - updatedDetailsSet
        for idsChunk in list(self.chunks(list(ids2Add), 100)):
            rs = (self.session.get(self.G_URL % (i)) for i in idsChunk)
            myresponse = map(lambda x: x.result(), rs)
            appDetails_ = map(lambda x: self.__getGameDetailMapper(x),
                              myresponse)
            appDetails = [i for i in appDetails_ if i != None]
            yield appDetails

    def updateAppDetail(self):
        for appDetails in self.getAppDetails():
            self.db.appDetail.insert_many(appDetails)
            print("insert %d app details" % len(appDetails))

    def updateAllAppReviews(self):
        appids = self.getAppids()
        for i in appids:
            self.updateAppReviews(i)
            print("appid %d, insert job is complete" % i)
        return

    def reqReviewList(self, appid, rng):
        rs = (self.session.get(self.R_URL % (appid, i)) for i in rng)
        rs_a = list(map(lambda x: x.result(), rs))
        res = [j for i in rs_a if i != None for j in i.json()['reviews']]
        for res_i in res:
            try:
                res_i.update({"appid": appid})
                self.authors.add(res_i['author']['steamid'])
            except:
                continue
        return res

    def getAllReviews(self, appid):
        return self.db.appReview.find({"appid": appid})

    def updateAppReviews(self, appid):
        self.pysocksSetting()
        self.currentAppId = appid
        failCnt = 0
        insufficientCnt = 0
        if self.db.appReview.count({"appid": appid}) == 0:
            print("inserting reviews first time")
            n = 0
            while failCnt < 10:
                rng = range(n, (n + 1) + 80, 20)
                res = self.reqReviewList(appid, rng)
                if len(res) == 0:
                    failCnt += 1
                    continue
                elif len(res) < 100:
                    print("insufficient data")
                    insufficientCnt += 1
                    if insufficientCnt < 10:
                        continue

                n = rng[-1]
                failCnt = 0
                insufficientCnt = 0
                self.db.appReview.insert_many(res)
                print("review size %d" % (self.db.appReview.count()), end="\r")

        else:
            print("insert additional reviews")
            lastUpdatedDt = self.db.appReview.find({
                "appid": appid
            }).sort('timestamp_created', -1)[0]['timestamp_created']
            n = 0
            isEnd = False
            inTheEnd = False
            while not isEnd:
                rng = range(n, (n + 1) + 80, 20)
                res = self.reqReviewList(appid, rng)
                if len(res) == 0:
                    failCnt += 1
                    continue
                elif len(res) < 100:
                    print("insufficient data")
                    insufficientCnt += 1
                    if insufficientCnt < 10:
                        continue

                n = rng[-1]
                failCnt = 0
                insufficientCnt = 0

                m = len(res) - 1

                while True:
                    if m < 0:
                        isEnd = True
                        break
                    if res[m]['timestamp_created'] > lastUpdatedDt:
                        print(res[m]['timestamp_created'], lastUpdatedDt)
                        self.db.appReview.insert_many(res[:m + 1])
                        if inTheEnd:
                            isEnd = True
                        break
                    else:
                        m -= 1
                        inTheEnd = True

        print("insert complete total review of AppID: %d is %d" %
              (appid, self.db.appReview.count({"appid": appid})))
        self.updateUserInfos()
        f = open("reviewUsers.txt", "w")
        for i in list(self.authors):
            f.write(str(i) + "\n")
        f.close()
        self.authors.clear()
        return

    def getUserSummary(self, userlist):
        rs = (self.session.get(self.U_URL % (self.APIKEY, steamid))
              for steamid in userlist)
        t0 = time.time()
        res_ = list(map(lambda x: x.result(), rs))
        status = [i.status_code for i in res_ if i is not None]
        print("summary status", status[:5])
        print("summary req time %d" % (time.time() - t0))
        while len(status) == 0:
            self.pysocksSetting()
            rs = (self.session.get(self.U_URL % (self.APIKEY, steamid))
                  for steamid in userlist)
            res_ = list(map(lambda x: x.result(), rs))
            status = [i.status_code for i in res_ if i is not None]

        docs = []
        for i, steamid_i in zip(res_, userlist):
            try:
                doc = i.json()["response"]['players'][0]
            except (ValueError, KeyError, IndexError):
                continue
            docs.append(doc)
        return docs

    def getUserOwns(self, userlist):
        rs = (self.session.get(self.UO_URL % (self.APIKEY, steamid))
              for steamid in userlist)
        t0 = time.time()
        res_ = list(map(lambda x: x.result(), rs))
        status = [i.status_code for i in res_ if i is not None]
        print("owns status", status[:5])
        print("owns req time %d" % (time.time() - t0))
        while len(status) == 0:
            self.pysocksSetting()
            rs = (self.session.get(self.UO_URL % (self.APIKEY, steamid))
                  for steamid in userlist)
            res_ = list(map(lambda x: x.result(), rs))
            status = [i.status_code for i in res_ if i is not None]

        docs = []
        for i, steamid_i in zip(res_, userlist):
            try:
                doc = i.json()["response"]
            except (ValueError, KeyError):
                continue
            doc.update({"steamid": steamid_i})
            docs.append(doc)
        return docs

    def getUserAchieve(self, userlist):
        rs = (self.session.get(self.UA_URL %
                               (self.currentAppId, self.APIKEY, steamid))
              for steamid in userlist)
        t0 = time.time()
        res_ = list(map(lambda x: x.result(), rs))
        status = [i.status_code for i in res_ if i is not None]
        print("achieve status", status[:5])
        print("achieve req time %d" % (time.time() - t0))
        while len(status) == 0:
            self.pysocksSetting()
            rs = (self.session.get(self.UA_URL %
                                   (self.currentAppId, self.APIKEY, steamid))
                  for steamid in userlist)
            res_ = list(map(lambda x: x.result(), rs))
            status = [i.status_code for i in res_ if i is not None]
        docs = []
        for i, steamid_i in zip(res_, userlist):
            try:
                doc = i.json()["playerstats"]
            except (ValueError, KeyError):
                continue
            doc.update({"appid": self.currentAppId})
            docs.append(doc)
        return docs

    def getUserBan(self, userlist):
        rs = (self.session.get(self.UB_URL % (self.APIKEY, steamid))
              for steamid in userlist)
        t0 = time.time()
        res_ = list(map(lambda x: x.result(), rs))
        status = [i.status_code for i in res_ if i is not None]
        print("ban status", status[:5])
        print("ban req time %d" % (time.time() - t0))
        while len(status) == 0:
            self.pysocksSetting()
            rs = (self.session.get(self.UB_URL % (self.APIKEY, steamid))
                  for steamid in userlist)
            res_ = list(map(lambda x: x.result(), rs))
            status = [i.status_code for i in res_ if i is not None]
        docs = []

        for i, steamid_i in zip(res_, userlist):
            try:
                doc = i.json()["players"][0]
            except (ValueError, KeyError, IndexError):
                continue
            doc.update({"appid": self.currentAppId})
            docs.append(doc)
        return docs

    def chunks(self, l, n):
        for i in range(0, len(l), n):
            yield l[i:i + n]

    def replaceUserCollection(self, dataArr, func, updateKeys):
        keysList = list(updateKeys.keys())
        for data in dataArr:
            func({myKey: data[myKey] for myKey in keysList}, data)

    def updateUserInfos(self):
        insertFuncArr = [
            self.db.userSummary.insert_many, self.db.userOwns.insert_many,
            self.db.userAchieve.insert_many, self.db.userBan.insert_many
        ]
        replaceFuncArr = [
            self.db.userSummary.replace_one, self.db.userOwns.replace_one,
            self.db.userAchieve.replace_one, self.db.userBan.replace_one
        ]
        updateKeys = [{
            "steamid": -1
        }, {
            "steamid": -1
        }, {
            "steamid": -1,
            "appid": self.currentAppId
        }, {
            "steamid": -1,
            "appid": self.currentAppId
        }]
        users_inDB = set(self.db.userSummary.distinct("steamid"))
        newUsers = self.authors - users_inDB
        replaceUser = users_inDB & self.authors

        newUsersC = list(self.chunks(list(newUsers), 100))
        replaceUserC = list(self.chunks(list(replaceUser), 100))
        self.pysocksSetting()
        for taskflag, userChunks, funcArr in [
            ("insert", newUsersC, insertFuncArr),
            ("replace", replaceUserC, replaceFuncArr)
        ]:
            for userlist in userChunks:
                us = self.getUserSummary(userlist)  # pk : steamid
                uo = self.getUserOwns(userlist)  # pk : steamid
                ua = self.getUserAchieve(userlist)  # pk : steamid + appid
                ub = self.getUserBan(userlist)  # pk : steamid + appid
                for dataArr, func, updateKey in zip([us, uo, ua, ub], funcArr,
                                                    updateKeys):
                    if len(dataArr) == 0:
                        continue
                    if taskflag == "insert":
                        func(dataArr)
                    else:
                        self.replaceUserCollection(dataArr, func, updateKey)
                print("userSummary Size %d" % self.db.userSummary.count())
                print("userOwns    Size %d" % self.db.userOwns.count())
                print("userAchieve Size %d" % self.db.userAchieve.count())
                print("userBan     Size %d" % self.db.userBan.count())

        return
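
The user-info fetchers above (getUserSummary, getUserOwns, getUserAchieve, getUserBan) all share one fan-out shape: issue every GET through the shared FuturesSession, resolve the futures with .result(), and skip whatever fails to parse. A minimal, self-contained sketch of that shape; the function name, URL template, and worker count below are illustrative and not taken from this crawler:

from requests_futures.sessions import FuturesSession

def fetch_many(url_template, ids, workers=20):
    # Fire one request per id up front; the session's thread pool runs them concurrently.
    session = FuturesSession(max_workers=workers)
    futures = [session.get(url_template % i) for i in ids]
    docs = []
    for future, i in zip(futures, ids):
        try:
            resp = future.result()      # blocks until this particular request finishes
            docs.append((i, resp.json()))
        except Exception:
            continue                    # drop failed requests and unparsable bodies
    return docs
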
Ejemplo n.º 38
0
 def __init__(self, host=None):
     self.host = host or self.default_host
     self.session = FuturesSession()
     self.request_precision = 3
Ejemplo n.º 39
0
 def __init__(self, base_url, workers=1, threads=1):
     self.base_url = base_url
     self.session = FuturesSession(executor=ThreadPoolExecutor(
         max_workers=threads))
Ejemplo n.º 40
0
def searchTickets(request, params):
    session = FuturesSession()
    async_list = []
    tic = []
    headers = {
        'user-agent': 'yatse/0.0.1',
        'api-key': settings.API_KEY,
        'api-user': request.user.username
    }
    for Srv in Server.objects.all():
        url = '%s/yatse/' % Srv.url
        # , hooks={'response': do_something}
        req = session.request('SEARCH',
                              url,
                              data=json.dumps(params),
                              headers=headers)
        setattr(req, 'serverName', Srv.name)
        setattr(req, 'serverID', Srv.id)
        setattr(req, 'serverShortName', Srv.short)
        async_list.append(req)

    for req in async_list:
        try:
            result = req.result()
            if result.status_code != 200:
                messages.add_message(
                    request, messages.ERROR,
                    _(u'%s responded with: %s' %
                      (req.serverName, result.status_code)))

            else:
                data = json.loads(result.content)
                for date in data:
                    date['YATSServer'] = req.serverShortName
                    date['serverID'] = req.serverID
                    date['c_date'] = dateutil.parser.parse(date['c_date'])
                    date['last_action_date'] = dateutil.parser.parse(
                        date['last_action_date'])
                    if is_naive(date['last_action_date']):
                        date['last_action_date'] = make_aware(
                            date['last_action_date'])
                    if 'close_date' in date and date['close_date']:
                        date['close_date'] = dateutil.parser.parse(
                            date['close_date'])
                    date['is_late'] = 0
                    if 'daedline' in date and date['daedline']:
                        date['daedline'] = dateutil.parser.parse(
                            date['daedline'])
                        if date['daedline'] < datetime.date.today():
                            date['is_late'] = 2
                        if date['daedline'] < datetime.date.today(
                        ) + datetime.timedelta(days=7):
                            date['is_late'] = 1

                tic = tic + data

        except Exception:
            messages.add_message(
                request, messages.ERROR,
                _(u'YATS not reachable: %s' % req.serverName))

    return tic
Ejemplo n.º 41
0
 def __init__(self, lang, voice, url, api_path):
     super(RemoteTTS, self).__init__(lang, voice)
     self.api_path = api_path
     self.url = remove_last_slash(url)
     self.session = FuturesSession()
Ejemplo n.º 42
0
 def get_delayed(self, url, cb_func=None):
     full_url = self.url + url
     if not self._fus:
         self._fus = FuturesSession(max_workers=10)
     return self._fus.get(full_url, background_callback=cb_func, **self._get_kwargs())
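
The snippet above creates the FuturesSession lazily and hands requests-futures a background_callback, which runs in the worker thread as soon as the response arrives, before .result() is ever called (this is the callback API of the requests-futures versions that support background_callback). A small illustrative sketch of the same idea, with a placeholder URL and attribute name:

from requests_futures.sessions import FuturesSession

def on_response(sess, resp):
    # Executed in the worker thread; stash derived data on the response object.
    resp.body_length = len(resp.content)

session = FuturesSession(max_workers=10)
future = session.get('https://example.com/', background_callback=on_response)
print(future.result().body_length)
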
Ejemplo n.º 43
0
api_v2 = Blueprint('api_v2', __name__, url_prefix='/api/v2')
rest_api_v2 = Api(api_v2, errors=errors)

ANALYSIS_ACCESS_COUNT_KEY = 'access_count'
TOTAL_COUNT_KEY = 'total_count'

ANALYTICS_API_VERSION = "v2.0"
HOSTNAME = os.environ.get('HOSTNAME', 'bayesian-api')
METRICS_SERVICE_URL = "http://{}:{}".format(
    os.environ.get('METRICS_ACCUMULATOR_HOST', 'metrics-accumulator'),
    os.environ.get('METRICS_ACCUMULATOR_PORT', '5200')
)

worker_count = int(os.getenv('FUTURES_SESSION_WORKER_COUNT', '100'))
_session = FuturesSession(max_workers=worker_count)
_resource_paths = []


@api_v2.route('/readiness')
def readiness():
    """Handle the /readiness REST API call."""
    return jsonify({}), 200


@api_v2.route('/liveness')
def liveness():
    """Handle the /liveness REST API call."""
    return jsonify({}), 200

Ejemplo n.º 44
0
def HTTPInstanceGenerator(action,
                          instance_times,
                          blocking_cli,
                          param_file=None):
    if len(instance_times) == 0:
        return False
    session = FuturesSession(max_workers=15)
    url = base_url + action
    parameters = {'blocking': blocking_cli, 'result': RESULT}
    authentication = (user_pass[0], user_pass[1])
    after_time, before_time = 0, 0

    if param_file is None:
        st = 0
        for t in instance_times:
            # Mark the invocation number for each invocation
            if action in action_times:
                action_times[action] = action_times[action] + 1
            else:
                action_times[action] = 0
            invoke_number = str(action_times[action])

            # Initialize before_time at the first invocation
            if before_time == 0:
                before_time = time.time()
            after_time = time.time()
            # Calculate the time needed to wait
            st = st + t - (after_time - before_time)
            before_time = time.time()
            if st > 0:
                time.sleep(st)

            logger.info('start,' + action + ',' + invoke_number)
            future = session.post(url,
                                  params=parameters,
                                  auth=authentication,
                                  verify=False)
            logger.info('end,' + action + ',' + invoke_number)

    else:  # if a parameter file is provided
        try:
            param_file_body = param_file_cache[param_file]
        except KeyError:
            with open(param_file, 'r') as f:
                param_file_body = json.load(f)
                param_file_cache[param_file] = param_file_body
        st = 0
        for t in instance_times:
            # Mark the invocation number for each invocation
            if action in action_times:
                action_times[action] = action_times[action] + 1
            else:
                action_times[action] = 0
            invoke_number = str(action_times[action])

            # Initialize before_time at the first invocation
            if before_time == 0:
                before_time = time.time()
            after_time = time.time()
            # Calculate the time needed to wait
            st = st + t - (after_time - before_time)
            before_time = time.time()
            if st > 0:
                time.sleep(st)

            logger.info('start,' + action + ',' + invoke_number)
            future = session.post(url,
                                  params=parameters,
                                  auth=authentication,
                                  json=param_file_body,
                                  verify=False)
            logger.info('end,' + action + ',' + invoke_number)

    return True
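
Both branches above pace the invocations the same way: each iteration adds the desired inter-arrival gap t to a running debt st and subtracts the time actually spent since the previous dispatch, so a slow POST shrinks the next sleep instead of letting the schedule drift. A stripped-down sketch of just that loop, with a caller-supplied fire() standing in for session.post (the names here are illustrative):

import time

def paced_dispatch(inter_arrival_times, fire):
    # Keep the cumulative schedule: sleep only for the remaining debt `st`.
    st = 0.0
    before = time.time()
    for gap in inter_arrival_times:
        st += gap - (time.time() - before)
        before = time.time()
        if st > 0:
            time.sleep(st)
        fire()

For example, paced_dispatch([0.5, 0.5, 1.0], some_callable) fires three times roughly 0.5, 0.5 and 1.0 seconds apart, regardless of how long each dispatch itself takes.
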
Ejemplo n.º 45
0
import logging
import multiprocessing
import socket
import time
import traceback
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse
import uuid

from requests_futures.sessions import FuturesSession
from .silent import SilentExecutor

from ..conf import SPECTRUM_UUID4

cpus = multiprocessing.cpu_count()
session = FuturesSession(executor=SilentExecutor(max_workers=cpus))

socket.setdefaulttimeout(0.1)


def cb(sess, resp):
    """ Do not thing with the callback """
    pass


class BaseSpectrumHandler(logging.Handler):
    def __init__(self, sublevel=None, *args, **kwargs):
        """ Setup """
        self.url = kwargs.pop(
            'url', 'http://127.0.0.1:9000/?spectrum=%s' % SPECTRUM_UUID4)
Ejemplo n.º 46
0
File: dl.py Project: l4rm4nd/pytr
class DL:
    def __init__(self,
                 tr,
                 output_path,
                 filename_fmt,
                 since_timestamp=0,
                 history_file='pytr_history'):
        '''
        tr: api object
        output_path: name of the directory where the downloaded files are saved
        filename_fmt: format string to customize the file names
        since_timestamp: downloaded files since this date (unix timestamp)
        '''
        self.tr = tr
        self.output_path = Path(output_path)
        self.history_file = self.output_path / history_file
        self.filename_fmt = filename_fmt
        self.since_timestamp = since_timestamp

        self.session = FuturesSession()
        self.futures = []

        self.docs_request = 0
        self.done = 0
        self.filepaths = []
        self.doc_urls = []
        self.doc_urls_history = []
        self.tl = Timeline(self.tr)
        self.log = get_logger(__name__)
        self.load_history()

    def load_history(self):
        '''
        Read history file with URLs if it exists, otherwise create empty file
        '''
        if self.history_file.exists():
            with self.history_file.open() as f:
                self.doc_urls_history = f.read().splitlines()
            self.log.info(
                f'Found {len(self.doc_urls_history)} lines in history file')
        else:
            self.history_file.parent.mkdir(exist_ok=True, parents=True)
            self.history_file.touch()
            self.log.info('Created history file')

    async def dl_loop(self):
        await self.tl.get_next_timeline(max_age_timestamp=self.since_timestamp)

        while True:
            try:
                _subscription_id, subscription, response = await self.tr.recv()
            except TradeRepublicError as e:
                self.log.fatal(str(e))

            if subscription['type'] == 'timeline':
                await self.tl.get_next_timeline(
                    response, max_age_timestamp=self.since_timestamp)
            elif subscription['type'] == 'timelineDetail':
                await self.tl.timelineDetail(
                    response, self, max_age_timestamp=self.since_timestamp)
            else:
                self.log.warning(
                    f"unmatched subscription of type '{subscription['type']}':\n{preview(response)}"
                )

    def dl_doc(self, doc, titleText, subtitleText, subfolder=None):
        '''
        send asynchronous request, append future with filepath to self.futures
        '''
        doc_url = doc['action']['payload']

        date = doc['detail']
        iso_date = '-'.join(date.split('.')[::-1])

        # extract time from subtitleText
        time = re.findall('um (\\d+:\\d+) Uhr', subtitleText)
        if time == []:
            time = ''
        else:
            time = f' {time[0]}'

        if subfolder is not None:
            directory = self.output_path / subfolder
        else:
            directory = self.output_path

        # If doc_type is something like 'Kosteninformation 2', then strip the 2 and save it in doc_type_num
        doc_type = doc['title'].rsplit(' ')
        if doc_type[-1].isnumeric() is True:
            doc_type_num = f' {doc_type.pop()}'
        else:
            doc_type_num = ''

        doc_type = ' '.join(doc_type)
        titleText = titleText.replace('\n', '').replace('/', '-')
        subtitleText = subtitleText.replace('\n', '').replace('/', '-')

        filename = self.filename_fmt.format(iso_date=iso_date,
                                            time=time,
                                            title=titleText,
                                            subtitle=subtitleText,
                                            doc_num=doc_type_num)
        if doc_type in ['Kontoauszug', 'Depotauszug']:
            filepath = directory / 'Abschlüsse' / f'{filename}' / f'{doc_type}.pdf'
        else:
            filepath = directory / doc_type / f'{filename}.pdf'

        filepath = sanitize_filepath(filepath, '_', 'auto')

        if filepath in self.filepaths:
            self.log.debug(f'File {filepath} already in queue. Skipping...')
            return
        else:
            self.filepaths.append(filepath)

        if filepath.is_file() is False:
            doc_url_base = doc_url.split('?')[0]
            if doc_url_base in self.doc_urls:
                self.log.debug(
                    f'URL {doc_url_base} already in queue. Skipping...')
                return
            elif doc_url_base in self.doc_urls_history:
                self.log.debug(
                    f'URL {doc_url_base} already in history. Skipping...')
                return
            else:
                self.doc_urls.append(doc_url_base)

            future = self.session.get(doc_url)
            future.filepath = filepath
            future.doc_url_base = doc_url_base
            self.futures.append(future)
            self.log.debug(f'Added {filepath} to queue')
        else:
            self.log.debug(f'file {filepath} already exists. Skipping...')

    def work_responses(self):
        '''
        process responses of async requests
        '''
        if len(self.doc_urls) == 0:
            self.log.info('Nothing to download')
            exit(0)

        with self.history_file.open('a') as history_file:
            self.log.info('Waiting for downloads to complete..')
            for future in as_completed(self.futures):
                if future.filepath.is_file() is True:
                    self.log.debug(
                        f'file {future.filepath} was already downloaded.')

                r = future.result()
                future.filepath.parent.mkdir(parents=True, exist_ok=True)
                with open(future.filepath, 'wb') as f:
                    f.write(r.content)
                    self.done += 1
                    history_file.write(f'{future.doc_url_base}\n')

                    self.log.debug(
                        f'{self.done:>3}/{len(self.doc_urls)} {future.filepath.name}'
                    )

                if self.done == len(self.doc_urls):
                    self.log.info('Done.')
                    exit(0)
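
dl_doc tags each future with extra attributes (filepath, doc_url_base) and work_responses reads them back as the downloads finish via as_completed. A stripped-down sketch of that tagging pattern; the URLs and output paths below are placeholders:

from concurrent.futures import as_completed
from pathlib import Path
from requests_futures.sessions import FuturesSession

session = FuturesSession()
jobs = {
    'https://example.com/a.pdf': Path('out/a.pdf'),
    'https://example.com/b.pdf': Path('out/b.pdf'),
}

futures = []
for url, path in jobs.items():
    future = session.get(url)
    future.filepath = path              # tag the future, as dl_doc does
    futures.append(future)

for future in as_completed(futures):
    future.filepath.parent.mkdir(parents=True, exist_ok=True)
    future.filepath.write_bytes(future.result().content)
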
Ejemplo n.º 47
0
        f.write(str(Response))
        f.write("\n")
        f.close()

    except req.exceptions.ConnectionError:
        print(colored("[-] Url Not Alive -->", 'red'), Response)
        pass
    url_0()

if url_file:
    file = open(url_file, "r")
    for line in file:
        if not urlparse(line).scheme:
            line = "https://" + line
        urls.append(line.strip())
    with FuturesSession() as session:
        futures = [
            session.get(url,
                        headers={
                            'User-Agent':
                            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6)'
                        }) for url in urls
        ]
        for f, u in zip(futures, urls):
            try:
                print(
                    colored(
                        " URL Alive    ----> {} : {} ".format(
                            u,
                            f.result().status_code), 'green'))
            except req.exceptions.ConnectionError:
                print(colored("[-] Url Not Alive -->", 'red'), u)
class DatadogHTTPClient(object):
    """
    Client that sends a batch of logs over HTTP.
    """

    _POST = "POST"
    if DD_USE_COMPRESSION:
        _HEADERS = {"Content-type": "application/json", "Content-Encoding": "gzip"}
    else:
        _HEADERS = {"Content-type": "application/json"}

    def __init__(
        self, host, port, no_ssl, skip_ssl_validation, api_key, scrubber, timeout=10
    ):
        protocol = "http" if no_ssl else "https"
        self._url = "{}://{}:{}/v1/input/{}".format(protocol, host, port, api_key)
        self._scrubber = scrubber
        self._timeout = timeout
        self._session = None
        self._ssl_validation = not skip_ssl_validation
        self._futures = []
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(
                f"Initialized http client for logs intake: "
                f"<host: {host}, port: {port}, url: {self._url}, no_ssl: {no_ssl}, "
                f"skip_ssl_validation: {skip_ssl_validation}, timeout: {timeout}>"
            )

    def _connect(self):
        self._session = FuturesSession(max_workers=DD_MAX_WORKERS)
        self._session.headers.update(self._HEADERS)

    def _close(self):
        # Resolve all the futures and log exceptions if any
        for future in as_completed(self._futures):
            try:
                future.result()
            except Exception:
                logger.exception("Exception while forwarding logs")

        self._session.close()

    def send(self, logs):
        """
        Send a batch of logs, retrying only on server and network errors.
        """
        try:
            data = self._scrubber.scrub("[{}]".format(",".join(logs)))
        except ScrubbingException:
            raise Exception("could not scrub the payload")
        if DD_USE_COMPRESSION:
            data = compress_logs(data, DD_COMPRESSION_LEVEL)

        # FuturesSession returns immediately with a future object
        future = self._session.post(
            self._url, data, timeout=self._timeout, verify=self._ssl_validation
        )
        self._futures.append(future)

    def __enter__(self):
        self._connect()
        return self

    def __exit__(self, ex_type, ex_value, traceback):
        self._close()
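
DatadogHTTPClient is written to be used as a context manager: __enter__ opens the FuturesSession, send() queues one future per batch, and __exit__ drains every outstanding future before closing the session. A hedged usage sketch; it assumes the class and its module globals above, and the intake host, API key, and no-op scrubber are placeholders rather than values from this module:

class NoopScrubber:
    def scrub(self, payload):
        return payload                  # a real scrubber would redact secrets here

log_batch = ['{"message": "hello"}', '{"message": "world"}']

with DatadogHTTPClient("http-intake.logs.datadoghq.com", 443, False, False,
                       "<api-key>", NoopScrubber()) as client:
    client.send(log_batch)              # returns immediately; futures resolve on exit
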
Ejemplo n.º 49
0
    def step(self, local_phasors, reference_phasors, phasor_target):
        self.iteration_counter += 1
        print(self.iteration_counter)

        if phasor_target is None and self.Vang_targ == "initialize":
            print("Iteration", self.iteration_counter,
                  ": No target received by SPBC")
            return

        else:
            if phasor_target is None:
                print(
                    "Iteration", self.iteration_counter,
                    ": No target received by SPBC: Using last received target")

            else:
                "Data extractions"
                # extract out correct index of phasor target for each phase
                if len(self.phases) == 0:
                    self.phases.append(
                        phasor_target['phasor_targets'][0]['channelName'])
                    self.phase_channels = [0] * len(
                        phasor_target['phasor_targets'])
                if len(self.phase_channels) > 1:
                    self.phases = [0] * len(self.phase_channels)
                    for i in range(len(self.phase_channels)):
                        self.phase_channels[i] = phasor_target[
                            'phasor_targets'][i]['channelName']
                        self.phases[i] = phasor_target['phasor_targets'][i][
                            'channelName']
                    if 'L1' in self.phase_channels:
                        for i, chan in enumerate(self.phase_channels):
                            if chan == 'L1':
                                self.phase_channels[i] = 0
                            if chan == 'L2':
                                self.phase_channels[i] = 1
                            if chan == 'L3':
                                self.phase_channels[i] = 2 - (
                                    3 - len(self.phase_channels))
                    else:
                        for i, chan in enumerate(self.phase_channels):
                            if chan == 'L2':
                                self.phase_channels[i] = 0
                            if chan == 'L3':
                                self.phase_channels[i] = 2 - (
                                    3 - len(self.phase_channels))
                    self.phases = sorted(self.phases)

                if self.Vang_targ == "initialize":
                    self.Vang_targ = np.empty((len(self.phase_channels), 1))
                    self.Vmag_targ = np.empty((len(self.phase_channels), 1))
                    self.kvbase = np.empty((len(self.phase_channels), 1))
                    self.sbase = np.empty((len(self.phase_channels), 1))

                # extract phasor target values for each phase: targets get sorted in ascending phase order
                for channel, phase in enumerate(self.phase_channels):

                    self.Vmag_targ[phase] = phasor_target['phasor_targets'][
                        channel]['magnitude']
                    self.Vang_targ[phase] = phasor_target['phasor_targets'][
                        channel]['angle']
                    self.kvbase[phase] = phasor_target['phasor_targets'][
                        channel]['kvbase']['value']
                    self.sbase[phase] = phasor_target['phasor_targets'][
                        channel]['KVAbase']['value']

            # calculate relative voltage phasor
            self.phasor_calc(local_phasors, reference_phasors,
                             self.phase_channels)

            # calculate P/Q from actuators
            #self.PQ_solver(local_phasors, self.phase_channels)

            # convert to p.u.
            self.Vmag_relative_pu = self.Vmag_relative / (self.kvbase * 1000)

            # calculate phasor errors
            self.phasor_error_ang = self.Vang_targ - self.Vang_relative
            self.phasor_error_mag = self.Vmag_targ - self.Vmag_relative_pu

            if self.Psat == "initialize":
                n = 5  # saturation counter limit
                self.Psat = np.ones((np.size(self.phase_channels), n))
                self.Qsat = np.ones((np.size(self.phase_channels), n))
                self.ICDI_sigP = np.zeros((np.size(self.phase_channels), 1),
                                          dtype=bool)
                self.ICDI_sigQ = np.zeros((np.size(self.phase_channels), 1),
                                          dtype=bool)
                self.Pmax = np.empty((np.size(self.phase_channels), 1))
                self.Qmax = np.empty((np.size(self.phase_channels), 1))
                self.Pact = np.zeros((np.size(self.phase_channels), 1))
                self.Pcmd = np.zeros((np.size(self.phase_channels), 1))
                self.Pcmd_inv = np.zeros((np.size(self.phase_channels), 1))
                self.intError_ang = np.zeros((np.size(self.phase_channels), 1))
                self.intError_mag = np.zeros((np.size(self.phase_channels), 1))

            "Checking for P saturation (anti-windup control)"
            # find indices where Pact + tolerance is less than Pcmd
            indexP = np.where(abs(self.Pcmd_inv) > 1000)[0]

            # initialize saturation counter for each phase
            sat_arrayP = np.ones((np.size(self.phase_channels), 1))

            # stop integrator for saturated phases
            for i in indexP:
                sat_arrayP[i] = 0

            # saturation counter check to determine if ICDI signal should be sent to SPBC
            self.Psat = np.append(self.Psat, sat_arrayP, axis=1)
            self.Psat = self.Psat[:, 1:]

            for phase in range(len(self.phase_channels)):
                if phase in np.where(~self.Psat.any(axis=1))[0]:
                    self.ICDI_sigP[phase] = True
                    if self.Pcmd_inv[phase] > 2000:
                        self.Pmax[phase] = (1000 *
                                            self.local_s_ratio_loadrack) / 1000
                    elif self.Pcmd_inv[phase] < 0:
                        self.Pmax[phase] = (-1000 *
                                            self.local_s_ratio_loadrack) / 1000
                else:
                    self.ICDI_sigP[phase] = False
                    self.Pmax[phase] = None

            "Checking for Q saturation (anti-windup control)"
            # find indices where Qact + tolerance is less than Qcmd
            #indexQ = np.where(abs(self.Qact + (0.03 * self.Qcmd)) < abs(self.Qcmd))[0]

            # initialize saturation counter for each phase
            sat_arrayQ = np.ones((np.size(self.phase_channels), 1))

            # stop integrator for saturated phases
            #for i in indexQ:
            #sat_arrayQ[i] = 0

            # saturation counter check to determine if ICDI signal should be sent to SPBC
            self.Qsat = np.append(self.Qsat, sat_arrayQ, axis=1)
            self.Qsat = self.Qsat[:, 1:]

            for phase in range(len(self.phase_channels)):
                #if phase in np.where(~self.Qsat.any(axis=1))[0]:
                #self.ICDI_sigQ[phase] = False
                #self.Qmax[phase] = None
                #else:
                self.ICDI_sigQ[phase] = False
                self.Qmax[phase] = None

            "PI control algorithm"
            self.currentIntError_ang = (self.Ki_ang *
                                        self.phasor_error_ang) * sat_arrayP
            self.intError_ang += self.currentIntError_ang
            self.Pcmd_pu = (self.Kp_ang *
                            self.phasor_error_ang) + self.intError_ang

            self.currentIntError_mag = (self.Ki_mag *
                                        self.phasor_error_mag) * sat_arrayQ
            self.intError_mag += self.currentIntError_mag
            self.Qcmd_pu = (self.Kp_mag *
                            self.phasor_error_mag) + self.intError_mag

            # returns 8 signals: ICDI_sigP, ICDI_sigQ, Pmax, Qmax, phasor errors(2) to S-PBC; Pcmd, Qcmd to actuator
            # signals are column vector format: [number of phases/actuators x 1]

            # convert p.u. to W/ VARs (s base in units of kVA)
            self.Pcmd = self.Pcmd_pu * (self.sbase * 1000)
            self.Qcmd = self.Qcmd_pu * (self.sbase * 1000)
            print('ORT', self.Pcmd)

            self.Pcmd_inv = self.Pcmd / self.local_s_ratio_loadrack
            print('INV', self.Pcmd_inv)
            "http to inverters"
            #  Check hostname and port
            #  Sends P and Q command to actuator
            # http initialization
            if self.init_http == 0:
                if self.mode == 1 or self.mode == 2:
                    requests.get(
                        "http://131.243.41.47:9090/control?P_ctrl=97,Batt_ctrl=0"
                    )
                if self.mode == 3:
                    requests.get(
                        "http://131.243.41.47:9090/control?P_ctrl=0,Batt_ctrl=0"
                    )
                self.batt_export = np.zeros(
                    (len(phasor_target['phasor_targets']), 1))
                self.batt_cmd = np.zeros(
                    (len(phasor_target['phasor_targets']), 1))
                self.p_ctrl = np.zeros(
                    (len(phasor_target['phasor_targets']), 1))
                self.init_http = 1

            if self.mode == 4:  # Load racks
                session = FuturesSession()
                urls = []
                for phase, group in zip(range(len(self.Pcmd)), self.group_id):
                    self.p_ctrl[phase] = int(
                        np.round((-1. * self.Pcmd_inv[phase]) + 1000))
                    if self.p_ctrl[phase] > 2000:
                        self.p_ctrl[phase] = 2000
                        urls.append(
                            f"http://131.243.41.118:9090/control?group_id={group},P_ctrl=2000"
                        )
                    elif self.p_ctrl[phase] < 0:
                        self.p_ctrl[phase] = 0
                        urls.append(
                            f"http://131.243.41.118:9090/control?group_id={group},P_ctrl=0"
                        )
                    else:
                        urls.append(
                            f"http://131.243.41.118:9090/control?group_id={group},P_ctrl={self.p_ctrl[phase][0]}"
                        )
                print('INV_OFFSET', self.p_ctrl)
                responses = map(session.get, urls)
                results = [resp.result()
                           for resp in responses]  # results is status code
                print(results)

            "Status feedback to SPBC"
            status = {}
            status['phases'] = self.phases
            status['phasor_errors'] = {
                'V': list(self.phasor_error_mag.ravel()),
                'delta': list(self.phasor_error_ang.ravel())
            }
            status['p_saturated'] = list(self.ICDI_sigP.ravel())
            status['q_saturated'] = list(self.ICDI_sigQ.ravel())
            status['p_max'] = list(self.Pmax.ravel())
            status['q_max'] = list(self.Qmax.ravel())

            return status
Ejemplo n.º 51
0
 def __init__(self):
     self.client = pymongo.MongoClient()
     self.db = self.client.steam
     self.session = FuturesSession(max_workers=100)
     self.torOff = False
LOG_PATH = "logs/"

# Setting up for logging purposes
utc = datetime.utcnow()
yyyy = utc.year
mm = utc.month
dd = utc.day

fileName = f'tomorrow_daily_list-{yyyy}-{mm}-{dd}.log'
FILE_PATH = LOG_PATH + fileName

# Checking for existence of directory
if (not os.path.isdir(LOG_PATH)):
    os.mkdir(LOG_PATH)

session = FuturesSession()

request = session.get(
    "https://api.guildwars2.com/v2/achievements/daily/tomorrow")
request_result = request.result()

dailyTom = json.loads(request_result.text)
dailyTomPVE = dailyTom['pve']

id_List = []

# Collecting the IDs of tomorrow's daily achievements
for entry in dailyTomPVE:
    id_List.append(entry['id'])

fp = open(FILE_PATH, "w")
Ejemplo n.º 53
0
class SCSOutputPlugin(OutputPlugin):
    useOutputQueue = False
    name = "scsout"
    MAXQUEUELENGTH = 1000

    def __init__(self, sample, output_counter=None):
        OutputPlugin.__init__(self, sample, output_counter)

        self.scsHttpPayloadMax = (
            150000  # Documentation recommends 20KB to 200KB. Going with 150KB.
        )
        self.scsEndPoint = getattr(self._sample, "scsEndPoint", None)
        self.scsAccessToken = getattr(self._sample, "scsAccessToken", None)
        self.scsClientId = getattr(self._sample, "scsClientId", "")
        self.scsClientSecret = getattr(self._sample, "scsClientSecret", "")
        self.scsRetryNum = int(getattr(self._sample, "scsRetryNum",
                                       0))  # By default, retry num is 0

        self._setup_REST_workers()

    def _setup_REST_workers(self, session=None, workers=10):
        # disable any "requests" warnings
        requests.packages.urllib3.disable_warnings()
        # Bind passed in samples to the outputter.
        if not session:
            session = Session()
        self.session = FuturesSession(
            session=session, executor=ThreadPoolExecutor(max_workers=workers))
        self.active_sessions = []

    def flush(self, events):
        if not self.scsEndPoint:
            if getattr(self.config, "scsEndPoint", None):
                self.scsEndPoint = self.config.scsEndPoint
            else:
                raise NoSCSEndPoint(
                    "Please specify your REST endpoint for the SCS tenant")

        if not self.scsAccessToken:
            if getattr(self.config, "scsAccessToken", None):
                self.scsAccessToken = self.config.scsAccessToken
            else:
                raise NoSCSAccessToken(
                    "Please specify your REST endpoint access token for the SCS tenant"
                )

        if self.scsClientId and self.scsClientSecret:
            logger.info(
                "Both scsClientId and scsClientSecret are supplied." +
                " We will renew the expired token using these credentials.")
            self.scsRenewToken = True
        else:
            if getattr(self.config, "scsClientId", None) and getattr(
                    self.config, "scsClientSecret", None):
                self.scsClientId = self.config.scsClientId
                self.scsClientSecret = self.config.scsClientSecret
                logger.info(
                    "Both scsClientId and scsClientSecret are supplied." +
                    " We will renew the expired token using these credentials."
                )
                self.scsRenewToken = True
            else:
                self.scsRenewToken = False

        self.header = {
            "Authorization": f"Bearer {self.scsAccessToken}",
            "Content-Type": "application/json",
        }

        self.accessTokenExpired = False
        self.tokenRenewEndPoint = "https://auth.scp.splunk.com/token"
        self.tokenRenewBody = {
            "client_id": self.scsClientId,
            "client_secret": self.scsClientSecret,
            "grant_type": "client_credentials",
        }

        for i in range(self.scsRetryNum + 1):
            logger.debug(f"Sending data to the scs endpoint. Num:{i}")
            self._sendHTTPEvents(events)

            if not self.checkResults():
                if self.accessTokenExpired and self.scsRenewToken:
                    self.renewAccessToken()
                self.active_sessions = []
            else:
                break

    def checkResults(self):
        for session in self.active_sessions:
            response = session.result()
            if (response.status_code == 401
                    and "Invalid or Expired Bearer Token" in response.text):
                logger.error("scsAccessToken is invalid or expired")
                self.accessTokenExpired = True
                return False
            elif response.status_code != 200:
                logger.error(
                    f"Data transmisison failed with {response.status_code} and {response.text}"
                )
                return False
        logger.debug("Data transmission successful")
        return True

    def renewAccessToken(self):
        response = requests.post(self.tokenRenewEndPoint,
                                 data=self.tokenRenewBody,
                                 timeout=5)
        if response.status_code == 200:
            logger.info("Renewal of the access token succesful")
            self.scsAccessToken = response.json()["access_token"]
            setattr(self._sample, "scsAccessToken", self.scsAccessToken)
            self.accessTokenExpired = False
        else:
            logger.error("Renewal of the access token failed")

    def _sendHTTPEvents(self, events):
        currentPayloadSize = 0
        currentPayload = []
        try:
            for event in events:
                # Reformat the event to fit the scs request spec
                # TODO: Move this logic to generator
                try:
                    event["body"] = event.pop("_raw")
                    event["timestamp"] = int(event.pop("_time") * 1000)
                    event.pop("index")
                    if "attributes" not in event:
                        event["attributes"] = {}
                        event["attributes"]["hostRegex"] = event.pop(
                            "hostRegex")
                except Exception:
                    pass

                targetline = json.dumps(event)
                targetlinesize = len(targetline)

                # Continue building a current payload if the payload is less than the max size
                if (currentPayloadSize +
                        targetlinesize) < self.scsHttpPayloadMax:
                    currentPayload.append(event)
                    currentPayloadSize += targetlinesize
                else:
                    self.active_sessions.append(
                        self.session.post(
                            url=self.scsEndPoint,
                            data=json.dumps(currentPayload),
                            headers=self.header,
                            verify=False,
                        ))
                    currentPayloadSize = targetlinesize
                    currentPayload = [event]

            # Final flush of the leftover events
            if currentPayloadSize > 0:
                self.active_sessions.append(
                    self.session.post(
                        url=self.scsEndPoint,
                        data=json.dumps(currentPayload),
                        headers=self.header,
                        verify=False,
                    ))

        except Exception as e:
            logger.exception(str(e))
            raise e
Ejemplo n.º 54
0
class Connection(object):
    """Represents a connection to a server
    """

    TIMEOUT = 30
    MAX_RETRIES = 256
    MAX_WORKERS = multiprocessing.cpu_count()
    CHUNK_SIZE = 32
    TOKEN = ''
    USER_AGENT = config.get('User-Agent', 'name', 'libmozdata')
    X_FORWARDED_FOR = utils.get_x_fwed_for_str(config.get('X-Forwarded-For', 'data', ''))

    # Error 429 is for 'Too many requests' => we retry
    STATUS_FORCELIST = [429]

    def __init__(self, base_url, queries=None, **kwargs):
        """Constructor

        Args:
            base_url (str): the server's url
            queries (Optional[Query]): the queries
        """

        self.session = FuturesSession(max_workers=self.MAX_WORKERS)
        retries = Retry(total=Connection.MAX_RETRIES, backoff_factor=1, status_forcelist=Connection.STATUS_FORCELIST)
        self.session.mount(base_url, HTTPAdapter(max_retries=retries))
        self.results = []
        self.queries = queries

        if kwargs:
            if 'timeout' in kwargs:
                self.TIMEOUT = kwargs['timeout']
            if 'max_retries' in kwargs:
                self.MAX_RETRIES = kwargs['max_retries']
            if 'max_workers' in kwargs:
                self.MAX_WORKERS = kwargs['max_workers']
            if 'user_agent' in kwargs:
                self.USER_AGENT = kwargs['user_agent']
            if 'x_forwarded_for' in kwargs:
                self.X_FORWARDED_FOR = utils.get_x_fwded_for_str(kwargs['x_forwarded_for'])

        self.exec_queries()

    def __get_cb(self, query):
        """Get the callback to use when data have been retrieved

        Args:
            query (Query): the query

        Returns:
            function: the callback for the query
        """
        def cb(sess, res):
            if res.status_code == 200:
                try:
                    response = res.json()
                except ValueError:
                    response = res.text

                if query.handlerdata is not None:
                    query.handler(response, query.handlerdata)
                else:
                    query.handler(response)
            else:
                print('Connection error:')
                print('   url: ', res.url)
                print('   text: ', res.text)

        return cb

    def wait(self):
        """Just wait that all the queries have been treated
        """
        for r in self.results:
            r.result()

    def get_apikey(self):
        """Get the api key

        Returns:
            str: the api key
        """
        return self.TOKEN

    def get_header(self):
        """Get the header to use each query

        Returns:
            dict: the header
        """
        if self.X_FORWARDED_FOR:
            return {'User-Agent': self.USER_AGENT, 'X-Forwarded-For': self.X_FORWARDED_FOR, 'Connection': 'close'}
        else:
            return {'User-Agent': self.USER_AGENT, 'Connection': 'close'}

    def get_auth(self):
        """Get the auth to use each query

        Returns:
            dict: the auth
        """
        return None

    def exec_queries(self, queries=None):
        """Set and exec some queries

        Args:
            queries (Optional[Query]): the queries to exec
        """
        if queries:
            self.queries = queries

        if self.queries:
            if isinstance(self.queries, Query):
                self.queries = [self.queries]

            header = self.get_header()
            auth = self.get_auth()

            for query in self.queries:
                cb = self.__get_cb(query)
                if query.params:
                    if isinstance(query.params, dict):
                        self.results.append(self.session.get(query.url,
                                                             params=query.params,
                                                             headers=header,
                                                             auth=auth,
                                                             verify=True,
                                                             timeout=self.TIMEOUT,
                                                             background_callback=cb))
                    else:
                        for p in query.params:
                            self.results.append(self.session.get(query.url,
                                                                 params=p,
                                                                 headers=header,
                                                                 auth=auth,
                                                                 verify=True,
                                                                 timeout=self.TIMEOUT,
                                                                 background_callback=cb))
                else:
                    self.results.append(self.session.get(query.url,
                                                         headers=header,
                                                         auth=auth,
                                                         verify=True,
                                                         timeout=self.TIMEOUT,
                                                         background_callback=cb))

    @staticmethod
    def chunks(l, chunk_size=CHUNK_SIZE):
        """Get chunk from a list

        Args:
            l (List): data to chunkify
            chunk_size (Optional[int]): the size of each chunk

        Yields:
            a chunk from the data
        """
        for i in range(0, len(l), chunk_size):
            yield l[i:(i + chunk_size)]
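
Connection.__init__ mounts an HTTPAdapter carrying a urllib3 Retry policy on the FuturesSession, so 429 ("Too many requests") responses are retried with exponential backoff instead of surfacing as errors. The same wiring in isolation, shown as a small sketch with placeholder values for the retry count and endpoint:

from requests.adapters import HTTPAdapter
from requests_futures.sessions import FuturesSession
from urllib3.util.retry import Retry

session = FuturesSession(max_workers=4)
retries = Retry(total=5, backoff_factor=1, status_forcelist=[429])
session.mount('https://', HTTPAdapter(max_retries=retries))

future = session.get('https://example.com/api')    # placeholder endpoint
response = future.result()                         # retried transparently on HTTP 429
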
Ejemplo n.º 55
0
import collections

import requests
from bs4 import BeautifulSoup
from requests_futures.sessions import FuturesSession

url = 'https://www.ynet.co.il/home/0,7340,L-184,00.html'
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html5lib')

urls = [
    f'https://www.ynet.co.il{item.get("href")}'
    for item in soup.select('a.smallheader')
]

# print(urls)

word_counter = collections.Counter()


def parse_ynet_article(resp, *args, **kwargs):
    page = BeautifulSoup(resp.text, 'html5lib')
    text = page.select('.art_body span')[0].text
    word_counter.update(text.split())


session = FuturesSession()
session.hooks['response'] = parse_ynet_article

q = [session.get(url) for url in urls]
results = [f.result() for f in q]
print(len(results))
print(word_counter)
Ejemplo n.º 56
0
class Mimic2(TTS):

    def __init__(self, lang, config):
        super(Mimic2, self).__init__(
            lang, config, Mimic2Validator(self)
        )
        self.url = config['url']
        self.session = FuturesSession()
        chunk_size = config.get('chunk_size')
        self.chunk_size = \
            chunk_size if chunk_size is not None else 10

    def _save(self, data):
        """saves .wav files in tmp

        Args:
            data (bytes): wav data
        """
        with open(self.filename, 'wb') as f:
            f.write(data)

    def _play(self, req):
        """play wav file after saving to tmp

        Args:
            req (object): requests object
        """
        if req.status_code == 200:
            self._save(req.content)
            play_wav(self.filename).communicate()
        else:
            LOG.error(
                '%s Http Error: %s for url: %s' %
                (req.status_code, req.reason, req.url))

    def _requests(self, chunks):
        """create asynchronous request list

        Args:
            chunks (list): list of text to synthesize

        Returns:
            list: list of future objects returned by the session
        """
        reqs = []
        for chunk in chunks:
            if len(chunk) > 0:
                url = self.url + parse.quote(chunk)
                req_route = url + "&visimes=True"
                reqs.append(self.session.get(req_route, timeout=5))
        return reqs

    def visime(self, phonemes):
        """maps phonemes to visemes encoding

        Args:
            phonemes (list): list of tuples (phoneme, time_start)

        Returns:
            list: list of tuples (viseme_encoding, time_start)
        """
        visemes = []
        for pair in phonemes:
            if pair[0]:
                phone = pair[0].lower()
            else:
                # if phoneme doesn't exist use
                # this as placeholder since it
                # is the most common one "3"
                phone = 'z'
            vis = VISIMES.get(phone)
            vis_dur = float(pair[1])
            visemes.append((vis, vis_dur))
        return visemes

    def _normalized_numbers(self, sentence):
        """normalized numbers to word equivalent.

        Args:
            sentence (str): sentence to speak

        Returns:
            str: normalized sentence to speak
        """
        try:
            numbers = re.findall(r'\d+', sentence)
            normalized_num = [
                (num, pronounce_number(int(num)))
                for num in numbers
            ]
            for num, norm_num in normalized_num:
                sentence = sentence.replace(num, norm_num, 1)
        except TypeError:
            LOG.exception("type error in mimic2_tts.py _normalized_numbers()")
        return sentence

    def execute(self, sentence, ident=None):
        """request and play mimic2 wav audio

        Args:
            sentence (str): sentence to synthesize from mimic2
            ident (optional): Defaults to None.
        """
        create_signal("isSpeaking")

        sentence = self._normalized_numbers(sentence)

        # Use the phonetic_spelling mechanism from the TTS base class
        if self.phonetic_spelling:
            for word in re.findall(r"[\w']+", sentence):
                if word.lower() in self.spellings:
                    sentence = sentence.replace(word,
                                                self.spellings[word.lower()])

        chunks = sentence_chunker(sentence, self.chunk_size)
        try:
            for idx, req in enumerate(self._requests(chunks)):
                results = req.result().json()
                audio = base64.b64decode(results['audio_base64'])
                vis = self.visime(results['visimes'])
                key = str(hashlib.md5(
                    chunks[idx].encode('utf-8', 'ignore')).hexdigest())
                wav_file = os.path.join(
                    get_cache_directory("tts"),
                    key + '.' + self.audio_ext
                )
                with open(wav_file, 'wb') as f:
                    f.write(audio)
                self.queue.put((self.audio_ext, wav_file, vis, ident))
        except (ReadTimeout, ConnectionError, ConnectTimeout, HTTPError):
            raise RemoteTTSTimeoutException(
                "Mimic 2 remote server request timedout. falling back to mimic"
            )
Ejemplo n.º 57
0
 def __init__(self):
     self.kwargs = None
     self.url = None
     self.session = FuturesSession()
Ejemplo n.º 58
0
def get_boostrap_static():
    url = f'{ROOT_URL}/bootstrap-static'
    with FuturesSession() as session:
        return session.get(url).result().json()
Ejemplo n.º 59
0
class HTTPDriver(BaseDriver):
    """HTTPDriver

  The :class:`HTTPDriver` class reads SBP messages from an HTTP
  service for a device and writes out to a stream. This driver is like
  a file-handle with read and writes over two separately HTTP
  connections, but can also be enabled and disabled by its consumer.

  Parameters
  ----------
  device_uid : uid
    Device unique id
  url : str
    HTTP endpoint
  retries : tuple
    Configure connect and read retry count. Defaults to
    (MAX_CONNECT_RETRIES, MAX_READ_RETRIES).
  timeout : tuple
    Configure connect and read timeouts. Defaults to
    (DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT).

  """
    def __init__(
        self,
        device_uid=None,
        url="https://broker.staging.skylark.swiftnav.com",
        retries=DEFAULT_RETRIES,
        timeout=DEFAULT_TIMEOUT,
    ):
        retry = Retry(connect=DEFAULT_RETRIES[0],
                      read=DEFAULT_RETRIES[1],
                      redirect=MAX_REDIRECTS,
                      status_forcelist=[500],
                      backoff_factor=DEFAULT_BACKOFF_FACTOR)
        self.url = url
        self.read_session = requests.Session()
        self.read_session.mount(
            "http://",
            HTTPAdapter(pool_connections=DEFAULT_POOLSIZE,
                        pool_maxsize=DEFAULT_POOLSIZE,
                        pool_block=DEFAULT_POOLBLOCK,
                        max_retries=retry))
        self.read_session.mount(
            "https://",
            HTTPAdapter(pool_connections=DEFAULT_POOLSIZE,
                        pool_maxsize=DEFAULT_POOLSIZE,
                        pool_block=DEFAULT_POOLBLOCK,
                        max_retries=retry))
        self.write_session = None
        self.device_uid = device_uid
        self.timeout = timeout
        self.read_response = None
        self.write_response = None
        self.source = None

    def flush(self):
        """File-flush wrapper (noop).

    """
        pass

    def close(self):
        """File-handle close wrapper (noop).

    """
        try:
            self.read_close()
            self.write_close()
        except Exception:
            pass

    @property
    def write_ok(self):
        """
    Are we connected for writes?
    """
        # Note that self.write_response is either None or a Response
        # object, which casts to False for 4xx and 5xx HTTP codes.
        return bool(self.write_response)

    def connect_write(self, source, whitelist, pragma=None):
        """Initialize a streaming write HTTP response. Manually connects the
    underlying file-handle. In the event of a network disconnection,
    use to manually reinitiate an HTTP session.

    Parameters
    ----------
    source : sbp.client.handler.Handler
      Iterable source of SBP messages.
    whitelist : [int]
      Whitelist of messages to write

    """
        headers = {
            'Device-Uid': self.device_uid,
            'Content-Type': BROKER_SBP_TYPE,
            'Pragma': pragma
        }
        if not pragma:
            del headers['Pragma']
        try:
            self.executor = ThreadPoolExecutor(max_workers=DEFAULT_POOLSIZE)
            self.write_session = FuturesSession(executor=self.executor)
            self.source = source.filter(whitelist)
            gen = (msg.pack() for msg, _ in self.source)
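            # requests sends a generator body with chunked transfer encoding,
            # so each packed SBP message goes out as soon as the source
            # yields it instead of being buffered in memory first.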
            self.write_session.put(self.url, data=gen, headers=headers)
            self.write_response = True
        except requests.exceptions.ConnectionError:
            msg = "Invalid request to %s with headers %s." % (self.url,
                                                              headers)
            warnings.warn(msg)
        except requests.exceptions.ConnectTimeout:
            pass
        except requests.exceptions.RetryError:
            pass
        except requests.exceptions.ReadTimeout:
            msg = "Invalid request to %s with headers %s." % (self.url,
                                                              headers)
            warnings.warn(msg)
        return self.write_ok

    def write(self, data):
        """Write wrapper (noop). Actual stream is initiated by the write
    connection.

    Parameters
    ----------
    data : object
      Data to write.

    """
        pass

    def write_close(self):
        """File-handle close wrapper (noop).

    """
        try:
            self.write_session.close()
            self.executor.shutdown(wait=False)
            self.source.breakiter()
            self.source = None
            self.executor = None
            self.write_session = None
        except Exception:
            pass

    @property
    def read_ok(self):
        """
    Are we connected for reads?
    """
        return bool(self.read_response)

    def connect_read(self, pragma=None):
        """Initialize a streaming read/write HTTP response. Manually connects
    the underlying file-handle. In the event of a network
    disconnection, use to manually reinitiate an HTTP session.

    """
        headers = {
            'Device-Uid': self.device_uid,
            'Accept': BROKER_SBP_TYPE,
            'Pragma': pragma
        }
        if not pragma:
            del headers['Pragma']
        try:
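            # stream=True keeps the connection open and defers the body
            # download, so read() below can pull raw bytes incrementally.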
            self.read_response = self.read_session.get(self.url,
                                                       stream=True,
                                                       headers=headers,
                                                       timeout=self.timeout)
        except requests.exceptions.ConnectionError:
            msg = "Invalid request to %s with headers %s." % (self.url,
                                                              headers)
            warnings.warn(msg)
        except requests.exceptions.ConnectTimeout:
            pass
        except requests.exceptions.RetryError:
            pass
        except requests.exceptions.ReadTimeout:
            msg = "Invalid request to %s with headers %s." % (self.url,
                                                              headers)
            warnings.warn(msg)
        return self.read_ok

    def read(self, size):
        """Read wrapper. If the client connection is closed or some other
    exception is thrown, raises an IOError.

    Parameters
    ----------
    size : int
      Size to read (in bytes).

    Returns
    ----------
    bytearray, or None

    """
        if self.read_response is None or not self.device_uid:
            raise ValueError("Invalid/insufficient HTTP request parameters!")
        elif not self.read_ok or self.read_response.raw.closed:
            raise IOError("HTTP read closed?!")
        try:
            return self.read_response.raw.read(size)
        except Exception as exc:
            raise IOError("HTTP read error!") from exc

    def read_close(self):
        """File-handle close wrapper (noop).

    """
        try:
            self.read_response.close()
            self.read_response = None
        except Exception:
            pass
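Ejemplo n.º 59 only shows the driver itself. The short sketch below is one hedged way a consumer might drive it for reads, assuming a made-up device UID and a placeholder handle_bytes() callback that are not part of the original code: construct the driver, connect, pull raw SBP bytes until the stream ends, then close.

driver = HTTPDriver(device_uid="my-device-uid")  # hypothetical UID
if driver.connect_read():
    try:
        while True:
            chunk = driver.read(4096)    # raw SBP bytes, 4 KiB at a time
            if not chunk:
                break                    # server closed the stream
            handle_bytes(chunk)          # placeholder for the consumer's SBP framing/parsing
    except IOError:
        pass                             # connection dropped; the caller may reconnect
    finally:
        driver.close()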
Ejemplo n.º 60
0
    @classmethod
    def _session(cls) -> FuturesSession:
        return FuturesSession(executor=ThreadPoolExecutor(max_workers=10))