from requests_threads import AsyncSession

session = AsyncSession(n=100)

async def _main():
    rs = []
    for _ in range(100):
        rs.append(await session.get('http://httpbin.org/get'))
    print(rs)

if __name__ == '__main__':
    session.run(_main)
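Each awaited call above returns a plain requests.Response, and the calls can be overlapped by queuing them before awaiting (the last example on this page uses the same trick). A minimal sketch along those lines, reusing the httpbin URL from above:

from requests_threads import AsyncSession

session = AsyncSession(n=10)

async def _main():
    # each get() returns an awaitable right away; nothing blocks here,
    # so all 10 requests are handed to the thread pool together
    pending = [session.get('http://httpbin.org/get') for _ in range(10)]
    responses = [await p for p in pending]
    for r in responses:
        print(r.status_code)  # ordinary requests.Response objects

if __name__ == '__main__':
    session.run(_main)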
Example #2
import cv2
import keyboard

# get sensor data as JSON
import requests
import json

from requests_threads import AsyncSession
session = AsyncSession(n=100)

async def _main():
    # config for ip camera
    ip, port = "192.168.137.83", "5678"
    url = 'http://' + ip + ':' + port + '/sensors.json'

    width, height = 3000, 1500

    pic = cv2.imread('./sample.png')
    pic = cv2.resize(pic, (width, height))

    y1, x1 = 250, 1000
    x = 0
    while True:
        x += 1
        print(x)
        # use the async session; the original called the blocking
        # requests.get() here, which stalls the coroutine for no benefit
        resp = await session.get(url, verify=False)

        data = resp.json()  # see the JSON Response Content documentation

        img = pic[y1:y1 + 1000, x1:x1 + 1000, :]
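        # --- assumed continuation: the original listing is truncated here ---
        print(data)  # assumed: dump the sensor readings
        # show the cropped frame and exit on 'q' (a typical cv2/keyboard
        # pattern; purely an illustration, not recovered from the source)
        cv2.imshow('sensors', img)
        if cv2.waitKey(1) & 0xFF == ord('q') or keyboard.is_pressed('q'):
            break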
Example #3
from gevent import monkey

monkey.patch_all()

import time
from worldLoader import WorldSlice, setBlock
import grequests
from requests_threads import AsyncSession

rect = (-33, 27, 8, 8)
ry = 69
# slice = WorldSlice(rect)

session = AsyncSession(n=64)


async def _main():
    for i in range(32):
        x = rect[0]
        y = i + ry
        z = rect[1]
        block = "minecraft:stone"

        urls = []
        for dx in range(0, rect[2]):
            for dz in range(0, rect[3]):
                urls.append('http://localhost:9000/blocks?x=%i&y=%i&z=%i' %
                            (x + dx, y, z + dz))

        rs = []
        for url in urls:
            # loop body truncated in the original listing; a plausible
            # continuation, assuming the GDMC HTTP interface semantics
            # (PUT the block id to each /blocks URL):
            rs.append(await session.put(url, data=block))
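A caveat on this snippet: requests_threads dispatches its requests on Twisted's thread pool, while monkey.patch_all() rewires the standard library for gevent (which is what grequests needs), so the example mixes two concurrency models; if you adapt it, it is safer to pick one of the two.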
Example #4
# For the asynchronous version of the 3 HTTP calls, this program uses the
# requests_threads module from the requests/requests-threads GitHub
# repository, which runs the requests on top of the Twisted engine.
from requests_threads import AsyncSession

print("Part 2: 3 HTTP Asynchronous Calls")

session = AsyncSession(n=3)


async def main():
    rArray = []
    for _ in range(3):
        rArray.append(await session.get(
            'https://webhook.site/2ccbcad3-a4d2-45e3-a047-364c93da579c'))
    for r in rArray:
        print("X-request-Id:", r.headers['X-request-Id'])
        print("Date:", r.headers['Date'])


session.run(main)
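Note that requests exposes response headers through a case-insensitive mapping, so the mixed-case lookup above matches whatever capitalization webhook.site sends:

r.headers['x-request-id'] == r.headers['X-Request-Id']  # same entry either way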
Example #5
    def __init__(self, crawler):
        logger.debug(f"New Downloader with spider {crawler.spider.name}")
        self.session = AsyncSession(n=100)
        self.crawler = crawler
Example #6
import argparse
import asyncio
import logging

# Pin and tracer come from Datadog's ddtrace library (used below)
from ddtrace import Pin, tracer
from requests_threads import AsyncSession

logger = logging.getLogger()

parser = argparse.ArgumentParser(description='Concurrent Traffic Generator')
parser.add_argument('concurrent',
                    type=int,
                    help='Number of Concurrent Requests')
parser.add_argument('total', type=int, help='Total number of Requests to Make')
parser.add_argument('url', type=str, help='URL to fetch')
args = parser.parse_args()

asyncio.set_event_loop(asyncio.new_event_loop())

session = AsyncSession(n=args.concurrent)
Pin.override(session, service='concurrent-requests-generator')


async def generate_requests():
    with tracer.trace('flask.request',
                      service='concurrent-requests-generator'):
        rs = []
        # queue every request first (each call returns an awaitable)...
        for _ in range(args.total):
            rs.append(session.get(args.url))
        # ...then await them, so up to args.concurrent run at once
        for i in range(args.total):
            rs[i] = await rs[i]
        print(rs)


session.run(generate_requests)
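With a Datadog agent running, the spans from tracer.trace are reported under the concurrent-requests-generator service. A hypothetical invocation (script name and URL are placeholders, not from the original):

python traffic.py 10 100 http://localhost:5000/

which fires 100 requests at the URL with at most 10 in flight at a time.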