Example #1
0
def public_stream(token):
    """
    Collect tweets from the public sample stream.

    token - A dict containing client_key, client_secret, resource_owner_key,
            and resource_owner_secret, passed through to OAuth1.
    """
    # Single-element lists let stream_rate_limit reset the counters in place
    count = []
    httpcount = []
    count.append(1)
    httpcount.append(1)
    url = "https://stream.twitter.com/1.1/statuses/sample.json"
    headeroauth = OAuth1(signature_type='auth_header', **token)
    while True:
        r = req.get(url, auth=headeroauth, timeout=90.0, stream=True)
        if r.status_code == 200:
            count[0] = 1
            httpcount[0] = 1
            for tweet in r.iter_lines():
                if tweet:
                    yield tweet
        elif r.status_code == 420:
            # 420 is Twitter's streaming rate-limit status
            stream_rate_limit(r, count)
        else:
            stream_rate_limit(r, httpcount)
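
A minimal consumer sketch (placeholder credentials; the keyword names are the
ones OAuth1(**token) expects from requests_oauthlib):

import json

token = {
    'client_key': '...',              # placeholder OAuth credentials
    'client_secret': '...',
    'resource_owner_key': '...',
    'resource_owner_secret': '...',
}

for raw in public_stream(token):
    tweet = json.loads(raw)           # each yielded line is one JSON-encoded tweet
    print(tweet.get('text'))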
Example #2
0
def get_response(url):
    try:
        r = requests.get(url, timeout=30)
    except Exception as e:
        r = None
        #log.warn('get_r %s %s', url, e)
    return r
Example #3
0
def volume():
    v = req.get('/v1/me/player')
    v_json = v.json()
    vol_device = v_json["device"]
    vol = vol_device["volume_percent"]
    print(vol)
    return vol
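
A hypothetical write-side counterpart (a sketch only; it assumes the same req
wrapper and its put(path, body) convention used in these examples, and
Spotify's PUT /v1/me/player/volume endpoint with its volume_percent query
parameter):

def set_volume(percent):
    # volume_percent must be an integer 0-100 per the Spotify Web API
    req.put(f'/v1/me/player/volume?volume_percent={int(percent)}', '')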
Example #4
0
def get_pool_assignments(pool_id, status, start_ts=None):
    if start_ts:
        s = f'&created_gt={start_ts}'
    else:
        s = ''
    return req.get(
        f'/api/v1/assignments?pool_id={pool_id}&status={status}{s}').json()
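
A variant sketch, assuming this req wrapper forwards keyword arguments to
requests: passing params as a dict lets the library URL-encode the query
instead of interpolating it by hand.

def get_pool_assignments_params(pool_id, status, start_ts=None):
    params = {'pool_id': pool_id, 'status': status}
    if start_ts:
        params['created_gt'] = start_ts
    # assumes req.get(path, params=...) behaves like requests.get
    return req.get('/api/v1/assignments', params=params).json()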
Example #5
0
def play_pause(channel):
    r = req.get('/v1/me/player')
    play_pause_json = r.json()
    if not play_pause_json["is_playing"]:
        print("play")
        req.put('/v1/me/player/play', '')
    else:
        print("pause")
        req.put('/v1/me/player/pause', '')
Example #6
0
def get(url):
    text = ''
    try:
        r = requests.get(url, allow_redirects=False, timeout=30)
        if r.status_code == 200:
            text = r.text
    except Exception as e:
        text = ''
        #log.warn('get %s %s', url, e)
    return text
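
A hedged extension (not part of the original): retry transient failures a few
times before giving up, reusing get() above.

import time

def get_with_retry(url, attempts=3, delay=2.0):
    for _ in range(attempts):
        text = get(url)           # get() returns '' on any failure
        if text:
            return text
        time.sleep(delay)
    return ''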
Example #7
0
 def tst_net(self):
     '''
     Test whether the network is authenticated.
     :return: whether already authenticated
     '''
     res = req.get('http://auth.ysu.edu.cn', headers=self.header)
     if res.url.find('success.jsp') > 0:
         self.isLogined = True
     else:
         self.isLogined = False
     return self.isLogined
Example #8
0
def history(ticker: str):
    h = get(
        'history', {
            'symbol': ticker,
            'exchange': 'SPBX',
            'tf': 86400,
            'from': start,
            'to': int(datetime.now().timestamp())
        })['history']
    for i in h:
        i['time'] = datetime.fromtimestamp(i['time']).date().isoformat()
    return h
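
Usage sketch ('INTC' mirrors a ticker used elsewhere in this repo; the 'close'
field name is an assumption about the bar layout):

bars = history('INTC')
if bars:
    print(bars[0]['time'], bars[0].get('close'))  # 'close' is assumed, not confirmed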
Example #9
0
def map_image(detail_url):
    #Parse the latitude/longitude info from the DiningCode restaurant detail URL
    html = req.get(detail_url).text
    soup = bfs(html, 'html.parser')

    soup_lat = soup.select('#hdn_lat')  # latitude
    soup_lng = soup.select('#hdn_lng')  # longitude

    if soup_lat is not None and len(
            soup_lat) > 0 and soup_lng is not None and len(soup_lng) > 0:

        latitude = soup_lat[0]['value']
        longitude = soup_lng[0]['value']

        real_latitude = float(latitude)
        real_longitude = float(longitude)

        # Use the folium library to create a map HTML file with the restaurant marked
        food_location = [real_latitude, real_longitude]
        food_map = folium.Map(location=food_location, zoom_start=25)
        folium.Marker(food_location, popup='destination').add_to(food_map)
        food_map.save('./location.html')

        # Use selenium to screenshot the map HTML file into a static image
        browser = webdriver.Chrome(
            'C:/Users/yurim/Desktop/chromedriver.exe')  # the chromedriver path goes here
        browser.get(
            'C:/Users/yurim/Documents/GitHub/capstone-capyou/code/complete_code/location.html'
        )  # path to the map HTML file
        browser.save_screenshot('restaurant_location.png')
        #time.sleep(2)
        #browser.quit()  # commented out so the dynamic map window stays open
        # The HTML window disappears automatically when another restaurant is
        # searched or '끝' (end) is entered; closing it manually before then raises an error

        # Reply via the slackbot with the image file saved above
        map_image_file = {
            'file':
            ('restaurant_location.png', open('restaurant_location.png',
                                             'rb'), 'png')
        }

        map_image_file_detail = {
            "filename": "restaurant_location.png",
            "token": token,
            "channels": ['#general']
        }
        r = req.post("https://slack.com/api/files.upload",
                     params=map_image_file_detail,
                     files=map_image_file)

    else:
        return
Example #10
0
def test_good():
    r = req.get('http://032c.com/sitemap.xml.gz')
    sm = sitemap.parse_sitemap(r)
    eq_(sm['type'], 'urlset')
    ok_(sm['items'] > 1)

    r = req.get('http://www.aftenposten.no/external/xmlsitemap/sitemap.xml.gz')
    sm = sitemap.parse_sitemap(r)
    eq_(sm['type'], 'sitemapindex')
    ok_(sm['items'] > 1)

    r = req.get('http://100wordblog.wordpress.com/sitemap.xml')
    sm = sitemap.parse_sitemap(r)
    eq_(sm['type'], 'urlset')
    ok_(sm['items'] > 1)

    r = req.get('http://wayland.patch.com/sitemaps/articles')
    sm = sitemap.parse_sitemap(r)
    eq_(sm['type'], 'urlset')
    ok_(sm['items'] > 1)

    r = req.get('http://4g-portal.com/sitemap.xml.gz')
    print(r.headers)
    print(r.content[:10])
    sm = sitemap.parse_sitemap(r)
    eq_(sm['type'], 'urlset')
    ok_(sm['items'] > 1)

    r = req.get('http://cruisetalk.org/sitemap_attachment.xml.gz')
    print(r.headers)
    print(r.content[:10])
    sm = sitemap.parse_sitemap(r)
    eq_(sm['type'], 'urlset')
    ok_(sm['items'] > 1)
Example #11
0
def users_lookup(user_ids, token):
    """
    Look up the profiles of as many users as possible.

    user_ids - A list of user_ids of twitter users (max 100).
    returns  - A list of profiles for the given users. The list may not contain
               profiles for all the users.
    """
    headeroauth = OAuth1(signature_type='auth_header', **token)    

    user_ids = map(str, user_ids)
    user_ids = ",".join(user_ids)

    url = "http://api.twitter.com/1.1/users/lookup.json"
    params = {
        "user_id": user_ids,
        "include_entities": 1
    }

    tries = 0
    while tries < MAX_RETRY:
        r = req.get(url, params=params, auth=headeroauth, timeout=60.0)

        # Proper receive
        if r.status_code == 200:
            check_rate_limit(r)
            return r.json()

        # User doesn't exist
        if r.status_code in (403, 404):
            log.info(u"Try {}: User doesn't exist - {} {}",
                     tries, r.status_code, r.text)
            check_rate_limit(r)
            return []

        # Check if rate limited
        if r.status_code == 400:
            log.info(u"Try {}: Being throttled - {} {}",
                     tries, r.status_code, r.text)
            check_rate_limit(r)
            tries += 1
            continue

        # Don't expect anything else
        log.warn(u"Try {}: Unexpected response - {} {}",
                 tries, r.status_code, r.text)
        check_rate_limit(r)
        tries += 1
        continue

    log.critical("Maximum retries exhausted ...")
    raise SystemExit()
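
Since users_lookup accepts at most 100 ids per call, a hypothetical helper (a
sketch, not part of the original module) can batch a longer id list:

def users_lookup_all(user_ids, token, batch_size=100):
    profiles = []
    for i in range(0, len(user_ids), batch_size):
        # each call sees at most batch_size ids, per the docstring's limit
        profiles.extend(users_lookup(user_ids[i:i + batch_size], token))
    return profiles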
Example #12
0
 def get_alldata(self):
     '''
     Get all information for the currently authenticated account.
     # !!!NOTE!!! # This call retrieves the account id alldata['userId'], the name alldata['userName'], and the password alldata['password']
     :return: all of the data as a dict
     '''
     res = req.get(
         'http://auth.ysu.edu.cn/eportal/InterFace.do?method=getOnlineUserInfo',
         headers=self.header)
     try:
         self.alldata = json.loads(res.text)
     except json.decoder.JSONDecodeError:
         print('Failed to parse the data; please try again later.')
     return self.alldata
Example #13
0
def users_show(user_id, token):
    """
    Get the profile of the given user.

    user_id - A user_id of a single twitter user.
    return  - A 2-tuple. First is the status code returned by Twitter, second is
              the profile of the Twitter user. If the status code is 403 or 404,
              the profile will instead contain the reason for the profile's absence.
    """

    url = "http://api.twitter.com/1.1/users/show.json"
    params = {
        "user_id": user_id,
        "include_entities": 1
    }
    
    headeroauth = OAuth1(signature_type='auth_header', **token)
    tries = 0
    while tries < MAX_RETRY:
        r = req.get(url, params=params, auth=headeroauth, timeout=60.0)

        # Proper receive
        if r.status_code == 200:
            check_rate_limit(r)
            return (200, r.json())

        # User doesn't exist
        if r.status_code in (403, 404):
            check_rate_limit(r)
            return (r.status_code, r.json())

        # Check if rate limited
        if r.status_code == 400:
            log.info(u"Try {}: Being throttled - {} {}",
                     tries, r.status_code, r.text)
            check_rate_limit(r)
            tries += 1
            continue

        # Don't expect anything else
        log.warn(u"Try {}: Unexpected response - {} {}",
                 tries, r.status_code, r.text)
        check_rate_limit(r)
        tries += 1
        continue

    log.critical("Maximum retries exhausted ...")
    raise SystemExit()
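
Usage sketch (12345 is a placeholder user_id; token is the same credential
dict the other calls use): the 2-tuple lets a caller distinguish a live
profile from a suspended or deleted account without exceptions.

status, profile = users_show(12345, token)
if status == 200:
    print(profile['screen_name'])
else:
    print('unavailable:', profile)    # the 403/404 body explains the absence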
Example #14
0
    def logout(self):
        '''
        Log out; the required identifier is obtained automatically inside this call.
        :return: tuple - first item: whether the operation succeeded; second item: details
        '''
        if self.alldata is None:
            self.get_alldata()

        res = req.get(self.url + 'logout', headers=self.header)
        logout_json = json.loads(res.text)
        #self.info = logout_json
        self.info = logout_json['message']
        if logout_json['result'] == 'success':
            return (True, 'Logged out successfully')
        else:
            return (False, self.info)
Example #15
0
def dining_code(q):
    params = {
        'query': q
    }
    # GET request for the response
    html = req.get('http://www.diningcode.com/list.php?', params=params).text

    soup = bfs(html, 'html.parser')
    rank_list = []

    # Restaurant names and links for ranks 1-10
    # print(enumerate(soup.select('span.btxt'), 1))
    for idx, tag in enumerate(soup.select('span.btxt'), 1):
        rank_list.append('{}'.format(tag.text))

    return rank_list
Example #16
0
    def login(self, user, pwd, type, code=''):
        '''
        Log in to the campus network with the given parameters; automatically
        checks whether the current network is already authenticated.
        :param user: login id
        :param pwd: login password
        :param type: authentication service
        :param code: captcha
        :return: tuple - first item: authentication status; second item: details
        '''
        if self.isLogined is None:
            self.tst_net()
        if not self.isLogined:
            if user == '' or pwd == '':
                return (False, 'Username or password is empty')
            self.data = {
                'userId': user,
                'password': pwd,
                'service': self.services[type],
                'operatorPwd': '',
                'operatorUserId': '',
                'validcode': code,
                'passwordEncrypt': 'False'
            }
            res = req.get('http://auth.ysu.edu.cn', headers=self.header)
            queryString = re.findall(r"href='.*?\?(.*?)'",
                                     res.text, re.S)
            self.data['queryString'] = queryString[0]

            res = req.post(self.url + 'login',
                           headers=self.header,
                           data=self.data)
            login_json = json.loads(res.text)
            self.userindex = login_json['userIndex']
            #self.info = login_json
            self.info = login_json['message']
            if login_json['result'] == 'success':
                return (True, 'Authentication succeeded')
            else:
                return (False, self.info)
        return (True, 'Already online')
Example #17
0
def dining_code(q):
    params = {'query': q}
    # GET request for the response
    html = req.get('http://www.diningcode.com/list.php?', params=params).text

    soup = bfs(html, 'html.parser')
    rank_list = []  # list to hold 'rank.name' strings for restaurants 1-10
    simple_explain = []  # list to hold a short menu blurb for each of the top 10
    url_list = []  # list to hold the link for each of the top 10
    # Restaurant names and links for ranks 1-10 / only as many entries as were found!
    for tag in soup.find_all('a', 'blink'):  # [0] is a sponsored listing
        title = tag.select('.btxt')  # select elements with class btxt
        title_name = title[0].get_text()  # keep only the text of the title

        simple = tag.select('.stxt')  # select elements with class stxt
        simple_name = simple[0].get_text()  # keep only the text of the blurb

        title_link = tag.get('href')  # take the address from the a tag's href

        rank_list.append(title_name)  # append the restaurant name
        simple_explain.append(simple_name)  # append the short description
        url_list.append('https://www.diningcode.com' +
                        title_link)  # append the restaurant link
    return rank_list, simple_explain, url_list
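
Usage sketch ('pasta' is a placeholder query): the three returned lists are
index-aligned, so position i holds the name, blurb, and link of the same
restaurant.

names, blurbs, links = dining_code('pasta')
for rank, (name, blurb, link) in enumerate(zip(names, blurbs, links), 1):
    print(rank, name, blurb, link)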
Example #18
0
def clone_pool(pool_id):
    r = req.post(f'/api/v1/pools/{pool_id}/clone', None)
    op = r.json()['id']
    r = req.get(f'/api/v1/operations/{op}')
    return int(r.json()['details']['pool_id'])
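
clone_pool assumes the clone operation has already finished when the operation
record is read back. A more defensive sketch (the 'SUCCESS' status string is
an assumption; substitute whatever the API actually reports):

import time

def clone_pool_wait(pool_id, poll_interval=1.0):
    op = req.post(f'/api/v1/pools/{pool_id}/clone', None).json()['id']
    while True:
        details = req.get(f'/api/v1/operations/{op}').json()
        if details.get('status') == 'SUCCESS':   # assumed terminal status
            return int(details['details']['pool_id'])
        time.sleep(poll_interval)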
Example #19
0
 def websearch(self, query, page=1, facets=''):
     headers = {'Authorization': self.access_token}
     self.r = req.get("https://api.zoomeye.org/web/search", headers=headers, params={'query': query, 'page': page, 'facets': facets})
     return self.requestisok()
Example #20
0
 def resourcesinfo(self):
     headers = {'Authorization': self.access_token}
     self.r = req.get('https://api.zoomeye.org/resources-info', headers=headers)
     return self.requestisok()
Example #21
0
def test_blog():
    r = req.get('http://111degreeswest.blogspot.com/feeds/posts/default?orderby=UPDATED')
    sm = sitemap.parse_sitemap(r)
    eq_(sm, None)
Example #22
0
                page = 1
            url_ = url + str(page)
            driver = selenium.webdriver.Chrome(options=option)
            driver.set_window_size(10000, 60000)
            html = driver.get(url_)

            sleep(6)
            html = driver.page_source
            soup = BeautifulSoup(html, "lxml")

            while (soup.find_all(name='li', attrs={'class': 'pix-card'}) !=
                   []):

                for i in soup.find_all(name='li', attrs={'class': 'pix-card'}):
                    try:
                        img_full_url, title, author = get(i)
                        if img_full_url is None and title is None and author is None:
                            continue
                        if author in author_dic.keys():
                            #num = author_dic[author]
                            if author_dic[author] == -1:
                                pass
                            else:
                                save_img(img_full_url, author, title)
                        else:
                            author_num += 1
                            author_dic[author] = str(author_num)

                            save_img(img_full_url, author, title)
                    except Exception:
                        continue
Example #23
0
def get_reading(server, value_descriptor, num_vals):
    code, json = req.get('http://localhost:48080/api/v1/reading/name/' +
                         value_descriptor + '/' + str(num_vals))
    return code, json
Example #24
0
from db import session
from history import history
from models.Stock import Stock
from models.Trade import Trade
from req import get

# h = history('INTC')
# o = get('orderbooks/MOEX/SBER')
# print(h)
stocks = ('INTC', 'IBM', 'GTX', 'ZM', 'AAPL', 'TSLA', 'SBER', 'YNDX')

q = get('securities/MOEX%3ASBER%2CMOEX%3AGAZP/quotes')
print(q)
Example #25
0
def pool_status(pool_id):
    data = req.get(f'/api/v1/pools/{pool_id}').json()
    s = data.get('status')
    if not s:
        return data
    return s
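
A usage sketch built on pool_status (the 'CLOSED' terminal value is an
assumption; substitute the status string the API actually reports):

import time

def wait_until_closed(pool_id, poll_interval=10.0):
    while pool_status(pool_id) != 'CLOSED':   # assumed terminal status
        time.sleep(poll_interval)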
Example #26
0
def user_timeline(user_id, token):
    """
    Get as many tweets from the user as possible.

    user_id - A single user_id of a twitter user.
    returns - A list of tweets for the given user. The list may be empty.
    """

    url = "http://api.twitter.com/1.1/statuses/user_timeline.json"
    headeroauth = OAuth1(signature_type='auth_header', **token)
    params = {
        "user_id": user_id,
        "count": 200,
        "include_rts": 1,
        "include_entities": 1
    }

    # We gather all tweets here
    tweets = []
    ids = set()
    tcount = 0

    tries = 0
    while tries < MAX_RETRY:
        r = req.get(url, params=params, auth=headeroauth, timeout=60.0)
        # Proper receive
        if r.status_code == 200:
            for tweet in r.json():
                if tweet["id"] not in ids:
                    tweets.append(tweet)
                    ids.add(tweet["id"])

            # If we have not added any more tweets; return
            if len(ids) == tcount:
                return tweets
            tcount = len(ids)

            # Set the new max_id value
            params["max_id"] = min(ids)
            tries = 0
            check_rate_limit(r)
            continue

        # Check if rate limited
        if r.status_code == 400:
            log.info(u"Try {}: Being throttled - {} {}",
                     tries, r.status_code, r.text)
            check_rate_limit(r)
            tries += 1
            continue

        # User doesn't exist
        if r.status_code in (401, 403, 404):
            log.info(u"Try {}: User doesn't exist - {} {}",
                     tries, r.status_code, r.text)
            check_rate_limit(r)
            return tweets

        # Don't expect anything else
        log.warn(u"Try {}: Unexpected response - {} {}",
                 tries, r.status_code, r.text)
        check_rate_limit(r)
        tries += 1
        continue

    log.critical("Maximum retries exhausted ...")
    raise SystemExit()
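
Usage sketch (12345 is a placeholder user_id): user_timeline pages backwards
via max_id until a pass adds no new tweet ids, so a single call drains the
reachable history.

tweets = user_timeline(12345, token)
print(len(tweets), 'tweets collected')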
Example #27
0
def dining_code_detail(detail_url):
    html = req.get(detail_url).text
    soup = bfs(html, 'html.parser')

    # Restaurant main image
    soup_food_image = soup.select('li.bimg > img')
    if soup_food_image is not None and len(soup_food_image) > 0:
        food_image = soup_food_image[0]['src']
    else:
        food_image = ""
    #notification(food_image)  # reply with the restaurant image
    #time.sleep(1)

    # Business hours
    run_hour1 = soup.findAll('p', 'l-txt')
    run_hour2 = soup.findAll('p', 'r-txt')
    if run_hour1 is not None and len(
            run_hour1) > 0 and run_hour2 is not None and len(run_hour2) > 0:
        hour_section1 = run_hour1[0].get_text()
        hour_section2 = run_hour2[0].get_text()
        possible_hour = "- Hours: " + hour_section1 + " " + hour_section2
        # notification(possible_hour)  # reply with the business hours
    else:
        possible_hour = ""
    #possible_hour = "- Hours: " + soup.select('p.l-txt')[0].text + " " + soup.select('p.r-txt')[0].text

    # Average rating
    soup_average_point = soup.select('p.star-point > span.point')
    if soup_average_point is not None and len(soup_average_point) > 0:
        star_point = "- Average rating: " + soup_average_point[0].text + " (out of 5)"
        # notification(star_point)
    else:
        star_point = ""
    #average_point = "- Average rating: " + soup.select('p.star-point > span.point')[0].text + " (out of 5)"
    #notification(average_point)  # reply with the average rating

    # One blog review URL
    soup_blog_review = soup.select('#div_blog > li > a')
    if soup_blog_review is not None and len(soup_blog_review) > 0:
        blog_review = "- Blog review URL: " + soup_blog_review[0]['href']
    else:
        blog_review = ""
        # notification(blog_review)

    #Address
    soup_map = soup.select('li.locat')
    if soup_map is not None and len(soup_map) > 0:
        map_addr = "- Address: " + soup_map[0].text
    else:
        map_addr = ""
        # notification(map_addr)
        # reply with the map image

    #Restaurant phone number
    soup_tel = soup.select('li.tel')
    if soup_tel is not None and len(soup_tel) > 0:
        tel_number = "- Phone: " + soup_tel[0].text
    else:
        tel_number = ""

    #Related hashtags
    soup_tag = soup.select('li.tag')
    if soup_tag is not None and len(soup_tag) > 0:
        hashtag = "- Tags: " + soup_tag[0].text
    else:
        hashtag = ""

    #Signature menu and price
    soup_menu_cost = soup.select('div.menu-info.short > ul.list > li')
    if soup_menu_cost is not None and len(soup_menu_cost) > 0:
        menu_cost = "- Signature menu and prices: " + soup_menu_cost[0].text
    else:
        menu_cost = ""
    # if run_hour1 is not None and len(run_hour1) > 0 and run_hour2 is not None and len(run_hour2) > 0:
    #     menu_cost = "Signature menu and prices: " + run_hour1[2].get_text() + run_hour2[2].get_text() + "/" + run_hour1[3].get_text() + run_hour2[3].get_text() + "/" + run_hour1[4].get_text() + run_hour2[4].get_text()

    # menu_cost = "Signature menu and prices: "
    # for tag in soup.find_all('div', 'menu-info.short'):
    #     menu = tag.select('p')
    #     for i in [1,2,3,4,5,6]:
    #         menu_cost += menu[i].text

    dic = {
        "color": "#CEE3F6",
        "text": possible_hour + "\n" + menu_cost + "\n" + star_point + "\n" +
                blog_review + "\n" + hashtag + "\n" + tel_number + "\n" +
                map_addr + "\n",
        "image_url": food_image
    }
    attachments = [dic]
    notification2(attachments)

    map_image(detail_url)  # call the map_image function
    # notification("------------------------------------------------------------------------------")

    dic = {
        "color": "#FA5858",
        "text":
        "☞ To see details for a listed restaurant, enter its rank\ne.g. '1' -> shows the info for rank 1.\n☞ To search for restaurants again, enter '끝' (end)",
        "mrkdwn_in": ["text", "pretext"]
    }
    attachments = [dic]
    notification2(attachments)
Example #28
0
def get_balance():
    r = req.get('/api/v1/requester')
    return r.json()['balance']
Example #29
0
    systemSpam.exit(1)

nextPageToken = ''
while (True):
    # Make sure access token is valid before request
    if (credentials.access_token_expired):
        # Access token expired, get a new one
        token_obj = credentials.get_access_token()  # get_access_token() should refresh the token automatically
        token_str = str(token_obj.access_token)

    url = 'https://content.googleapis.com/youtube/v3/liveChat/messages?liveChatId=' + liveChatID + '&part=snippet,authorDetails&pageToken=' + nextPageToken

    headers = {"Authorization": "Bearer " + token_str}

    r = req.get(url, headers=headers)

    if (r.status_code == 200):
        resp = r.json()
        if (debug >= 2):
            print(json.dumps(resp, indent=4, sort_keys=True))

        nextPageToken = resp["nextPageToken"]

        messages = resp["items"]

        for message in messages:
            #Message handling
            handle_msg(message)

        delay_ms = resp['pollingIntervalMillis']
Example #30
0
from req import get

m = get('Clients/MOEX/D70657/positions')
s = get('Clients/SPBX/D70657/positions', None, 'https://api.alor.ru/md/')