Example #1
# Request a device code from Trakt's OAuth device-authorization endpoint
def get_device_code():
    code_request_params = {
        "url": API_URL + "/oauth/device/code",
        "headers": {
            "Content-Type": "application/json"
        },
        "json": {
            "client_id": CLIENT_ID
        }
    }
    code_resp = safe_request('post', code_request_params)
    return code_resp.json() if code_resp else None
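Every example on this page delegates its HTTP calls to a project-level safe_request helper that is not shown (and the name is bound to different helpers in different projects: examples #1–#6 and #8 pass a method name and a params dict, example #7 passes a bare URL, and examples #9–#10 use a proxy-aware variant). A minimal sketch matching the (method, params) signature, assuming it simply forwards to requests.request and returns None on network failure:

import logging

import requests

logger = logging.getLogger(__name__)


def safe_request(method, params):
    # Hypothetical helper: forward to requests.request, return None on failure
    try:
        resp = requests.request(method, **params)
    except requests.exceptions.RequestException as e:
        logger.error('Request failed: %s', e)
        return None
    return resp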
Example #2
# Send a scrobble event (start/pause/stop) with the current playback progress
def scrobble(verb, media_info, progress, *args, **kwargs):
    scrobble_data = prepare_scrobble_data(**media_info)
    logger.debug(scrobble_data)
    if not scrobble_data:
        return None
    scrobble_data['progress'] = progress
    scrobble_params = {
        "url": API_URL + '/scrobble/' + verb,
        "headers": get_headers(),
        "json": scrobble_data
    }
    scrobble_resp = safe_request('post', scrobble_params)
    return scrobble_resp.json() if scrobble_resp else None
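A hypothetical call; the verb maps onto Trakt's scrobble endpoints (start, pause, stop), and the media_info keys shown are assumptions, since prepare_scrobble_data is not on this page:

resp = scrobble('start', {'type': 'movie', 'title': 'Inception', 'year': 2010}, 10.0)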
Example #3
# Search Trakt by title, optionally restricted to the given media types
def search(query, types=None, extended=False):
    if not types:
        types = ['movie', 'show', 'episode']
    search_params = {
        "url": API_URL + '/search/' + ",".join(types),
        "params": {
            'query': query,
            'extended': extended,
            'field': 'title'
        },
        "headers": get_headers()
    }
    r = safe_request('get', search_params)
    return r.json() if r else None
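A hypothetical call; Trakt's search endpoint returns a JSON list of matches, though the exact response shape is an assumption here:

results = search('Inception', types=['movie'])
if results:
    for match in results:
        print(match['movie']['title'], match['movie']['year'])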
Example #4
# Sign in to plex.tv with username/password and return the account's auth token
def plex_token_auth(login, password):
    auth_params = {
        "url": "https://plex.tv/users/sign_in.json",
        "data": {
            "user[login]": login,
            "user[password]": password
        },
        "headers": {
            "X-Plex-Client-Identifier": "com.iamkroot.trakt_scrobbler",
            "X-Plex-Product": "Trakt Scrobbler",
            "Accept": "application/json"
        }
    }
    resp = safe_request("post", auth_params)
    return resp and resp.json()["user"]["authToken"]
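A hypothetical follow-up, assuming Plex's convention of sending the token in an X-Plex-Token header (the endpoint below is for illustration only):

import requests

token = plex_token_auth('user@example.com', 'secret')
if token:
    resp = requests.get('https://plex.tv/api/v2/user',
                        headers={'X-Plex-Token': token, 'Accept': 'application/json'})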
Example #5
from datetime import datetime as dt

# Add a watched item to the user's Trakt history, timestamped with updated_at
def add_to_history(media_info, updated_at, *args, **kwargs):
    watched_at = dt.utcfromtimestamp(updated_at).isoformat() + 'Z'
    history = prepare_history_data(watched_at=watched_at, **media_info)
    if not history:
        return
    params = {
        "url": API_URL + '/sync/history',
        "headers": get_headers(),
        "json": history
    }
    resp = safe_request('post', params)
    if not resp:
        return False
    added = resp.json()['added']
    return (media_info['type'] == 'movie' and added['movies'] > 0) or \
        (media_info['type'] == 'episode' and added['episodes'] > 0)
Example #6
# Exchange a refresh token for a fresh access token
def refresh_token(token_data):
    exchange_params = {
        "url": API_URL + '/oauth/token',
        "headers": {
            "Content-Type": "application/json"
        },
        "json": {
            "refresh_token": token_data['refresh_token'],
            "client_id": CLIENT_ID,
            "client_secret": CLIENT_SECRET,
            "redirect_uri": "urn:ietf:wg:oauth:2.0:oob",
            "grant_type": "refresh_token"
        }
    }
    exchange_resp = safe_request('post', exchange_params)
    if exchange_resp and exchange_resp.status_code == 200:
        logger.info('Refreshed access token.')
        return exchange_resp.json()
    else:
        logger.info("Error refreshing token.")
Example #7
import json
import os

import pandas as pd

# Download official exchange rates from the NBRB API and cache them as a CSV file
def get_rates(currency, start_date, end_date):
    csv_file_path = generate_file_path(currency, start_date, end_date)
    if os.path.isfile(csv_file_path):
        return  # already cached

    url = "http://www.nbrb.by/API/ExRates/Rates/Dynamics/{}?startDate={}&endDate={}"
    request = url.format(
        currency,
        "-".join(start_date.split("-")[::-1]),
        "-".join(end_date.split("-")[::-1]),
    )

    data = json.loads(safe_request(request).text)
    df = pd.DataFrame({
        "day": [date_format(i["Date"]) for i in data],
        "rate": [i["Cur_OfficialRate"] for i in data],
    })

    with open(csv_file_path, "w") as csv_file:
        csv_file.write(df.to_csv(index=False))
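A hypothetical usage; the NBRB API identifies currencies by internal numeric ids, so the id below is a placeholder, and generate_file_path is a project helper not shown on this page:

import pandas as pd

currency_id = 145  # placeholder id, not verified against the NBRB catalogue
get_rates(currency_id, '2020-01-01', '2020-12-31')
rates_df = pd.read_csv(generate_file_path(currency_id, '2020-01-01', '2020-12-31'))
print(rates_df.head())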
Example #8
import sys

# Poll Trakt's device token endpoint once; returns token data after the user authorizes
def get_device_token(device_code):
    token_request_params = {
        "url": API_URL + "/oauth/device/token",
        "headers": {
            "Content-Type": "application/json"
        },
        "json": {
            "code": device_code,
            "client_id": CLIENT_ID,
            "client_secret": CLIENT_SECRET
        }
    }
    token_resp = safe_request('post', token_request_params)
    if not token_resp:
        return
    elif token_resp.status_code == 400:
        logger.info('Waiting for user to authorize the app.')
        return
    elif token_resp.status_code == 200:
        return token_resp.json()
    else:
        logger.error('Invalid status code of token response.')
        sys.exit(1)
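Examples #1 and #8 are the two halves of the OAuth device flow; a sketch of how a caller might drive them, assuming the response carries the standard device-flow fields (device_code, user_code, verification_url, expires_in, interval):

import time


def device_auth():
    # Hypothetical driver built on get_device_code() and get_device_token()
    code_data = get_device_code()
    if not code_data:
        return None
    print('Visit %s and enter the code %s'
          % (code_data['verification_url'], code_data['user_code']))
    deadline = time.time() + code_data['expires_in']
    while time.time() < deadline:
        token_data = get_device_token(code_data['device_code'])
        if token_data:
            return token_data
        time.sleep(code_data['interval'])
    return None  # the code expired before the user authorized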
Example #9
    'types_EDI': 'on',
    'role_order': '0',
    'show_option': '0',
    'show_sotr': '0',
    'check_show_refs': 'on',
    'check_hide_doubles': 'on',
    'sortorder': '0',
    'order': '1',
    'itemboxid': '0',
}

proxy_gen = unblocked_proxies_generator()

# search request with the specified parameters
proxy = next(proxy_gen)
response, proxy = safe_request(url, headers, data, proxy, proxy_gen)

# get the number of links and result pages
soup = BeautifulSoup(response.content, 'html.parser')
link_count = int(soup.select_one('td.redref b font').text)
pages_count = link_count // 100 + 1  # 100 links per page
print('Found %d links' % link_count)

links = []

for page_num in range(1, pages_count + 1):

    # request for the given results page
    data['pagenum'] = str(page_num)
    response, proxy = safe_request(url, headers, data, proxy, proxy_gen)
    soup = BeautifulSoup(response.content, 'html.parser')
Example #10
logger.info('visited %d links' % len(visited_links))

# list of links still to visit
unvisited_links = list(set(links) - set(visited_links))
np.random.shuffle(unvisited_links)

logger.info('need to visit %d links' % len(unvisited_links))

proxy_gen = unblocked_proxies_generator()
proxy = next(proxy_gen)

for i, url in enumerate(unvisited_links):
    print('processing url %s (%d of %d)' % (url, i + 1, len(unvisited_links)))

    response, proxy = safe_request(url, headers, {}, proxy, proxy_gen)

    if not is_elib_blocked(response):
        soup = BeautifulSoup(response.content, 'html.parser')
        doi = soup.select_one('a[href*="doi.org"]')

        if doi:
            # save the link to the page containing a DOI to a file
            with open('links_doi.txt', 'a', encoding='utf8') as f:
                f.write('%s\n' % url)

            logger.info('url %s contains a DOI' % url)

        with open('visited_links.txt', 'a', encoding='utf8') as f:
            f.write('%s\n' % url)
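Examples #9 and #10 depend on unblocked_proxies_generator, is_elib_blocked and a proxy-aware safe_request(url, headers, data, proxy, proxy_gen), none of which appear on this page. A minimal sketch of the generator, assuming it just cycles over a fixed pool (the real helper presumably also probes each proxy and skips ones the target site has blocked):

import itertools


def unblocked_proxies_generator():
    # Hypothetical: endlessly cycle over a static proxy pool
    proxy_pool = ['http://127.0.0.1:8080']  # placeholder addresses
    yield from itertools.cycle(proxy_pool)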