def fb_extend_oauth_token(temp_access_token):
    """
    Exchange a short-lived Facebook access token for a long-lived one
    via the Graph API's `fb_exchange_token` grant.
    """
    url = _graph_url + "oauth/access_token"
    params = {
        "grant_type": "fb_exchange_token",
        "client_id": settings.FACEBOOK_APP_ID,
        "client_secret": settings.FACEBOOK_APP_SECRET,
        "fb_exchange_token": temp_access_token,
    }
    r = requests.get(url=url, params=params)
    # The endpoint responds with a querystring-style body; parse it into a dict.
    token = parse_utf8_qsl(r.content)
    # `expires` comes back as seconds-until-expiry; convert it to an
    # absolute ISO-8601 timestamp.
    token["expires"] = dates.parse_ts(
        dates.now(ts=True) + int(token["expires"])).isoformat()
    return token
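
A brief usage sketch (hypothetical, not from the source project): it assumes the helper above is in scope and that a short-lived token has already been obtained from a client-side Facebook login; the placeholder token string and the print calls are illustrative only.

# Hypothetical usage sketch: the short-lived token would normally come from a
# client-side Facebook login flow; the value below is a placeholder.
short_lived_token = "SHORT-LIVED-TOKEN"

long_lived = fb_extend_oauth_token(short_lived_token)

# `access_token` is the long-lived token returned by the Graph API; `expires`
# is the ISO-8601 expiry computed by the helper above.
print(long_lived["access_token"])
print(long_lived["expires"])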
Example #2
def publish_date(soup, source_url=None):
    """
    Extract publish date from meta / source_url.
    """

    # Try an ISO-8601 date first.
    for tag in PUBLISH_DATE_TAGS:
        ds = _extract_tag_data(soup, tag)
        if ds:
            dt = dates.parse_iso(ds, enforce_tz=False)
            if dt:
                return dt

    # Next, try a unix timestamp.
    for tag in PUBLISH_DATE_TAGS:
        ds = _extract_tag_data(soup, tag)
        if ds:
            dt = dates.parse_ts(ds)
            if dt:
                return dt

    # Next, try any parseable date string.
    for tag in PUBLISH_DATE_TAGS:
        ds = _extract_tag_data(soup, tag)
        if ds:
            dt = dates.parse_any(ds, enforce_tz=False)
            if dt:
                return dt

    # Finally, fall back on a date pattern in the URL.
    if source_url:
        dm = re_url_date.search(source_url)
        if dm:
            ds = dm.group(0)
            dt = dates.parse_any(ds, enforce_tz=False)
            if dt:
                return dt
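
A minimal usage sketch, assuming `publish_date` and its `PUBLISH_DATE_TAGS` configuration are in scope and that the page is parsed with BeautifulSoup; the sample HTML, meta property, and URL are illustrative, and whether that particular meta tag is matched depends on what `PUBLISH_DATE_TAGS` contains.

from bs4 import BeautifulSoup

# Hypothetical article page; whether `article:published_time` is picked up
# depends on the tags configured in PUBLISH_DATE_TAGS.
html = """
<html>
  <head><meta property="article:published_time" content="2015-06-01T12:30:00Z"></head>
  <body><p>story text</p></body>
</html>
"""

soup = BeautifulSoup(html, "html.parser")

# The URL is only consulted as a last resort, via the date-pattern regex.
dt = publish_date(soup, source_url="http://example.com/2015/06/01/story.html")
print(dt)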