Code example #1
File: models.py Project: livlab/carebot
    def refresh(self):
        secrets = app_config.get_secrets()

        if not self.project.domain:
            return

        url = 'http://%s%s' % (self.project.domain, self.project.prefix)
        response = requests.get('https://free.sharedcount.com/url?apikey=%s&url=%s' % (secrets['SHAREDCOUNT_API_KEY'], url))

        if response.status_code != 200:
            print 'Failed to refresh social data from SharedCount: %i.' % response.status_code
            return

        print 'Updating social counts from SharedCount'

        data = response.json()

        self.facebook_likes = data['Facebook']['like_count'] or 0
        self.facebook_shares = data['Facebook']['share_count'] or 0
        self.facebook_comments = data['Facebook']['comment_count'] or 0
        self.twitter = data['Twitter'] or 0
        self.google = data['GooglePlusOne'] or 0
        self.pinterest = data['Pinterest'] or 0
        self.linkedin = data['LinkedIn'] or 0
        self.stumbleupon = data['StumbleUpon'] or 0

        self.last_update = timezone.now()

        self.save()
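
Note: every snippet in this listing calls app_config.get_secrets(), but none of them shows the helper itself. The sketch below is one plausible, minimal implementation, assuming secrets are stored in environment variables prefixed with the project slug; the real helper differs from project to project (some read a local secrets file instead), so treat it only as an illustration.

import os

PROJECT_SLUG = 'example-project'  # assumption: stands in for app_config.PROJECT_SLUG


def get_secrets():
    """
    Collect secrets from environment variables named <SLUG>_<KEY>,
    e.g. EXAMPLEPROJECT_TUMBLR_APP_KEY -> {'TUMBLR_APP_KEY': ...}.
    """
    prefix = PROJECT_SLUG.replace('-', '').upper() + '_'
    secrets = {}

    for name, value in os.environ.items():
        if name.startswith(prefix):
            secrets[name[len(prefix):]] = value

    return secrets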
Code example #2
File: __init__.py Project: wbez/debates
def _test_api_init(freq=2627):
    secrets = app_config.get_secrets()
    url = secrets.get('VERB8TM_SRT_TEST_API', app_config.PROJECT_SLUG)
    header = 'Content-Type: application/json'
    payload = '{"command":"init","millisecondCaptionDelay":"%s"}' % (freq)
    command = 'curl -H "%s" -X POST -d \'%s\' %s' % (header, payload, url)
    local(command)
Code example #3
File: data.py Project: nprapps/books17
    def get_goodreads_id(cls, isbn):
        """
        Use GoodReads search API
        """
        secrets = app_config.get_secrets()

        goodreads_id = None
        search_api_tpl = 'https://www.goodreads.com/search/index.xml'

        params = {
            'key': secrets['GOODREADS_API_KEY'],
            'q': isbn.encode('utf-8')
        }
        query_string = urlencode(params)

        search_api_url = '%s?%s' % (search_api_tpl, query_string)

        # Get search api results.
        r = requests.get(search_api_url, params=params)

        if r.status_code == 200:
            tree = ElementTree.fromstring(r.content)
            best_book = tree.find('.//best_book')
            if best_book is not None:
                goodreads_id = best_book.find('id').text
            else:
                logger.warning('could not find a matching book for ISBN %s' % isbn)
        else:
            logger.warning('did not receive a 200 when using Goodreads search api')
        return goodreads_id
Code example #4
File: models.py Project: livlab/carebot
    def refresh(self):
        secrets = app_config.get_secrets()

        if not self.project.domain:
            return

        url = 'http://%s%s' % (self.project.domain, self.project.prefix)
        response = requests.get(
            'https://free.sharedcount.com/url?apikey=%s&url=%s' %
            (secrets['SHAREDCOUNT_API_KEY'], url))

        if response.status_code != 200:
            print 'Failed to refresh social data from SharedCount: %i.' % response.status_code
            return

        print 'Updating social counts from SharedCount'

        data = response.json()

        self.facebook_likes = data['Facebook']['like_count'] or 0
        self.facebook_shares = data['Facebook']['share_count'] or 0
        self.facebook_comments = data['Facebook']['comment_count'] or 0
        self.twitter = data['Twitter'] or 0
        self.google = data['GooglePlusOne'] or 0
        self.pinterest = data['Pinterest'] or 0
        self.linkedin = data['LinkedIn'] or 0
        self.stumbleupon = data['StumbleUpon'] or 0

        self.last_update = timezone.now()

        self.save()
Code example #5
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=[production, staging])

    with settings(warn_only=True):
        local('mkdir confs/rendered')

    context = app_config.get_secrets()
    context['PROJECT_SLUG'] = app_config.PROJECT_SLUG
    context['CLOUD_SEARCH_DOMAIN'] = app_config.CLOUD_SEARCH_DOMAIN
    context['PROJECT_NAME'] = app_config.PROJECT_NAME
    context['DEPLOYMENT_TARGET'] = env.settings
    context['CONFIG_NAME'] = env.project_slug.replace('-', '').upper()

    for service, remote_path, extension in SERVICES:
        file_path = 'confs/rendered/%s.%s.%s' % (app_config.PROJECT_SLUG,
                                                 service, extension)

        with open('confs/%s.%s' % (service, extension), 'r') as read_template:

            with open(file_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #6
File: tumblr_utils.py Project: imclab/changinglives
def fetch_posts():
    """
    Returns a list of all tumblr posts, unsorted.
    """
    print "Starting."
    # Set constants
    secrets = app_config.get_secrets()
    base_url = 'http://api.tumblr.com/v2/blog/%s.tumblr.com/posts/photo' % app_config.TUMBLR_BLOG_ID
    key_param = '?api_key=%s' % secrets['TUMBLR_APP_KEY']
    limit_param = '&limit=20'
    limit = 20
    new_limit = limit
    post_list = []

    print base_url + key_param

    # Figure out the total number of posts.
    r = requests.get(base_url + key_param)
    total_count = int(json.loads(r.content)['response']['total_posts'])
    print "%s total posts available." % total_count

    # Do the pagination math.
    pages_count = (total_count / limit)
    pages_remainder = (total_count % limit)
    if pages_remainder > 0:
        pages_count += 1
    pages = range(0, pages_count)
    print "%s pages required." % len(pages)

    # Start requesting pages.
    # Note: Maximum of 20 posts per page.
    print "Requesting pages."
    for page in pages:

        # Update all of the pagination shenanigans.
        start_number = new_limit - limit
        end_number = new_limit
        if end_number > total_count:
            end_number = total_count
        new_limit = new_limit + limit
        page_param = '&offset=%s' % start_number
        page_url = base_url + key_param + limit_param + page_param

        # Actually fetch the page URL.
        r = requests.get(page_url)
        posts = json.loads(r.content)

        for post in posts['response']['posts']:
            try:
                if 'NSFW' in post['tags']:
                    pass
                elif 'nsfw' in post['tags']:
                    pass
                else:
                    post_list.append(post)
            except KeyError:
                pass

    return post_list
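
The pagination arithmetic in fetch_posts() above is a hand-rolled ceiling division: divide the total post count by the page size, add one page when there is a remainder, and walk the pages with an offset parameter. A small worked example with made-up numbers (not output from the Tumblr API):

import math

total_count = 45   # hypothetical value of response['total_posts']
limit = 20         # page size used by fetch_posts()

# Equivalent to the divide-then-bump-on-remainder logic above.
pages_count = int(math.ceil(total_count / float(limit)))   # -> 3
offsets = [page * limit for page in range(pages_count)]    # -> [0, 20, 40]
print(offsets)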
Code example #8
def local_reset_db():
    secrets = app_config.get_secrets()

    with settings(warn_only=True):
        local('dropdb %s' % app_config.PROJECT_SLUG)
        local('echo "CREATE USER %s WITH PASSWORD \'%s\';" | psql' % (app_config.PROJECT_SLUG, secrets['POSTGRES_PASSWORD']))

    local('createdb -O %s %s' % (app_config.PROJECT_SLUG, app_config.PROJECT_SLUG))
Code example #9
File: app.py Project: nprapps/photo-finder
def index():
    """
    Example view demonstrating rendering a simple HTML page.
    """
    context = make_context()

    context['INSTAGRAM_CLIENT_ID'] = app_config.get_secrets()['INSTAGRAM_CLIENT_ID']

    return render_template('index.html', **context)
Code example #10
File: app.py Project: isabella232/photo-finder
def index():
    """
    Example view demonstrating rendering a simple HTML page.
    """
    context = make_context()

    context['INSTAGRAM_CLIENT_ID'] = app_config.get_secrets(
    )['INSTAGRAM_CLIENT_ID']

    return render_template('index.html', **context)
Code example #11
def authenticate():
    secrets = app_config.get_secrets()

    auth = tweepy.OAuthHandler(secrets['TWITTER_CONSUMER_KEY'],
                               secrets['TWITTER_CONSUMER_SECRET'])
    auth.set_access_token(secrets['TWITTER_ACCESS_KEY'],
                          secrets['TWITTER_ACCESS_SECRET'])

    api = tweepy.API(auth)
    return api
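
A hypothetical call site for the authenticate() helper above; the screen name is invented, and this assumes a tweepy release in which API.user_timeline accepts screen_name and count keyword arguments (not guaranteed for every version).

# Hypothetical usage of authenticate(); nothing below comes from the original project.
api = authenticate()

for status in api.user_timeline(screen_name='nprnews', count=5):
    print(status.text)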
Code example #12
File: __init__.py Project: wbez/debates
def verb8tm_stop():
    """
    Stops the generation of content in Verb8tm test API endpoint
    """
    secrets = app_config.get_secrets()
    url = secrets.get('VERB8TM_SRT_TEST_API', app_config.PROJECT_SLUG)
    header = 'Content-Type: application/json'
    payload = '{"command":"stop"}'
    command = 'curl -H "%s" -X POST -d \'%s\' %s' % (header, payload, url)
    local(command)
Code example #13
File: data.py Project: vibster/books13
def load_images():
    """
    Downloads images from Baker and Taylor.
    Eschews the API for a magic URL pattern, which is faster.
    """

    # Secrets.
    secrets = app_config.get_secrets()

    # Open the books JSON.
    with open("www/static-data/books.json", "rb") as readfile:
        books = json.loads(readfile.read())

    print "Start load_images(): %i books." % len(books)

    # Loop.
    for book in books:

        # Skip books with no title or ISBN.
        if book["title"] == "":
            continue

        if "isbn" not in book or book["isbn"] == "":
            continue

        # Construct the URL with secrets and the ISBN.
        book_url = "http://imagesa.btol.com/ContentCafe/Jacket.aspx?UserID=%s&Password=%s&Return=T&Type=L&Value=%s" % (
            secrets["BAKER_TAYLOR_USERID"],
            secrets["BAKER_TAYLOR_PASSWORD"],
            book["isbn"],
        )

        # Request the image.
        r = requests.get(book_url)

        path = "www/img/cover/%s.jpg" % book["slug"]

        # Write the image to www using the slug as the filename.
        with open(path, "wb") as writefile:
            writefile.write(r.content)

        file_size = os.path.getsize(path)

        if file_size < 10000:
            print "Image not available for ISBN: %s" % book["isbn"]

        image = Image.open(path)

        width = 250
        height = int((float(width) / image.size[0]) * image.size[1])

        image.thumbnail([width, height], Image.ANTIALIAS)
        image.save(path.replace(".jpg", "-thumb.jpg"), "JPEG")

    print "End."
Code example #14
File: data.py Project: nprapps/elections14
def local_reset_db():
    """
    Reset the database locally.
    """
    secrets = app_config.get_secrets()

    with settings(warn_only=True):
        local('dropdb %s' % app_config.PROJECT_SLUG)
        local('echo "CREATE USER %s WITH PASSWORD \'%s\';" | psql' % (app_config.PROJECT_SLUG, secrets['POSTGRES_PASSWORD']))

    local('createdb -O %s %s' % (app_config.PROJECT_SLUG, app_config.PROJECT_SLUG))
Code example #15
File: data.py Project: livlab/carebot
def local_reset_db():
    secrets = app_config.get_secrets()

    with settings(warn_only=True):
        local("dropdb %s" % app_config.PROJECT_SLUG)
        local(
            "echo \"CREATE USER %s WITH PASSWORD '%s';\" | psql"
            % (app_config.PROJECT_SLUG, secrets["POSTGRES_PASSWORD"])
        )

    local("createdb -O %s %s" % (app_config.PROJECT_SLUG, app_config.PROJECT_SLUG))
Code example #16
def load_images():
    """
    Downloads images from Baker and Taylor.
    Eschews the API for a magic URL pattern, which is faster.
    """

    # Secrets.
    secrets = app_config.get_secrets()

    # Open the books JSON.
    with open('www/static-data/books.json', 'rb') as readfile:
        books = json.loads(readfile.read())

    print "Start load_images(): %i books." % len(books)

    # Loop.
    for book in books:

        # Skip books with no title or ISBN.
        if book['title'] == "":
            continue

        if 'isbn' not in book or book['isbn'] == "":
            continue

        # Construct the URL with secrets and the ISBN.
        book_url = "http://imagesa.btol.com/ContentCafe/Jacket.aspx?UserID=%s&Password=%s&Return=T&Type=L&Value=%s" % (
            secrets['BAKER_TAYLOR_USERID'], secrets['BAKER_TAYLOR_PASSWORD'],
            book['isbn'])

        # Request the image.
        r = requests.get(book_url)

        path = 'www/assets/cover/%s.jpg' % book['slug']

        # Write the image to www using the slug as the filename.
        with open(path, 'wb') as writefile:
            writefile.write(r.content)

        file_size = os.path.getsize(path)

        if file_size < 10000:
            print "Image not available for ISBN: %s" % book['isbn']

        image = Image.open(path)

        width = 250
        height = int((float(width) / image.size[0]) * image.size[1])

        image.thumbnail([width, height], Image.ANTIALIAS)
        image.save(path.replace('.jpg', '-thumb.jpg'), 'JPEG')

    print "End."
Code example #17
File: gs.py Project: isabella232/liveblog-1
def execute_setup(script_name=None, doc_id=None, log_id=None):
    """
    execute script setup: params script_id, document_id, log_id
    """

    require('settings', provided_by=['production', 'staging', 'development'])

    # Get secrets
    secrets = app_config.get_secrets()
    verb8tm_srt_url = secrets.get('VERB8TM_SRT_API',
                                  app_config.PROJECT_SLUG)
    verb8tm_timestamp_url = secrets.get('VERB8TM_TIMESTAMP_API',
                                        app_config.PROJECT_SLUG)

    # Get the script id from the script name and deployment target
    # prioritize passed in parameters
    if not script_name:
        script_name = '%s_%s' % (app_config.DEPLOYMENT_TARGET,
                                 app_config.SCRIPT_PROJECT_NAME)

    script_id = get_gas_project_id(script_name)

    URL_PREFIX = 'https://script.googleapis.com/v1/scripts/'

    # url
    url = '%s%s:run' % (URL_PREFIX, script_id)

    # Compose payload we pass documentID and logID to setup script properties
    payload = {
        'function': 'setup',
        'parameters': [verb8tm_srt_url,
                       verb8tm_timestamp_url,
                       app_config.LIVEBLOG_GDOC_KEY,
                       app_config.GAS_LOG_KEY]
    }

    kwargs = {
        'credentials': check_credentials(),
        'url': url,
        'method': 'POST',
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(payload)
    }

    resp = send_api_request(kwargs)

    if resp.status == 200:
        return True
    else:
        print resp.status
        exit()
    return False
Code example #18
def _geckoboard_push(widget_key, data):
    payload = {
        'api_key': app_config.get_secrets()['GECKOBOARD_API_KEY'],
        'data': data
    }

    response = requests.post(
        'https://push.geckoboard.com/v1/send/%s' % widget_key,
        json.dumps(payload))

    if response.status_code != 200:
        print 'Failed to update Geckoboard widget %s' % widget_key
        print response.content
Code example #19
File: cron_jobs.py Project: livlab/carebot
def _geckoboard_push(widget_key, data):
    payload = {
        'api_key': app_config.get_secrets()['GECKOBOARD_API_KEY'],
        'data': data
    }

    response = requests.post(
        'https://push.geckoboard.com/v1/send/%s' % widget_key,
        json.dumps(payload)
    )

    if response.status_code != 200:
        print 'Failed to update Geckoboard widget %s' % widget_key
        print response.content
Code example #20
def mount(server=None, path='www', mount_point='mnt'):
    '''Mount the especiales server onto a local disk.'''
    local_path = os.path.join(cwd, '..', mount_point)
    if os.path.exists(local_path):
        if os.listdir(local_path):
            logger.error('Mount point is not empty, choose another mount point')
            exit(1)
    else:
        os.makedirs(local_path)
    if not server:
        secrets = app_config.get_secrets()
        server = secrets.get('DEFAULT_SERVER', app_config.PROJECT_SLUG)
    command = SMB_MOUNT_TEMPLATE % (server, path, local_path)
    local(command)
    return local_path
Code example #21
File: data.py Project: isabella232/linklater
def fetch_tweets(username, days):
    """
    Get tweets of a specific user
    """
    current_time = datetime.now()    

    secrets = app_config.get_secrets()

    twitter_api = Twitter(
        auth=OAuth(
            secrets['TWITTER_API_OAUTH_TOKEN'],
            secrets['TWITTER_API_OAUTH_SECRET'],
            secrets['TWITTER_API_CONSUMER_KEY'],
            secrets['TWITTER_API_CONSUMER_SECRET']
        )
    )

    out = []    

    tweets = twitter_api.statuses.user_timeline(screen_name=username, count=TWITTER_BATCH_SIZE)

    i = 0

    while True:
        if i > (len(tweets)-1):
            break   

        tweet = tweets[i]

        created_time = datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')

        time_difference = (current_time - created_time).days

        if time_difference > int(days):
            break     

        out.extend(_process_tweet(tweet, username))

        i += 1

        if i > (TWITTER_BATCH_SIZE-1):
            tweets = twitter_api.statuses.user_timeline(screen_name=username, count=TWITTER_BATCH_SIZE, max_id=tweet['id'])
            i = 0

    out = _dedupe_links(out)

    return out
Code example #22
def dump_tumblr_json():
    secrets = app_config.get_secrets()

    t = Tumblpy(
        app_key=secrets['TUMBLR_APP_KEY'],
        app_secret=secrets['TUMBLR_APP_SECRET'],
        oauth_token=secrets['TUMBLR_OAUTH_TOKEN'],
        oauth_token_secret=secrets['TUMBLR_OAUTH_TOKEN_SECRET'])

    limit = 10
    pages = range(0, 20)

    for page in pages:
        offset = page * limit
        posts = t.get('posts', blog_url=app_config.TUMBLR_URL, params={'limit': limit, 'offset': offset})

        with open('data/backups/tumblr_prod_%s.json' % page, 'w') as f:
            f.write(json.dumps(posts))
Code example #24
def deploy_to_tumblr():
    now = datetime.now()
    secrets = app_config.get_secrets()
    tumblr_api = pytumblr.TumblrRestClient(secrets['TUMBLR_CONSUMER_KEY'],
                                           secrets['TUMBLR_CONSUMER_SECRET'],
                                           secrets['TUMBLR_TOKEN'],
                                           secrets['TUMBLR_TOKEN_SECRET'])

    body = data.make_tumblr_draft_html()

    response = tumblr_api.create_text(env.tumblr_blog_name,
                                      state='draft',
                                      format='html',
                                      body=body.encode('utf8'))
    print "%s: Created tumblr draft (id: %s)" % (now.isoformat(),
                                                 response['id'])

    return response
Code example #25
File: servers.py Project: nprapps/elections14
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=['production', 'staging'])

    # Copy the app_config so that when we load the secrets they don't
    # get exposed to other management commands
    context = copy.copy(app_config.__dict__)
    context.update(app_config.get_secrets())

    for service, remote_path, extension in app_config.SERVER_SERVICES:
        template_path = _get_template_conf_path(service, extension)
        rendered_path = _get_rendered_conf_path(service, extension)

        with open(template_path,  'r') as read_template:

            with open(rendered_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #26
File: servers.py Project: isabella232/elections14
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=['production', 'staging'])

    # Copy the app_config so that when we load the secrets they don't
    # get exposed to other management commands
    context = copy.copy(app_config.__dict__)
    context.update(app_config.get_secrets())

    for service, remote_path, extension in app_config.SERVER_SERVICES:
        template_path = _get_template_conf_path(service, extension)
        rendered_path = _get_rendered_conf_path(service, extension)

        with open(template_path, 'r') as read_template:

            with open(rendered_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #27
def write_test_posts():
    """
    Writes test posts to our test blog as defined by app_config.py
    """

    # This is how many posts will be written.
    TOTAL_NUMBER = 9

    secrets = app_config.get_secrets()

    t = Tumblpy(app_key=secrets['TUMBLR_APP_KEY'],
                app_secret=secrets['TUMBLR_APP_SECRET'],
                oauth_token=secrets['TUMBLR_OAUTH_TOKEN'],
                oauth_token_secret=secrets['TUMBLR_OAUTH_TOKEN_SECRET'])

    tags = ['featured', '']

    images = [
        'http://media.npr.org/assets/img/2013/04/24/habitablezones_custom-fa87578c6e6a97788b92a0ecac956b9098607aa6-s40.jpg',
        'http://media.npr.org/assets/img/2013/04/24/ocpack-32260770b4090f86ddeb7502175a631d50c3b4a1-s51.jpg',
        'http://media.npr.org/assets/img/2013/04/24/dalrymple-c-karoki-lewis-4c9bd790639c870d51c670cbecbca4b802b82b1a-s51.jpg',
        'http://media.npr.org/assets/img/2013/04/24/ap111231019469-46289d097a45801ed2ca3464da14b13be40e5adb-s51.jpg'
    ]

    n = 0
    while n < TOTAL_NUMBER:
        image = choice(images)
        tag = choice(tags)
        caption = u"<p class='intro'>Introduction,</p><p class='message'>This is a test post.</p><p class='signature-name'>Sincerely,<br/>Test from Test, Test</p>"
        tumblr_post = t.post('post',
                             blog_url=app_config.TUMBLR_URL,
                             params={
                                 'type': 'photo',
                                 'caption': caption,
                                 'tags': tag,
                                 'source': image
                             })

        print n, tumblr_post['id']

        n += 1
Code example #28
File: data.py Project: nprapps/elections14
def create_tables():
    """
    Create the database tables.
    """
    import models

    secrets = app_config.get_secrets()

    print 'Creating database tables'

    models.Race.create_table()
    models.Candidate.create_table()
    models.Slide.create_table()
    models.SlideSequence.create_table()

    print 'Setting up admin'

    admin_app.auth.User.create_table()
    admin_user = admin_app.auth.User(username='******', email='', admin=True, active=True)
    admin_user.set_password(secrets.get('ADMIN_PASSWORD'))
    admin_user.save()
Code example #29
def generate_rdio_playlist():
    """
    Generate a list of Rdio track IDs
    """

    secrets = app_config.get_secrets()
    state = {}
    rdio_api = Rdio(secrets['RDIO_CONSUMER_KEY'],
                    secrets['RDIO_CONSUMER_SECRET'], state)
    songs = []

    with open('data/songs.csv') as f:
        rows = csv.DictReader(f)

        for row in rows:
            if row['rdio']:
                song_object = rdio_api.getObjectFromUrl(url=row['rdio'])
                if song_object['type'] == 't':
                    songs.append(song_object['key'])

    print ','.join(songs)
Code example #30
def write_test_posts():
    """
    Writes test posts to our test blog as defined by app_config.py
    """

    # This is how many posts will be written.
    TOTAL_NUMBER = 9

    secrets = app_config.get_secrets()

    t = Tumblpy(
        app_key=secrets['TUMBLR_APP_KEY'],
        app_secret=secrets['TUMBLR_APP_SECRET'],
        oauth_token=secrets['TUMBLR_OAUTH_TOKEN'],
        oauth_token_secret=secrets['TUMBLR_OAUTH_TOKEN_SECRET'])

    tags = ['featured', '']

    images = [
        'http://media.npr.org/assets/img/2013/04/24/habitablezones_custom-fa87578c6e6a97788b92a0ecac956b9098607aa6-s40.jpg',
        'http://media.npr.org/assets/img/2013/04/24/ocpack-32260770b4090f86ddeb7502175a631d50c3b4a1-s51.jpg',
        'http://media.npr.org/assets/img/2013/04/24/dalrymple-c-karoki-lewis-4c9bd790639c870d51c670cbecbca4b802b82b1a-s51.jpg',
        'http://media.npr.org/assets/img/2013/04/24/ap111231019469-46289d097a45801ed2ca3464da14b13be40e5adb-s51.jpg'
    ]

    n = 0
    while n < TOTAL_NUMBER:
        image = choice(images)
        tag = choice(tags)
        caption = u"<p class='intro'>Introduction,</p><p class='message'>This is a test post.</p><p class='signature-name'>Sincerely,<br/>Test from Test, Test</p>"
        tumblr_post = t.post('post', blog_url=app_config.TUMBLR_URL, params={
            'type': 'photo',
            'caption': caption,
            'tags': tag,
            'source': image
        })

        print n, tumblr_post['id']

        n += 1
Code example #31
File: fabfile.py Project: nprapps/drumfill
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=[production, staging])

    with settings(warn_only=True):
        local('mkdir confs/rendered')

    context = app_config.get_secrets()
    context['PROJECT_SLUG'] = app_config.PROJECT_SLUG
    context['PROJECT_NAME'] = app_config.PROJECT_NAME
    context['DEPLOYMENT_TARGET'] = env.settings

    for service, remote_path in SERVICES:
        file_path = 'confs/rendered/%s.%s.conf' % (app_config.PROJECT_SLUG, service)

        with open('confs/%s.conf' % service, 'r') as read_template:

            with open(file_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #32
File: data.py Project: isabella232/bestsongs15
def generate_spotify_playlist():
    """
    Generate a list of Spotify track IDs
    """

    sp = spotipy.Spotify()
    songs = []
    secrets = app_config.get_secrets()

    with open('data/songs.csv') as f:
        rows = csv.DictReader(f)

        for row in rows:
            if row['spotify'] and "track" in row['spotify']:
                try:
                    song_object = sp.track(track_id=row['spotify'])
                    if song_object['track_number']:
                        songs.append(song_object['uri'])
                except:
                    print 'invalid ID'

    print ','.join(songs)
Code example #33
File: fabfile.py Project: nprapps/disability
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=[production, staging])

    with settings(warn_only=True):
        local('mkdir confs/rendered')

    context = app_config.get_secrets()
    context['PROJECT_SLUG'] = app_config.PROJECT_SLUG
    context['PROJECT_NAME'] = app_config.PROJECT_NAME
    context['DEPLOYMENT_TARGET'] = env.settings

    for service, remote_path in SERVICES:
        file_path = 'confs/rendered/%s.%s.conf' % (app_config.PROJECT_SLUG, service)

        with open('confs/%s.conf' % service, 'r') as read_template:

            with open(file_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #34
File: fabfile.py Project: nprapps/drumfill
def render_confs():
    """
    Renders server configurations.
    """
    require("settings", provided_by=[production, staging])

    with settings(warn_only=True):
        local("mkdir confs/rendered")

    context = app_config.get_secrets()
    context["PROJECT_SLUG"] = app_config.PROJECT_SLUG
    context["PROJECT_NAME"] = app_config.PROJECT_NAME
    context["DEPLOYMENT_TARGET"] = env.settings

    for service, remote_path in SERVICES:
        file_path = "confs/rendered/%s.%s.conf" % (app_config.PROJECT_SLUG, service)

        with open("confs/%s.conf" % service, "r") as read_template:

            with open(file_path, "wb") as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #35
File: data.py Project: nprapps/bestsongs15-midyear
def generate_spotify_playlist():
    """
    Generate a list of Spotify track IDs
    """

    sp = spotipy.Spotify()
    songs = []
    secrets = app_config.get_secrets()

    with open('data/songs.csv') as f:
        rows = csv.DictReader(f)

        for row in rows:
            if row['spotify'] and "track" in row['spotify']:
                try:
                    song_object = sp.track(track_id=row['spotify'])
                    if song_object['track_number']:
                        songs.append(song_object['uri'])
                except:
                    print 'invalid ID'

    print ','.join(songs)
Code example #36
def update():
    """
    Fetch the latest posts from the Tumblr API.
    """
    secrets = app_config.get_secrets()

    offset = 0
    total_posts = 1

    while offset < total_posts:
        print 'Fetching posts %i-%i' % (offset, offset + LIMIT)

        response = requests.get('http://api.tumblr.com/v2/blog/%s.tumblr.com/posts' % app_config.TUMBLR_NAME, params={
            'api_key':secrets['TUMBLR_CONSUMER_KEY'],
            'limit': LIMIT,
            'offset': offset
        })

        data = response.json()

        total_posts = data['response']['total_posts']
        posts = data['response']['posts']

        for post in posts:
            if post['type'] in UNSUPPORTED_TYPES:
                continue

            if datetime.fromtimestamp(post['timestamp']) < app_config.TUMBLR_NOT_BEFORE:
                print 'Skipping old post'
                continue

            elif datetime.fromtimestamp(post['timestamp']) > app_config.TUMBLR_NOT_AFTER:
                print 'Skipping newer post'
                continue

            _create_slide(post)

        offset += LIMIT
Code example #37
File: __init__.py Project: TylerFisher/housing2
def render_confs():
    """
    Renders server configurations.
    """
    require("settings", provided_by=[production, staging])

    with settings(warn_only=True):
        local("mkdir confs/rendered")

    # Copy the app_config so that when we load the secrets they don't
    # get exposed to other management commands
    context = copy.copy(app_config.__dict__)
    context.update(app_config.get_secrets())

    for service, remote_path, extension in app_config.SERVER_SERVICES:
        template_path = _get_template_conf_path(service, extension)
        rendered_path = _get_rendered_conf_path(service, extension)

        with open(template_path, "r") as read_template:

            with open(rendered_path, "wb") as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #38
File: liveblog.py Project: nprapps/elections14
def update():
    """
    Fetch the latest posts from the Tumblr API.
    """
    secrets = app_config.get_secrets()

    offset = 0
    total_posts = 1

    while offset < total_posts:
        print "Fetching posts %i-%i" % (offset, offset + LIMIT)

        response = requests.get(
            "http://api.tumblr.com/v2/blog/%s.tumblr.com/posts" % app_config.TUMBLR_NAME,
            params={"api_key": secrets["TUMBLR_CONSUMER_KEY"], "limit": LIMIT, "offset": offset},
        )

        data = response.json()

        total_posts = data["response"]["total_posts"]
        posts = data["response"]["posts"]

        for post in posts:
            if post["type"] in UNSUPPORTED_TYPES:
                continue

            if datetime.fromtimestamp(post["timestamp"]) < app_config.TUMBLR_NOT_BEFORE:
                print "Skipping old post"
                continue

            elif datetime.fromtimestamp(post["timestamp"]) > app_config.TUMBLR_NOT_AFTER:
                print "Skipping newer post"
                continue

            _create_slide(post)

        offset += LIMIT
Code example #39
File: fabfile.py Project: imclab/playgrounds2
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=[production, staging])

    with settings(warn_only=True):
        local('mkdir confs/rendered')

    context = app_config.get_secrets()
    context['PROJECT_SLUG'] = app_config.PROJECT_SLUG
    context['CLOUD_SEARCH_DOMAIN'] = app_config.CLOUD_SEARCH_DOMAIN
    context['PROJECT_NAME'] = app_config.PROJECT_NAME
    context['DEPLOYMENT_TARGET'] = env.settings
    context['CONFIG_NAME'] = env.project_slug.replace('-', '').upper()

    for service, remote_path, extension in SERVICES:
        file_path = 'confs/rendered/%s.%s.%s' % (app_config.PROJECT_SLUG, service, extension)

        with open('confs/%s.%s' % (service, extension),  'r') as read_template:

            with open(file_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #40
File: fabfile.py Project: nprapps/playgrounds2
def render_confs():
    """
    Renders server configurations.
    """
    require("settings", provided_by=[production, staging])

    with settings(warn_only=True):
        local("mkdir confs/rendered")

    context = app_config.get_secrets()
    context["PROJECT_SLUG"] = app_config.PROJECT_SLUG
    context["CLOUD_SEARCH_DOMAIN"] = app_config.CLOUD_SEARCH_DOMAIN
    context["PROJECT_NAME"] = app_config.PROJECT_NAME
    context["DEPLOYMENT_TARGET"] = env.settings
    context["CONFIG_NAME"] = env.project_slug.replace("-", "").upper()

    for service, remote_path, extension in SERVICES:
        file_path = "confs/rendered/%s.%s.%s" % (app_config.PROJECT_SLUG, service, extension)

        with open("confs/%s.%s" % (service, extension), "r") as read_template:

            with open(file_path, "wb") as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
Code example #41
File: data.py Project: nprapps/bestsongs15-midyear
def generate_rdio_playlist():
    """
    Generate a list of Rdio track IDs
    """

    secrets = app_config.get_secrets()
    state = {}
    rdio_api = Rdio(
        secrets['RDIO_CONSUMER_KEY'],
        secrets['RDIO_CONSUMER_SECRET'],
        state
    )
    songs = []

    with open('data/songs.csv') as f:
        rows = csv.DictReader(f)

        for row in rows:
            if row['rdio']:
                song_object = rdio_api.getObjectFromUrl(url=row['rdio'])
                if song_object['type'] == 't':
                    songs.append(song_object['key'])

    print ','.join(songs)
Code example #42
File: data.py Project: isabella232/elections14
def create_tables():
    """
    Create the database tables.
    """
    import models

    secrets = app_config.get_secrets()

    print 'Creating database tables'

    models.Race.create_table()
    models.Candidate.create_table()
    models.Slide.create_table()
    models.SlideSequence.create_table()

    print 'Setting up admin'

    admin_app.auth.User.create_table()
    admin_user = admin_app.auth.User(username='******',
                                     email='',
                                     admin=True,
                                     active=True)
    admin_user.set_password(secrets.get('ADMIN_PASSWORD'))
    admin_user.save()
Code example #43
def update_featured_social():
    """
    Update featured tweets
    """
    COPY = copytext.Copy(app_config.COPY_PATH)
    secrets = app_config.get_secrets()

    # Twitter
    print 'Fetching tweets...'

    twitter_api = Twitter(auth=OAuth(secrets['TWITTER_API_OAUTH_TOKEN'],
                                     secrets['TWITTER_API_OAUTH_SECRET'],
                                     secrets['TWITTER_API_CONSUMER_KEY'],
                                     secrets['TWITTER_API_CONSUMER_SECRET']))

    tweets = []

    for i in range(1, 4):
        tweet_url = COPY['share']['featured_tweet%i' % i]

        if isinstance(tweet_url,
                      copytext.Error) or unicode(tweet_url).strip() == '':
            continue

        tweet_id = unicode(tweet_url).split('/')[-1]

        tweet = twitter_api.statuses.show(id=tweet_id)

        creation_date = datetime.strptime(tweet['created_at'],
                                          '%a %b %d %H:%M:%S +0000 %Y')
        creation_date = '%s %i' % (creation_date.strftime('%b'),
                                   creation_date.day)

        tweet_url = 'http://twitter.com/%s/status/%s' % (
            tweet['user']['screen_name'], tweet['id'])

        photo = None
        html = tweet['text']
        subs = {}

        for media in tweet['entities'].get('media', []):
            original = tweet['text'][media['indices'][0]:media['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (
                media['url'], app_config.PROJECT_SLUG, tweet_url,
                media['display_url'])

            subs[original] = replacement

            if media['type'] == 'photo' and not photo:
                photo = {'url': media['media_url']}

        for url in tweet['entities'].get('urls', []):
            original = tweet['text'][url['indices'][0]:url['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (
                url['url'], app_config.PROJECT_SLUG, tweet_url,
                url['display_url'])

            subs[original] = replacement

        for hashtag in tweet['entities'].get('hashtags', []):
            original = tweet['text'][
                hashtag['indices'][0]:hashtag['indices'][1]]
            replacement = '<a href="https://twitter.com/hashtag/%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'hashtag\', 0, \'%s\']);">%s</a>' % (
                hashtag['text'], app_config.PROJECT_SLUG, tweet_url,
                '#%s' % hashtag['text'])

            subs[original] = replacement

        for original, replacement in subs.items():
            html = html.replace(original, replacement)

        # https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid
        tweets.append({
            'id': tweet['id'],
            'url': tweet_url,
            'html': html,
            'favorite_count': tweet['favorite_count'],
            'retweet_count': tweet['retweet_count'],
            'user': {
                'id': tweet['user']['id'],
                'name': tweet['user']['name'],
                'screen_name': tweet['user']['screen_name'],
                'profile_image_url': tweet['user']['profile_image_url'],
                'url': tweet['user']['url'],
            },
            'creation_date': creation_date,
            'photo': photo
        })

    # Facebook
    print 'Fetching Facebook posts...'

    fb_api = GraphAPI(secrets['FACEBOOK_API_APP_TOKEN'])

    facebook_posts = []

    for i in range(1, 4):
        fb_url = COPY['share']['featured_facebook%i' % i]

        if isinstance(fb_url, copytext.Error) or unicode(fb_url).strip() == '':
            continue

        fb_id = unicode(fb_url).split('/')[-1]

        post = fb_api.get_object(fb_id)
        user = fb_api.get_object(post['from']['id'])
        user_picture = fb_api.get_object('%s/picture' % post['from']['id'])
        likes = fb_api.get_object('%s/likes' % fb_id, summary='true')
        comments = fb_api.get_object('%s/comments' % fb_id, summary='true')
        #shares = fb_api.get_object('%s/sharedposts' % fb_id)

        creation_date = datetime.strptime(post['created_time'],
                                          '%Y-%m-%dT%H:%M:%S+0000')
        creation_date = '%s %i' % (creation_date.strftime('%b'),
                                   creation_date.day)

        # https://developers.facebook.com/docs/graph-api/reference/v2.0/post
        facebook_posts.append({
            'id': post['id'],
            'message': post['message'],
            'link': {
                'url': post['link'],
                'name': post['name'],
                'caption': (post['caption'] if 'caption' in post else None),
                'description': post['description'],
                'picture': post['picture']
            },
            'from': {
                'name': user['name'],
                'link': user['link'],
                'picture': user_picture['url']
            },
            'likes': likes['summary']['total_count'],
            'comments': comments['summary']['total_count'],
            #'shares': shares['summary']['total_count'],
            'creation_date': creation_date
        })

    # Render to JSON
    output = {'tweets': tweets, 'facebook_posts': facebook_posts}

    with open('data/featured.json', 'w') as f:
        json.dump(output, f)
Code example #44
"""
import app_config
import dataset
import ipdb
import logging
import requests

from datetime import datetime
from dateutil import parser
from fabric.api import local, task
from facebook import GraphAPI
from pyquery import PyQuery
from scrapers.utils import get_art_root_url, get_seamus_id_from_url
from scrapers.seamus.models import Story

SECRETS = app_config.get_secrets()
FACEBOOK_USER = '******'

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


@task
def local_bootstrap():
    local('dropdb --if-exists %s' % app_config.PROJECT_SLUG)
    local('createdb %s' % app_config.PROJECT_SLUG)


@task
def drop_tables():
Code example #45
"""

import app_config
import json
import logging
import requests

from datetime import datetime
from fabric.api import local, require, task

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

today = datetime.now().strftime('%Y-%m-%d')
WEBHOOK = app_config.get_secrets()['WEBHOOK']
URL = 'https://www.federalregister.gov/api/v1/documents.json'

COLORS_DICT = {
    'Rule': '#5cb85c',
    'Proposed Rule': '#5bc0de',
    'Presidential Document': '#d9534f'
}


@task
def post_message():
    """
    Example cron task. Note we use "local" instead of "run"
    because this will run on the server.
    """
Code example #46
File: scraper.py Project: ebigalee/graeae
#!/usr/bin/env python

from datetime import datetime
import logging
import os
import requests

from pyquery import PyQuery

from app_config import get_secrets
from models import ApiEntry, Article

SECRETS = get_secrets()

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

class HomepageScraper:
    url = 'http://npr.org'

    def __init__(self):
        self.run_time = datetime.utcnow()

    def scrape_homepage(self, **kwargs):
        """
        Scrape!
        """
        logger.info('Scraping homepage (start time: %s)' % self.run_time)

        if not kwargs:
Code example #47
File: models.py Project: nprapps/breaking-news-facts
import datetime
import time

from peewee import *

from app_config import get_secrets

secrets = get_secrets()

psql_db = PostgresqlDatabase("breaking", user=secrets["APPS_USER"], password=secrets["APPS_PASS"])


def delete_tables():
    try:
        Event.drop_table()
    except:
        pass

    try:
        Fact.drop_table()
    except:
        pass


def create_tables():
    Event.create_table()
    Fact.create_table()


class Event(Model):
    """
Code example #48
File: gs.py Project: wbez/debates
def execute_setup(name=None,
                  doc_id=None,
                  log_id=None,
                  cspan=None,
                  cspan_server=None):
    """
    execute script setup
    params:
    name - script name
    document_id - associated google doc
    log_id - associated google logs spreadsheet
    cspan - False to use Verb8tm, True to use CSPAN
    cspan_server - url that will serve the CSPAN captions
    """

    require('settings', provided_by=['production', 'staging', 'development'])
    # Get secrets
    secrets = app_config.get_secrets()
    verb8tm_srt_url = secrets.get('VERB8TM_SRT_API', app_config.PROJECT_SLUG)
    verb8tm_timestamp_url = secrets.get('VERB8TM_TIMESTAMP_API',
                                        app_config.PROJECT_SLUG)
    if cspan:
        pass
    else:
        cspan = str(app_config.CSPAN)

    ec2_public_dns_tpl = 'http://ec2-%s.compute-1.amazonaws.com:5000/'
    if cspan_server:
        pass
    else:
        if len(app_config.SERVERS):
            ip = app_config.SERVERS[0]
            ip = ip.replace('.', '-')
            cspan_server = ec2_public_dns_tpl % (ip)
        else:
            logger.error("no servers found, please specify a cspan_server")
            exit()

    # Get the script id from the script name and deployment target
    # prioritize passed in parameters
    if not name:
        name = '%s_%s' % (app_config.DEPLOYMENT_TARGET,
                          app_config.SCRIPT_PROJECT_NAME)

    script_id = get_gas_project_id(name)

    URL_PREFIX = 'https://script.googleapis.com/v1/scripts/'

    # url
    url = '%s%s:run' % (URL_PREFIX, script_id)

    # Compose payload we pass documentID and logID to setup script properties
    payload = {
        'function':
        'setup',
        'parameters': [
            verb8tm_srt_url, verb8tm_timestamp_url, cspan_server, cspan,
            app_config.TRANSCRIPT_GDOC_KEY, app_config.GAS_LOG_KEY
        ]
    }

    kwargs = {
        'credentials': check_credentials(),
        'url': url,
        'method': 'POST',
        'headers': {
            'Content-Type': 'application/json'
        },
        'body': json.dumps(payload)
    }

    resp = send_api_request(kwargs)

    if resp.status == 200:
        return True
    else:
        print resp.status
        exit()
    return False
Code example #49
File: public_app.py Project: vfulco/pixelcite
import argparse
import base64
from cStringIO import StringIO
import datetime
import logging

from flask import Flask, abort, redirect, render_template, request, session, url_for
from twython import Twython

import app_config
from render_utils import make_context

app = Flask(__name__)
app.debug = app_config.DEBUG
app.secret_key = app_config.get_secrets()["SESSION_KEY"]

file_handler = logging.FileHandler(app_config.APP_LOG_PATH)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)

secrets = app_config.get_secrets()

# Example application views
@app.route("/test/", methods=["GET"])
def _test_app():
    """
    Test route for verifying the application is running.
    """
    app.logger.info("Test URL requested.")
Code example #50
File: public_app.py Project: imclab/changinglives
            'ERROR', '500', e, app_config.SERVERS[0], svg_path, name))

        # These bits build a nicer error page that has the real stack trace on it.
        context = {}
        context['title'] = 'CairoSVG is unhappy.'
        context['message'] = e.output
        return render_template('500.html', **context)

    context = {
        'name': name,
        'location': location,
    }

    caption = render_template('caption.html', **context)

    secrets = app_config.get_secrets()

    t = Tumblpy(
        app_key=secrets['TUMBLR_APP_KEY'],
        app_secret=secrets['TUMBLR_APP_SECRET'],
        oauth_token=secrets['TUMBLR_OAUTH_TOKEN'],
        oauth_token_secret=secrets['TUMBLR_OAUTH_TOKEN_SECRET'])

    params = {
        "type": "photo",
        "caption": caption,
        "tags": app_config.TUMBLR_TAGS,
        "source": "http://%s%s" % (app_config.SERVERS[0], png_path)
    }

    try:
Code example #51
File: data.py Project: nprapps/bestsongs15-midyear
def update_featured_social():
    """
    Update featured tweets
    """
    COPY = copytext.Copy(app_config.COPY_PATH)
    secrets = app_config.get_secrets()

    # Twitter
    print 'Fetching tweets...'

    twitter_api = Twitter(
        auth=OAuth(
            secrets['TWITTER_API_OAUTH_TOKEN'],
            secrets['TWITTER_API_OAUTH_SECRET'],
            secrets['TWITTER_API_CONSUMER_KEY'],
            secrets['TWITTER_API_CONSUMER_SECRET']
        )
    )

    tweets = []

    for i in range(1, 4):
        tweet_url = COPY['share']['featured_tweet%i' % i]

        if isinstance(tweet_url, copytext.Error) or unicode(tweet_url).strip() == '':
            continue

        tweet_id = unicode(tweet_url).split('/')[-1]

        tweet = twitter_api.statuses.show(id=tweet_id)

        creation_date = datetime.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
        creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)

        tweet_url = 'http://twitter.com/%s/status/%s' % (tweet['user']['screen_name'], tweet['id'])

        photo = None
        html = tweet['text']
        subs = {}

        for media in tweet['entities'].get('media', []):
            original = tweet['text'][media['indices'][0]:media['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (media['url'], app_config.PROJECT_SLUG, tweet_url, media['display_url'])

            subs[original] = replacement

            if media['type'] == 'photo' and not photo:
                photo = {
                    'url': media['media_url']
                }

        for url in tweet['entities'].get('urls', []):
            original = tweet['text'][url['indices'][0]:url['indices'][1]]
            replacement = '<a href="%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'link\', 0, \'%s\']);">%s</a>' % (url['url'], app_config.PROJECT_SLUG, tweet_url, url['display_url'])

            subs[original] = replacement

        for hashtag in tweet['entities'].get('hashtags', []):
            original = tweet['text'][hashtag['indices'][0]:hashtag['indices'][1]]
            replacement = '<a href="https://twitter.com/hashtag/%s" target="_blank" onclick="_gaq.push([\'_trackEvent\', \'%s\', \'featured-tweet-action\', \'hashtag\', 0, \'%s\']);">%s</a>' % (hashtag['text'], app_config.PROJECT_SLUG, tweet_url, '#%s' % hashtag['text'])

            subs[original] = replacement

        for original, replacement in subs.items():
            html =  html.replace(original, replacement)

        # https://dev.twitter.com/docs/api/1.1/get/statuses/show/%3Aid
        tweets.append({
            'id': tweet['id'],
            'url': tweet_url,
            'html': html,
            'favorite_count': tweet['favorite_count'],
            'retweet_count': tweet['retweet_count'],
            'user': {
                'id': tweet['user']['id'],
                'name': tweet['user']['name'],
                'screen_name': tweet['user']['screen_name'],
                'profile_image_url': tweet['user']['profile_image_url'],
                'url': tweet['user']['url'],
            },
            'creation_date': creation_date,
            'photo': photo
        })

    # Facebook
    print 'Fetching Facebook posts...'

    fb_api = GraphAPI(secrets['FACEBOOK_API_APP_TOKEN'])

    facebook_posts = []

    for i in range(1, 4):
        fb_url = COPY['share']['featured_facebook%i' % i]

        if isinstance(fb_url, copytext.Error) or unicode(fb_url).strip() == '':
            continue

        fb_id = unicode(fb_url).split('/')[-1]

        post = fb_api.get_object(fb_id)
        user  = fb_api.get_object(post['from']['id'])
        user_picture = fb_api.get_object('%s/picture' % post['from']['id'])
        likes = fb_api.get_object('%s/likes' % fb_id, summary='true')
        comments = fb_api.get_object('%s/comments' % fb_id, summary='true')
        #shares = fb_api.get_object('%s/sharedposts' % fb_id)

        creation_date = datetime.strptime(post['created_time'],'%Y-%m-%dT%H:%M:%S+0000')
        creation_date = '%s %i' % (creation_date.strftime('%b'), creation_date.day)

        # https://developers.facebook.com/docs/graph-api/reference/v2.0/post
        facebook_posts.append({
            'id': post['id'],
            'message': post['message'],
            'link': {
                'url': post['link'],
                'name': post['name'],
                'caption': (post['caption'] if 'caption' in post else None),
                'description': post['description'],
                'picture': post['picture']
            },
            'from': {
                'name': user['name'],
                'link': user['link'],
                'picture': user_picture['url']
            },
            'likes': likes['summary']['total_count'],
            'comments': comments['summary']['total_count'],
            #'shares': shares['summary']['total_count'],
            'creation_date': creation_date
        })

    # Render to JSON
    output = {
        'tweets': tweets,
        'facebook_posts': facebook_posts
    }

    with open('data/featured.json', 'w') as f:
        json.dump(output, f)
Code example #52
"""

import app_config
import json
import logging
import os
import requests

from fabric.api import local, require, task
from pyquery import PyQuery as pq

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

secrets = app_config.get_secrets()

WEBHOOK = secrets['WEBHOOK']


@task
def post_message():
    message = check_page()
    if message:
        r = requests.post(WEBHOOK, data=json.dumps(message))
        logger.info(r.text)


def check_page():
    attachments = []
    last_title = get_last_title()
Code example #53
File: scraper.py Project: isabella232/graeae
#!/usr/bin/env python

from datetime import datetime
import logging
import os
import requests

from pyquery import PyQuery

from app_config import get_secrets
from models import ApiEntry, Article

SECRETS = get_secrets()

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class HomepageScraper:
    url = 'http://npr.org'

    def __init__(self):
        self.run_time = datetime.utcnow()

    def scrape_homepage(self, **kwargs):
        """
        Scrape!
        """
        logger.info('Scraping homepage (start time: %s)' % self.run_time)
Code example #54
File: app.py Project: stlpublicradio/lunchbox
def boundaries():
    context = make_context()
    context['name'] = 'Boundaries'
    context['id'] = context['name']
    context['secrets'] = app_config.get_secrets()
    return make_response(render_template('boundaries.html', **context))
Code example #55
File: ap.py Project: nprapps/elections14
#!/usr/bin/env python

import app_config
import json
import os
import time

from datetime import datetime
from elections import AP
from fabric.api import task
from time import sleep

SECRETS = app_config.get_secrets()

# If main FTP server goes down, uncomment the next line
#AP.FTP_HOSTNAME = 'electionsonline2.ap.org'

def _generate_candidate_id(candidate, race):
    """
    Makes a unique compound ID
    """
    return '%s-%s' % (candidate.ap_polra_number, race.state_postal)
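
# Example with made-up values: a candidate whose ap_polra_number is '6200'
# running in an Iowa race yields the compound id '6200-IA'.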

@task
def bootstrap():
    init()
    update()

@task
def init(output_dir='data'):
    """
Code example #56
File: data.py Project: helgalivsalinas/books14
def load_images():
    """
    Downloads images from Baker and Taylor.
    Eschews the API for a magic URL pattern, which is faster.
    """

    # Secrets.
    secrets = app_config.get_secrets()

    # Open the books JSON.
    with open('www/static-data/books.json', 'rb') as readfile:
        books = json.loads(readfile.read())

    print "Start load_images(): %i books." % len(books)

    # Loop.
    for book in books:

        # Skip books with no title or ISBN.
        if book['title'] == "":
            continue

        if 'isbn' not in book or book['isbn'] == "":
            continue

        # Construct the URL with secrets and the ISBN.
        book_url = "http://imagesa.btol.com/ContentCafe/Jacket.aspx?UserID=%s&Password=%s&Return=T&Type=L&Value=%s" % (
            secrets['BAKER_TAYLOR_USERID'],
            secrets['BAKER_TAYLOR_PASSWORD'],
            book['isbn'])

        # Request the image.
        r = requests.get(book_url)

        path = 'www/assets/cover/%s.jpg' % book['slug']

        # Write the image to www using the slug as the filename.
        with open(path, 'wb') as writefile:
            writefile.write(r.content)

        file_size = os.path.getsize(path)
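        # When Baker and Taylor has no cover, the URL above presumably returns
        # a tiny placeholder image, so anything under ~10KB triggers the
        # NPR-book-page fallback below.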
        if file_size < 10000:
            print u'LOG (%s): Image not available from Baker and Taylor, using NPR book page' % book['title']
            url = 'http://www.npr.org/%s' % book['book_seamus_id']
            npr_r = requests.get(url)
            soup = BeautifulSoup(npr_r.content)
            try:
                if book['title'] == 'The Three-Body Problem':
                    alt_img_url = 'http://media.npr.org/assets/bakertaylor/covers/t/the-three-body-problem/9780765377067_custom-d83e0e334f348e6c52fe5da588ec3448921af64f-s600-c85.jpg'
                else:
                    alt_img_url = soup.select('.bookedition .image img')[0].attrs.get('src').replace('s99', 's400')
                print 'LOG (%s): Getting alternate image from %s' % (book['title'], alt_img_url)
                alt_img_resp = requests.get(alt_img_url)
                with open(path, 'wb') as writefile:
                    writefile.write(alt_img_resp.content)
            except IndexError:
                print u'ERROR (%s): Image not available on NPR book page either (%s)' % (book['title'], url)

        image = Image.open(path)
        image.save(path, optimize=True, quality=75)

    print "End."
Code example #57
File: public_app.py Project: dannydb/tumblr-blog
def _post_to_tumblr():
    """
    Handles the POST to Tumblr.
    """

    def strip_html(value):
        """
        Strips HTML from a string.
        """
        return re.compile(r'</?\S([^=]*=(\s*"[^"]*"|\s*\'[^\']*\'|\S*)|[^>])*?>', re.IGNORECASE).sub('', value)

    def strip_breaks(value):
        """
        Converts newlines, returns and other breaks to <br/>.
        """
        value = re.sub(r'\r\n|\r|\n', '\n', value)
        return value.replace('\n', do_mark_safe('<br/>'))
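    # For example, strip_breaks('line one\r\nline two') yields 'line one<br/>line two'.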

    # Request is a global. Import it down here where we need it.
    from flask import request

    # These should match the form fields.
    message = strip_html(request.form.get('message', None))
    message = escape(message)
    message = strip_breaks(message)

    name = strip_html(request.form.get('signed_name', None))
    email = strip_html(request.form.get('email', None))

    context = {
        'message': message,
        'name': name,
        'email': email,
        'app_config': app_config
    }

    caption = render_template('caption.html', **context)

    secrets = app_config.get_secrets()
    t = Tumblpy(
        app_key=secrets['TUMBLR_APP_KEY'],
        app_secret=secrets['TUMBLR_APP_SECRET'],
        oauth_token=secrets['TUMBLR_OAUTH_TOKEN'],
        oauth_token_secret=secrets['TUMBLR_OAUTH_TOKEN_SECRET'])

    file_path = '/uploads/%s/%s_%s' % (
        app_config.PROJECT_SLUG,
        str(time.mktime(datetime.datetime.now().timetuple())).replace('.', ''),
        secure_filename(request.files['image'].filename.replace(' ', '-'))
    )
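    # The mktime timestamp baked into the filename makes collisions between
    # readers' uploads unlikely.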

    with open('/var/www%s' % file_path, 'wb') as f:
        f.write(request.files['image'].read())

    params = {
        "type": "photo",
        "caption": caption,
        "tags": app_config.TUMBLR_TAGS,
        "source": "http://%s%s" % (app_config.SERVERS[0], file_path)
    }

    try:
        tumblr_post = t.post('post', blog_url=app_config.TUMBLR_URL, params=params)
        tumblr_url = u"http://%s/%s" % (app_config.TUMBLR_URL, tumblr_post['id'])
        logger.info('200 %s reader(%s %s) (times in EST)' % (tumblr_url, name, email))

        return redirect(tumblr_url, code=301)

    except TumblpyError, e:
        logger.error('%s %s http://%s%s reader(%s %s) (times in EST)' % (
            e.error_code, e.msg, app_config.SERVERS[0], file_path, name, email))
        return 'TUMBLR ERROR'
Code example #58
File: data.py Project: nprapps/books17
def load_images():
    """
    Downloads images from Baker and Taylor.
    Eschews the API for a magic URL pattern, which is faster.
    """

    # Secrets.
    secrets = app_config.get_secrets()

    # Open the books JSON.
    with open('www/static-data/books.json', 'rb') as readfile:
        books = json.loads(readfile.read())

    print "Start load_images(): %i books." % len(books)

    always_use_npr_cover = set(app_config.ALWAYS_USE_NPR_COVER)

    # Loop.
    for book in books:

        # Skip books with no title or ISBN.
        if book['title'] == "":
            logger.warning('found book with no title')
            continue

        if 'isbn' not in book or book['isbn'] == "":
            logger.warning('This book has no isbn: %s' % book['title'])
            continue

        # Construct the URL with secrets and the ISBN.
        book_url = "http://imagesa.btol.com/ContentCafe/Jacket.aspx"

        params = {
            'UserID': secrets['BAKER_TAYLOR_USERID'],
            'Password': secrets['BAKER_TAYLOR_PASSWORD'],
            'Value': book['isbn'],
            'Return': 'T',
            'Type': 'L'
        }

        # Request the image.
        r = requests.get(book_url, params=params)

        path = 'www/assets/cover'
        if not os.path.exists(path):
            os.makedirs(path)

        imagepath = '%s/%s.jpg' % (path, book['slug'])

        if os.path.exists(imagepath):
            logger.debug('image already downloaded for: %s' % book['slug'])
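            # Note: this branch only logs; the snippet goes on to re-download
            # and overwrite the existing file regardless.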

        # Write the image to www using the slug as the filename.
        with open(imagepath, 'wb') as writefile:
            writefile.write(r.content)

        file_size = os.path.getsize(imagepath)
        use_npr_book_page = (
                file_size < 10000 or
                book['isbn'] in always_use_npr_cover
        )
        if use_npr_book_page:
            msg = ('(%s): Image not available from Baker and Taylor, '
                   'using NPR book page') % book['title']
            logger.info(msg)
            try:
                alt_img_url = _get_npr_cover_img_url(book)
                msg = 'LOG (%s): Getting alternate image from %s' % (
                    book['title'], alt_img_url)
                logger.info(msg)
                alt_img_resp = requests.get(alt_img_url)
                with open(imagepath, 'wb') as writefile:
                    writefile.write(alt_img_resp.content)
            except ValueError:
                msg = (
                    'ERROR (%s): Image not available on NPR book page either'
                ) % (book['title'])
                logger.info(msg)

        image = Image.open(imagepath)
        image.save(imagepath, optimize=True, quality=75)

    logger.info("Load Images End.")