Exemple #1
0
from django.views.decorators.cache import cache_page
from django.shortcuts import get_object_or_404, render
from django.conf import settings
from universalclient import Client

# Meetup API client; every request carries the API key as a query parameter.
meetup = Client("http://api.meetup.com").setArgs(params={"key": settings.MEETUP_API_KEY})

# Upcoming events for the dcpython group.
# NOTE(review): .get() below performs the HTTP request at module import time,
# so this data is fetched once per process start and never refreshed — the
# cache_page decorator on the view below cannot compensate for that.
upcoming = meetup._('2').events.setArgs(params={"group_urlname": "dcpython"})
upcoming = upcoming.get()
upcoming = upcoming.json()
upcoming = upcoming.get('results')

# Past events for the same group (status=past); also fetched at import time.
past = meetup._('2').events.setArgs(params={"group_urlname": "dcpython", "status": "past"})
past = past.get()
past = past.json()
past = past.get('results')
# API returns oldest-first; reverse so the most recent past event comes first.
past.reverse()


@cache_page(3600)  # cache the rendered page for one hour
def events(request):
    """Render the events page with upcoming and past meetups."""
    # "active" tells the template which nav entry to highlight.
    context = {
        "upcoming": upcoming,
        "past": past,
        "active": "events",
    }
    return render(request, 'events/events.html', context)


def update(request):
    """
    Look up events that have changed on meetup.com and call Models API
    """
Exemple #2
0
# encoding: utf-8
from __future__ import absolute_import

from functools import wraps
import datetime

from dateutil.tz import tzutc
from django.conf import settings
from universalclient import Client

# Meetup API client; the API key is attached to every request as a query param.
MEETUP = Client("http://api.meetup.com").setArgs(params={"key": settings.MEETUP_API_KEY})

# Upcoming events request for the dcpython group. Only a request object is
# built here — no HTTP call happens at import time; callers invoke .get().
UPCOMING_EVENTS = MEETUP._('2').events.setArgs(params={"group_urlname": "dcpython"})

# Past events request: same endpoint with status=past.
PAST_EVENTS = MEETUP._('2').events.setArgs(params={"group_urlname": "dcpython", "status": "past"})


# Via https://github.com/pythonkc/pythonkc-meetups/blob/master/pythonkc_meetups/parsers.py#L102
def parse_datetime_ms(utc_timestamp_ms, utc_offset_ms=None):
    """
    Create a timezone-aware ``datetime.datetime`` from the given UTC timestamp
    (in milliseconds), if provided. If an offset is given, it is applied to the
    datetime returned.

    Parameters
    ----------
    utc_timestamp_ms
        UTC timestamp in milliseconds.
Exemple #3
0
from django.shortcuts import get_object_or_404, render
from django.conf import settings
from universalclient import Client

# Meetup API client, authenticated via an API key query parameter.
meetup = Client("http://api.meetup.com").setArgs(params={"key": settings.MEETUP_API_KEY})
# NOTE(review): this module-level `events` is shadowed by the view function of
# the same name defined below; consider renaming one of them.
events = meetup._('2').events.setArgs(params={"group_urlname": "dcpython"})
# The HTTP request happens here, at import time; the list never refreshes.
upcoming = events.get()
upcoming = upcoming.json()
upcoming = upcoming['results']


def events(request):
    """Render the events page listing upcoming meetups."""
    # NOTE(review): this function shadows the module-level `events` request
    # object defined above.
    context = {"upcoming": upcoming}
    return render(request, 'events/events.html', context)


def update(request):
    """
    Look up events that have changed on meetup.com and call Models API
    """
Exemple #4
0
from universalclient import Client, jsonFilter
import urllib3
from server import settings
from gevent import subprocess
from server import schemas
import json
import bs4
import re

# Elasticsearch client; jsonFilter serializes request bodies to JSON.
es_client = Client(settings.ES_HOST, dataFilter=jsonFilter)
# Blocking urllib3 pool (max 50 keep-alive connections) for raw ES requests.
es_pool = urllib3.connection_from_url(
    settings.ES_HOST,
    maxsize=50,
    block=True,
    headers=urllib3.util.make_headers(keep_alive=True))

# Elasticsearch index names.
history_index = 'history'
search_index = 'search'

# Convenience handles scoped to each index.
search_client = es_client.search
history_client = es_client.history


def save_indexed_version(gh_type, repo_name, typ, version):
    doc_id = (gh_type + '/' + repo_name).replace('/', '%2F')
    body = json.dumps({'version': version})

    url = '/%s/%s/%s/_update' % (history_index, typ, doc_id)
    resp = es_pool.urlopen('POST', url, body=body)
    if resp.status == 500:
        url = '/%s/%s/%s' % (history_index, typ, doc_id)
Exemple #5
0
from universalclient import Client
import urllib3
from server import utils
import itertools

from wiki import index as wiki
from readme import index as readme
from gh_pages import index as gh_pages
from gh_issues import index as gh_issues

# Header options for GitHub requests (kwargs for urllib3.util.make_headers).
headers = {
    'keep_alive': True,
    'user_agent': 'cfpb-tiresias',
}

# NOTE(review): `gh_settings` is neither defined nor imported in this view of
# the file — confirm where it comes from.
gh_api_client = Client(gh_settings.get('API'))

# Optional basic auth, applied both to the API client and the raw header set.
if 'AUTH' in gh_settings:
    gh_api_client = gh_api_client.auth(gh_settings['AUTH'])
    headers['basic_auth'] = '%s:%s' % gh_settings['AUTH']

# Blocking connection pools: one for the web host, one for the API host.
gh_pool = urllib3.connection_from_url(gh_settings.get('WEB'),
                                      maxsize=50,
                                      block=True)
gh_api_pool = urllib3.connection_from_url(
    gh_settings.get('API'),
    maxsize=50,
    block=True,
    headers=urllib3.util.make_headers(**headers))

Exemple #6
0
from server import settings
import helpers
from universalclient import Client
import time
from datetime import datetime

# JIRA REST API v2 client rooted at /rest/api/2.
jira_api_client = Client(settings.JIRA_HOST).rest.api._(2)
# Issue fields requested in each search response.
jira_fields = 'assignee,creator,updated,project,status,summary,labels,description,comment'
# Page size for paginated issue fetches.
max_results = 500


def index():
    """
    sync all jira issues
    """
    offset = 0
    issues = []

    start = time.mktime(datetime.now().timetuple())

    # Grab all data via API calls, 500 issues at a time
    # TODO gevent solution
    while True:
        resp = jira_api_client.search.params(fields=jira_fields,
                                             startAt=offset,
                                             maxResults=max_results,
                                             ).get().json()
        issues += resp['issues']
        if resp['total'] > len(issues):
            offset += max_results
        else:
Exemple #7
0
from universalclient import Client
import urllib3
from server import utils
import itertools

from gh_wiki import index as gh_wiki
from gh_readme import index as gh_readme
from gh_pages import index as gh_pages
from gh_issues import index as gh_issues

# Header options for GitHub requests (kwargs for urllib3.util.make_headers).
headers = {
    'keep_alive': True,
    'user_agent': 'cfpb-tiresias',
}

# NOTE(review): `gh_settings` is neither defined nor imported in this view of
# the file — confirm where it comes from.
gh_api_client = Client(gh_settings.get('API'))

# Optional basic auth, applied both to the API client and the raw header set.
if 'AUTH' in gh_settings:
    gh_api_client = gh_api_client.auth(gh_settings['AUTH'])
    headers['basic_auth'] = '%s:%s' % gh_settings['AUTH']

# Blocking connection pools: one for the web host, one for the API host.
gh_pool = urllib3.connection_from_url(gh_settings.get('WEB'), maxsize=50, block=True)
gh_api_pool = urllib3.connection_from_url(gh_settings.get('API'), maxsize=50, block=True, headers=urllib3.util.make_headers(**headers))

def _get_org_repos(org_name):
    """Return a generator of full names for an org's non-fork repositories."""
    repos = utils.iter_get_url('/orgs/%s/repos' % org_name, gh_api_pool)
    return (repo['full_name'] for repo in repos if not repo['fork'])

def get_repos():
    if not gh_settings.get('ORGS'):
        return []
    org_iters = [_get_org_repos(org_name) for org_name in gh_settings['ORGS']]
Exemple #8
0
from gevent import monkey
from gevent.pool import Pool

# Greenlet pool capped at 50 concurrent jobs.
pool = Pool(50)

# patches stdlib (including socket and ssl modules) to cooperate with other greenlets
# NOTE(review): patch_all() is conventionally called before any other imports;
# confirm nothing imported above has already bound unpatched socket objects.
monkey.patch_all()

import urllib2
from os import path
from os.path import join as path_join

# Log path lives in the client's dist directory, relative to this module.
DIR = path.dirname(path.realpath(__file__))
LOG = path_join(DIR, '..', 'client', 'dist', 'log')

# NOTE(review): `Client` and `settings` are not imported in this view of the
# file — presumably brought in elsewhere; verify.
es_client = Client(settings.ES_HOST)
gh_client = Client(settings.GITHUB_HOST)
gh_api_client = gh_client.api.v3

# Collapses any run of non-word characters (incl. newlines) into one token.
whitespace_re = re.compile(r'(\W|\n)+')


def extract_text_from_html(soup):
    """Flatten all text nodes of *soup* into one whitespace-normalized string."""
    fragments = soup.findAll(text=True)
    joined = ' '.join(fragments)
    return whitespace_re.sub(' ', joined)

def _get_soup(url, id):
    """
    return generator that given a url, gets the content, parses it and
    returns a tuple of the url, the repo name, and the soup of the tag
    with the given id