Example #1
 @gen.coroutine
 def post(self, *args, **kwargs):
     print('entering')
     http = httpclient.AsyncHTTPClient()
     # fetch() needs an explicit POST method for a request with a body,
     # and the body must be an encoded string rather than a dict
     data = yield http.fetch("http://127.0.0.1:8000/log",
                             method='POST',
                             body=urllib.parse.urlencode({22: 44}))
     print('done', data)
     self.finish('6666')
Example #2
def getInfo(url):
    http_client = httpclient.AsyncHTTPClient()
    http_client.fetch(url, callback=processInfo)
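
Tornado 6 removed the callback argument to fetch(); a minimal sketch of the await-based equivalent, with process_info as a hypothetical stand-in for the processInfo handler above:

from tornado import httpclient, ioloop

def process_info(response):
    # hypothetical stand-in for processInfo above
    print(response.code, len(response.body))

async def get_info(url):
    http_client = httpclient.AsyncHTTPClient()
    response = await http_client.fetch(url)
    process_info(response)

# e.g. ioloop.IOLoop.current().run_sync(lambda: get_info('https://example.com'))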
Example #3
 def get_auth_http_client(self):
     return httpclient.AsyncHTTPClient()
Example #4
 def __init__(self, *args, **kwargs):
     self.timeout = kwargs.pop('timeout', None)
     super(HTTPClient, self).__init__(*args, **kwargs)
     self.client = httpclient.AsyncHTTPClient()
Example #5
def download(url):
    http_client = httpclient.AsyncHTTPClient()
    http_client.fetch(url, callback=doUpdate)
Example #6
    @gen.coroutine
    def _FetchGoogleContacts(self):
        """Do GMail specific data gathering and checking.
    Queries Google data API for contacts in JSON format.
    """
        # Track fetched contacts regardless of rank in order to dedup contacts retrieved from Google.
        assert self._identity.refresh_token is not None, self._identity

        if self._identity.expires and self._identity.expires < time.time():
            yield gen.Task(self._identity.RefreshGoogleAccessToken,
                           self._client)

        logging.info('fetching Google contacts for identity %r...' %
                     self._identity)
        http_client = httpclient.AsyncHTTPClient()
        # Google data API uses 1-based start index.
        start_index = 1
        retries = 0
        count = FetchContactsOperation._MAX_FETCH_COUNT
        while True:
            if retries >= FetchContactsOperation._MAX_FETCH_RETRIES:
                raise TooManyRetriesError(
                    'failed to fetch contacts %d times; aborting' % retries)
            logging.info('fetching next %d Google contacts for user %d' %
                         (count, self._user_id))
            url = FetchContactsOperation._GOOGLE_CONTACTS_URL + '?' + \
                urllib.urlencode({'max-results': count,
                                  'start-index': start_index,
                                  'alt': 'json'})
            response = yield gen.Task(http_client.fetch,
                                      url,
                                      method='GET',
                                      headers={'Authorization': 'OAuth %s' % self._identity.access_token,
                                               'GData-Version': '3.0'})
            try:
                response_dict = www_util.ParseJSONResponse(response)['feed']
            except Exception as exc:
                logging.warning('failed to fetch Google contacts: %s' % exc)
                retries += 1
                continue

            # Temporarily log additional information to figure out why some responses don't seem to have "entry" fields.
            if 'entry' not in response_dict:
                logging.warning('Missing entry: %s' %
                                json.dumps(response_dict, indent=True))

            for c_dict in response_dict.get('entry', []):
                # Build identities_properties list from all emails/phone numbers associated with this contact.
                identities_properties = []
                # Process emails first so that if there are any emails, one of them will be first in the
                #   identities_properties list.  This will be *the* identity used for down-level client message
                #   migration.
                for email_info in c_dict.get('gd$email', []):
                    email = email_info.get('address', None)
                    if email is not None:
                        email_type = FetchContactsOperation._GOOGLE_TYPE_LOOKUP.get(
                            email_info.get('rel', None), None)
                        identity_properties = (
                            'Email:' + Identity.CanonicalizeEmail(email),
                            email_info.get('label', email_type))
                        if email_info.get('primary', False):
                            # Insert the primary email address at the head of the list.  Older clients will get this
                            #   as the only email address for this contact when they query_contacts.
                            identities_properties.insert(
                                0, identity_properties)
                        else:
                            identities_properties.append(identity_properties)
                for phone_info in c_dict.get('gd$phoneNumber', []):
                    # See RFC3966: "The tel URI for Telephone Numbers" for more information about this format.
                    #   It should be 'tel:' + E.164 format phone number.
                    phone = phone_info.get('uri', None)
                    if (phone is not None and phone.startswith('tel:+')
                            and Identity.CanCanonicalizePhone(phone[4:])):
                        phone_type = FetchContactsOperation._GOOGLE_TYPE_LOOKUP.get(
                            phone_info.get('rel', None), None)
                        identities_properties.append(
                            ('Phone:' + Identity.CanonicalizePhone(phone[4:]),
                             phone_info.get('label', phone_type)))

                if len(identities_properties) == 0:
                    continue

                # Normalize name to None if empty.
                gd_name = c_dict.get('gd$name', None)
                if gd_name is not None:
                    names = {'name': gd_name.get('gd$fullName', {}).get('$t', None),
                             'given_name': gd_name.get('gd$givenName', {}).get('$t', None),
                             'family_name': gd_name.get('gd$familyName', {}).get('$t', None)}
                else:
                    names = {'name': None, 'given_name': None, 'family_name': None}

                fetched_contact = Contact.CreateFromKeywords(
                    self._user_id,
                    identities_properties,
                    self._notify_timestamp,
                    Contact.GMAIL,
                    rank=None,
                    **names)
                self._fetched_contacts[fetched_contact.contact_id] = fetched_contact

            # Prepare to fetch next batch.
            # Indexes are 1-based, so add 1 to max_index.
            if 'openSearch$totalResults' in response_dict:
                max_index = int(response_dict['openSearch$totalResults']['$t']) + 1
            else:
                max_index = FetchContactsOperation._MAX_GOOGLE_CONTACTS + 1
            next_index = int(response_dict['openSearch$startIndex']['$t']) + \
                len(response_dict.get('entry', []))
            count = min(max_index - next_index, FetchContactsOperation._MAX_FETCH_COUNT)
            if (len(self._fetched_contacts) < FetchContactsOperation._MAX_GOOGLE_CONTACTS
                    and count > 0):
                start_index = next_index
                retries = 0
                continue
            else:
                raise gen.Return()
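
The loop above implements 1-based start-index pagination with bounded retries against the Google Data API. A minimal standalone sketch of the same pattern, assuming only a JSON endpoint that pages the same way:

import json
from urllib.parse import urlencode
from tornado import httpclient

async def fetch_all(base_url, page_size=100, max_retries=3):
    # Collects every entry from a paged feed; start-index is 1-based.
    client = httpclient.AsyncHTTPClient()
    items, start_index, retries = [], 1, 0
    while True:
        url = base_url + '?' + urlencode({'max-results': page_size,
                                          'start-index': start_index,
                                          'alt': 'json'})
        try:
            feed = json.loads((await client.fetch(url)).body)['feed']
        except Exception:
            retries += 1
            if retries >= max_retries:
                raise
            continue
        entries = feed.get('entry', [])
        if not entries:
            return items
        items.extend(entries)
        start_index += len(entries)
        retries = 0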
Example #7
        def _DetermineFacebookRankings():
            """Uses The tags from friends and the authors of the
      photos are used to determine friend rank for facebook contacts. The
      basic algorithm is:

      sorted([sum(exp_decay(pc.time) * strength(pc)) for pc in photos])

      A 'pc' is a photo connection. There are three types, ordered by
      the 'strength' they impart in the summation equation:
        - from: the poster of a photo (strength=1.0)
        - tag: another user tagged in the photo (strength=1.0)
        - like: a facebook user who 'liked' the photo (strength=0.25)
      Exponential decay uses _FACEBOOK_CONNECTION_HALF_LIFE for half life.

      The rankings are passed to the provided callback as a dictionary of
      identity ('FacebookGraph:<id>') => rank.
      """
            logging.info(
                'determining facebook contact rankings for identity %r...' %
                self._identity)
            http_client = httpclient.AsyncHTTPClient()
            friends = dict()  # facebook id => connection strength
            likes = dict()
            now = util.GetCurrentTimestamp()

            def _ComputeScore(create_iso8601, conn_type):
                """Computes the strength of a photo connection based on the time
        that's passed and the connection type.
        """
                decay = 0.001  # default is 1/1000th
                if create_iso8601:
                    dt = iso8601.parse_date(create_iso8601)
                    create_time = calendar.timegm(dt.utctimetuple())
                    decay = math.exp(
                        -math.log(2) * (now - create_time) /
                        FetchContactsOperation._FACEBOOK_CONNECTION_HALF_LIFE)
                return decay * FetchContactsOperation._PHOTO_CONNECTION_STRENGTHS[
                    conn_type]

            # Construct the URL that will kick things off.
            url = FetchContactsOperation._FACEBOOK_PHOTOS_URL + '?' + \
                urllib.urlencode({'access_token': self._identity.access_token,
                                  'format': 'json', 'limit': FetchContactsOperation._MAX_FETCH_COUNT})
            while True:
                logging.info(
                    'querying next %d Facebook photos for user %d' %
                    (FetchContactsOperation._MAX_FETCH_COUNT, self._user_id))
                response = yield gen.Task(http_client.fetch, url, method='GET')
                response_dict = www_util.ParseJSONResponse(response)
                for p_dict in response_dict['data']:
                    created_time = p_dict.get('created_time', None)
                    if p_dict.get('from', None) and p_dict['from']['id']:
                        from_id = p_dict['from']['id']
                        friends[from_id] = friends.get(from_id, 0.0) + \
                            _ComputeScore(created_time, 'from')

                    if p_dict.get('tags', None):
                        for tag in p_dict['tags']['data']:
                            if tag.get('id', None) is not None:
                                friends[tag['id']] = friends.get(tag['id'], 0.0) + \
                                    _ComputeScore(tag.get('created_time', None), 'tag')

                    if p_dict.get('likes', None):
                        for like in p_dict['likes']['data']:
                            if like.get('id', None) is not None:
                                likes[like['id']] = likes.get(like['id'], 0.0) + \
                                    _ComputeScore(created_time, 'like')

                if (len(response_dict['data']) == FetchContactsOperation._MAX_FETCH_COUNT
                        and 'paging' in response_dict
                        and 'next' in response_dict['paging']):
                    url = response_dict['paging']['next']
                else:
                    for fb_id in friends.keys():
                        friends[fb_id] += likes.get(fb_id, 0.0)
                    ranked_friends = sorted(friends.items(),
                                            key=itemgetter(1),
                                            reverse=True)
                    logging.info(
                        'successfully ranked %d Facebook contacts for user %d'
                        % (len(ranked_friends), self._user_id))
                    raise gen.Return(dict([('FacebookGraph:%s' % fb_id, rank) for rank, (fb_id, _) in \
                                          izip(xrange(len(ranked_friends)), ranked_friends)]))
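
The scoring inside _ComputeScore is plain exponential decay with a configurable half-life. A standalone sketch; the half-life is an assumed value standing in for _FACEBOOK_CONNECTION_HALF_LIFE, while the strength table mirrors the docstring above:

import math
import time

HALF_LIFE = 60 * 60 * 24 * 180  # assumption; stands in for _FACEBOOK_CONNECTION_HALF_LIFE
STRENGTHS = {'from': 1.0, 'tag': 1.0, 'like': 0.25}

def compute_score(create_time, conn_type, now=None):
    # Half the strength for every HALF_LIFE seconds of age.
    now = now if now is not None else time.time()
    decay = math.exp(-math.log(2) * (now - create_time) / HALF_LIFE)
    return decay * STRENGTHS[conn_type]

# A photo posted exactly one half-life ago contributes half strength:
# compute_score(time.time() - HALF_LIFE, 'from') -> ~0.5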
Example #8
def get_async_client():
    http_client = httpclient.AsyncHTTPClient()
    return http_client
Example #9
 def handler(self, *args):
     http = httpclient.AsyncHTTPClient()
     http.fetch("http://api.flickr.com/services/feeds/photos_public.gne",
         callback=self.on_response)
Example #10
 def __init__(self, host='127.0.0.1', port=8500, scheme='http'):
     self.host = host
     self.port = port
     self.scheme = scheme
     self.base_uri = '%s://%s:%s' % (self.scheme, self.host, self.port)
     self.client = httpclient.AsyncHTTPClient()
Example #11
    @gen.coroutine
    def post(self, *args, **kwargs):
        xml_data = self.request.body
        dict_data = xmltodict.parse(xml_data)
        msg_type = dict_data['xml']['MsgType']
        if msg_type == 'text':
            content = dict_data['xml']['Content']
            req = robot(content)
            client = httpclient.AsyncHTTPClient()
            response = yield client.fetch(req)
            if response.error:
                self.send_error(500)
            else:
                data = json.loads(response.body.decode('utf-8'))
                resp_data = {
                    'xml': {
                        'ToUserName': dict_data['xml']['FromUserName'],
                        'FromUserName': dict_data['xml']['ToUserName'],
                        'CreateTime': int(time.time()),
                        'MsgType': 'text',
                        'Content': data["results"][0]["values"]["text"]
                    }
                }
                self.write(xmltodict.unparse(resp_data))
        elif msg_type == 'image':
            media_id = dict_data['xml']['MediaId']
            resp_data = {
                'xml': {
                    'ToUserName': dict_data['xml']['FromUserName'],
                    'FromUserName': dict_data['xml']['ToUserName'],
                    'CreateTime': int(time.time()),
                    'MsgType': 'image',
                    'Image': {
                        'MediaId': media_id
                    }
                }
            }
            self.write(xmltodict.unparse(resp_data))
        elif msg_type == 'voice':
            recognition = dict_data['xml']['Recognition']
            media_id = dict_data['xml']['MediaId']
            print(recognition)
            resp_data = {
                'xml': {
                    'ToUserName': dict_data['xml']['FromUserName'],
                    'FromUserName': dict_data['xml']['ToUserName'],
                    'CreateTime': int(time.time()),
                    'MsgType': 'voice',
                    'Voice': {
                        'MediaId': media_id
                    }
                }
            }
            if recognition == '授权链接':  # recognized voice text meaning "authorization link"
                resp_data = {
                    'xml': {
                        'ToUserName': dict_data['xml']['FromUserName'],
                        'FromUserName': dict_data['xml']['ToUserName'],
                        'CreateTime': int(time.time()),
                        'MsgType': 'text',
                        'Content': 'https://open.weixin.qq.com/connect/oauth2/authorize?appid=%s&redirect_uri=%s&response_type=code&scope=snsapi_userinfo&state=iloveu#wechat_redirect' % (WECHAT_APPID, quote(REDIRECT_URI))
                    }
                }

            self.write(xmltodict.unparse(resp_data))
        elif msg_type == 'event':
            event_type = dict_data['xml']['Event']
            if event_type == 'subscribe':
                resp_data = {
                    'xml': {
                        'ToUserName': dict_data['xml']['FromUserName'],
                        'FromUserName': dict_data['xml']['ToUserName'],
                        'CreateTime': int(time.time()),
                        'MsgType': 'text',
                        'Content': 'Hello Python --Tornado'
                    }
                }
                event_key = dict_data['xml'].get('EventKey', None)
                if event_key:
                    scene_id = event_key[8:]  # strip the 'qrscene_' prefix
                    resp_data['xml']['Content'] = 'The parameter of this qr code is: %s' % scene_id
                self.write(xmltodict.unparse(resp_data))
            elif event_type == 'SCAN':
                event_key = dict_data['xml']['EventKey']
                resp_data = {
                    'xml': {
                        'ToUserName': dict_data['xml']['FromUserName'],
                        'FromUserName': dict_data['xml']['ToUserName'],
                        'CreateTime': int(time.time()),
                        'MsgType': 'text',
                        'Content': 'Thank you for your attention. The parameter of your scan is: %s' % event_key
                    }
                }
                self.write(xmltodict.unparse(resp_data))
            elif event_type == 'unsubscribe':
                resp_data = {
                    'xml': {
                        'ToUserName': dict_data['xml']['FromUserName'],
                        'FromUserName': dict_data['xml']['ToUserName'],
                        'CreateTime': int(time.time()),
                        'MsgType': 'text',
                        'Content': 'Bye --Tornado'
                    }
                }
                self.write(xmltodict.unparse(resp_data))
            else:  # Other events
                resp_data = {
                    'xml': {
                        'ToUserName': dict_data['xml']['FromUserName'],
                        'FromUserName': dict_data['xml']['ToUserName'],
                        'CreateTime': int(time.time()),
                        'MsgType': 'text',
                        'Content': 'The universal response'
                    }
                }
                self.write(xmltodict.unparse(resp_data))
        else:
            resp_data = {
                'xml': {
                    'ToUserName': dict_data['xml']['FromUserName'],
                    'FromUserName': dict_data['xml']['ToUserName'],
                    'CreateTime': int(time.time()),
                    'MsgType': 'text',
                    'Content': 'The universal response'
                }
            }
            self.write(xmltodict.unparse(resp_data))
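
For reference, a minimal round-trip of the xmltodict calls this handler leans on: parse() turns the WeChat XML into nested dicts, and unparse() reverses it (a sketch independent of the fields above):

import xmltodict

doc = xmltodict.parse('<xml><MsgType>text</MsgType></xml>')
assert doc['xml']['MsgType'] == 'text'
print(xmltodict.unparse({'xml': {'MsgType': 'text'}}))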
Example #12
def let_the_streaming_begin(io_loop, bootstrap_ip_ports):
    #Setup the DHT
    global dht
    dht = DHT(51414, bootstrap_ip_ports, io_loop = io_loop)

    #Setup KTorrent and Its URL Handlers
    settings = dict( (k, v.value()) for k,v in options.items() )
    application = BTApplication(routes, **settings)
    Connection.ioloop = io_loop
    Connection.application = application

    settings['log_function'] = request_logger
    frontend_application = tornado.web.Application(frontend_routes, **settings)
    frontend_server = tornado.httpserver.HTTPServer(frontend_application, io_loop=io_loop)
    Connection.frontend_server = frontend_server

    try:
        frontend_server.bind(options.frontend_port, '')
        frontend_server.start()
        #logging.info('started frontend server')
    except Exception:
        logging.error('could not start frontend server')


    btserver = BTProtocolServer(application, io_loop=io_loop)
    btserver.bind(options.port, '')
    btserver.start()

    logging.info('started btserver')
    logging.info('\n\n')


    Client.resume()
    client = Client.instances[0]
    Client.http_client = httpclient.AsyncHTTPClient()
    Torrent.client = client

    tornado.ioloop.PeriodicCallback( Connection.make_piece_request, 1000 * 1, io_loop=io_loop ).start()
    tornado.ioloop.PeriodicCallback( Connection.get_metainfo, 1000 * 1, io_loop=io_loop ).start() # better to make event driven
    tornado.ioloop.PeriodicCallback( Client.tick, 1000 * 1, io_loop=io_loop ).start()
    tornado.ioloop.PeriodicCallback( client.do_trackers, 1000 * 1, io_loop=io_loop ).start()
    tornado.ioloop.PeriodicCallback( client.peer_think, 3000 * 1, io_loop=io_loop ).start()
    tornado.ioloop.PeriodicCallback( Connection.cleanup_old_requests, 1000 * 1, io_loop=io_loop ).start()



    def got_interrupt_signal(signum=None, frame=None):
        logging.info('got quit signal ... saving quick resume')
        Client.save_settings()
        #Torrent.save_quick_resume()
        sys.exit()

    signal.signal(signal.SIGINT, got_interrupt_signal)


    settings = dict( (k, v.value()) for k,v in options.items() )

    dht_frontend_routes = [
        ('/?', dhttornado.IndexHandler),
    ]
    dht_frontend_application = tornado.web.Application(dht_frontend_routes, **settings)
    dht_frontend_server = tornado.httpserver.HTTPServer(dht_frontend_application, io_loop=io_loop)
    dht_frontend_server.bind(options.dht_frontend_port, '')
    dht_frontend_server.start()

    dhttornado.IndexHandler.register_dht(dht)

    dht.bootstrap()
    dht.start()  #This also does io_loop.start()
Example #13
 @gen.coroutine
 def add_logs(self):
     api_client = httpclient.AsyncHTTPClient()
     yield api_client.fetch(self.url,
                            method='POST',
                            body=httputil.urlencode(self.data))
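
Tornado's own tornado.httputil module does not provide urlencode, so the httputil above is presumably a project-local helper. A self-contained sketch of the same POST using the standard library instead:

from urllib.parse import urlencode
from tornado import httpclient, ioloop

async def add_logs(url, data):
    client = httpclient.AsyncHTTPClient()
    await client.fetch(url, method='POST', body=urlencode(data))

# e.g. ioloop.IOLoop.current().run_sync(
#          lambda: add_logs('http://127.0.0.1:8000/log', {'level': 'info'}))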
Example #14
 def initialize(self, format=None, format_prefix=""):
     self.format = format or self.default_format
     self.format_prefix = format_prefix
     self.http_client = httpclient.AsyncHTTPClient()
Example #15
 def __init__(self, search, i, session):
     self.search = search
     self.id = i
     self.client = httpclient.AsyncHTTPClient()
     self.session = session
     super(DetailWorker, self).__init__()
Example #16
 def init_handler(self):
     self.room = self.options.get('room')
     self.key = self.options.get('key')
     assert self.room, 'Hipchat room is not defined.'
     assert self.key, 'Hipchat key is not defined.'
     self.client = hc.AsyncHTTPClient()
Example #17
    async def proxy(self, host, port, proxied_path):
        '''
        This server extension handles:
            {base_url}/proxy/{port([0-9]+)}/{proxied_path}
            {base_url}/proxy/absolute/{port([0-9]+)}/{proxied_path}
            {base_url}/{proxy_base}/{proxied_path}
        '''

        if not self._check_host_whitelist(host):
            self.set_status(403)
            self.write(
                "Host '{host}' is not whitelisted. "
                "See https://jupyter-server-proxy.readthedocs.io/en/latest/arbitrary-ports-hosts.html for info."
                .format(host=host))
            return

        if 'Proxy-Connection' in self.request.headers:
            del self.request.headers['Proxy-Connection']

        self._record_activity()

        if self.request.headers.get("Upgrade", "").lower() == 'websocket':
            # We wanna websocket!
            # jupyterhub/jupyter-server-proxy@36b3214
            self.log.info(
                "we wanna websocket, but we don't define WebSocketProxyHandler"
            )
            self.set_status(500)

        body = self.request.body
        if not body:
            if self.request.method == 'POST':
                body = b''
            else:
                body = None

        client = httpclient.AsyncHTTPClient()

        req = self._build_proxy_request(host, port, proxied_path, body)

        try:
            response = await client.fetch(req, raise_error=False)
        except httpclient.HTTPError as err:
            if err.code == 599:
                self._record_activity()
                self.set_status(599)
                self.write(str(err))
                return
            else:
                raise

        # record activity at start and end of requests
        self._record_activity()

        # For all non http errors...
        if response.error and type(response.error) is not httpclient.HTTPError:
            self.set_status(500)
            self.write(str(response.error))
        else:
            self.set_status(response.code, response.reason)

            # clear tornado default header
            self._headers = httputil.HTTPHeaders()

            for header, v in response.headers.get_all():
                if header not in ('Content-Length', 'Transfer-Encoding',
                                  'Content-Encoding', 'Connection'):
                    # some header appear multiple times, eg 'Set-Cookie'
                    self.add_header(header, v)

            if response.body:
                self.write(response.body)
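
_build_proxy_request is not shown in this example. A hedged sketch of what such a helper might look like; the name, signature, and behavior here are assumptions, not the project's actual code:

from tornado import httpclient, httputil

def build_proxy_request(handler, host, port, proxied_path, body):
    # Hypothetical: copy the incoming headers and re-target the URL.
    headers = httputil.HTTPHeaders(handler.request.headers)
    url = 'http://{}:{}{}'.format(host, port, proxied_path)
    return httpclient.HTTPRequest(
        url, method=handler.request.method, headers=headers,
        body=body, allow_nonstandard_methods=True)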
Example #18
 def authenticate_redirect(self):
     http = httpclient.AsyncHTTPClient()
     http.fetch(
         self._oauth_request_token_url(),
         self.async_callback(self._on_request_token,
                             self._OAUTH_AUTHENTICATE_URL, None))
Example #19
    @gen.coroutine
    def _FetchFacebookContacts(self):
        """Do Facebook specific data gathering and checking.
    Queries Facebook graph API for friend list using the identity's access token.
    """
        @gen.coroutine
        def _DetermineFacebookRankings():
            """Uses The tags from friends and the authors of the
      photos are used to determine friend rank for facebook contacts. The
      basic algorithm is:

      sorted([sum(exp_decay(pc.time) * strength(pc)) for pc in photos])

      A 'pc' is a photo connection. There are three types, ordered by
      the 'strength' they impart in the summation equation:
        - from: the poster of a photo (strength=1.0)
        - tag: another user tagged in the photo (strength=1.0)
        - like: a facebook user who 'liked' the photo (strength=0.25)
      Exponential decay uses _FACEBOOK_CONNECTION_HALF_LIFE for half life.

      The rankings are passed to the provided callback as a dictionary of
      identity ('FacebookGraph:<id>') => rank.
      """
            logging.info(
                'determining facebook contact rankings for identity %r...' %
                self._identity)
            http_client = httpclient.AsyncHTTPClient()
            friends = dict()  # facebook id => connection strength
            likes = dict()
            now = util.GetCurrentTimestamp()

            def _ComputeScore(create_iso8601, conn_type):
                """Computes the strength of a photo connection based on the time
        that's passed and the connection type.
        """
                decay = 0.001  # default is 1/1000th
                if create_iso8601:
                    dt = iso8601.parse_date(create_iso8601)
                    create_time = calendar.timegm(dt.utctimetuple())
                    decay = math.exp(
                        -math.log(2) * (now - create_time) /
                        FetchContactsOperation._FACEBOOK_CONNECTION_HALF_LIFE)
                return decay * FetchContactsOperation._PHOTO_CONNECTION_STRENGTHS[
                    conn_type]

            # Construct the URL that will kick things off.
            url = FetchContactsOperation._FACEBOOK_PHOTOS_URL + '?' + \
                urllib.urlencode({'access_token': self._identity.access_token,
                                  'format': 'json', 'limit': FetchContactsOperation._MAX_FETCH_COUNT})
            while True:
                logging.info(
                    'querying next %d Facebook photos for user %d' %
                    (FetchContactsOperation._MAX_FETCH_COUNT, self._user_id))
                response = yield gen.Task(http_client.fetch, url, method='GET')
                response_dict = www_util.ParseJSONResponse(response)
                for p_dict in response_dict['data']:
                    created_time = p_dict.get('created_time', None)
                    if p_dict.get('from', None) and p_dict['from']['id']:
                        from_id = p_dict['from']['id']
                        friends[from_id] = friends.get(from_id, 0.0) + \
                            _ComputeScore(created_time, 'from')

                    if p_dict.get('tags', None):
                        for tag in p_dict['tags']['data']:
                            if tag.get('id', None) is not None:
                                friends[tag['id']] = friends.get(tag['id'], 0.0) + \
                                    _ComputeScore(tag.get('created_time', None), 'tag')

                    if p_dict.get('likes', None):
                        for like in p_dict['likes']['data']:
                            if like.get('id', None) is not None:
                                likes[like['id']] = likes.get(like['id'], 0.0) + \
                                    _ComputeScore(created_time, 'like')

                if (len(response_dict['data']) == FetchContactsOperation._MAX_FETCH_COUNT
                        and 'paging' in response_dict
                        and 'next' in response_dict['paging']):
                    url = response_dict['paging']['next']
                else:
                    for fb_id in friends.keys():
                        friends[fb_id] += likes.get(fb_id, 0.0)
                    ranked_friends = sorted(friends.items(),
                                            key=itemgetter(1),
                                            reverse=True)
                    logging.info(
                        'successfully ranked %d Facebook contacts for user %d'
                        % (len(ranked_friends), self._user_id))
                    raise gen.Return(dict([('FacebookGraph:%s' % fb_id, rank) for rank, (fb_id, _) in \
                                          izip(xrange(len(ranked_friends)), ranked_friends)]))

        logging.info('fetching Facebook contacts for identity %r...' %
                     self._identity)
        http_client = httpclient.AsyncHTTPClient()
        # Track fetched contacts regardless of rank in order to dedup contacts retrieved from Facebook.
        rankless_ids = set()

        # First get the rankings and then fetch the contacts.
        rankings = yield _DetermineFacebookRankings()
        url = FetchContactsOperation._FACEBOOK_FRIENDS_URL + '?' + \
            urllib.urlencode({'fields': 'first_name,name,last_name',
                              'access_token': self._identity.access_token,
                              'format': 'json', 'limit': FetchContactsOperation._MAX_FETCH_COUNT})
        retries = 0
        while True:
            if retries >= FetchContactsOperation._MAX_FETCH_RETRIES:
                raise TooManyRetriesError(
                    'failed to fetch contacts %d times; aborting' % retries)
            logging.info(
                'fetching next %d Facebook contacts for user %d' %
                (FetchContactsOperation._MAX_FETCH_COUNT, self._user_id))
            response = yield gen.Task(http_client.fetch, url, method='GET')
            try:
                response_dict = www_util.ParseJSONResponse(response)
            except Exception as exc:
                logging.warning('failed to fetch Facebook contacts: %s' % exc)
                retries += 1
                continue

            for c_dict in response_dict['data']:
                if 'id' in c_dict:
                    ident = 'FacebookGraph:%s' % c_dict['id']

                    # Skip contact if name is not present, or is empty.
                    name = c_dict.get('name', None)
                    if name:
                        names = {
                            'name': name,
                            'given_name': c_dict.get('first_name', None),
                            'family_name': c_dict.get('last_name', None)
                        }

                        # Check to see if we've already processed an identical contact.
                        rankless_id = Contact.CalculateContactEncodedDigest(
                            identities_properties=[(ident, None)], **names)
                        if rankless_id in rankless_ids:
                            # Duplicate among fetched contacts. Skip it.
                            continue
                        else:
                            rankless_ids.add(rankless_id)

                        rank = rankings.get(ident)
                        fetched_contact = Contact.CreateFromKeywords(
                            self._user_id, [(ident, None)],
                            self._notify_timestamp,
                            Contact.FACEBOOK,
                            rank=rank,
                            **names)
                        self._fetched_contacts[
                            fetched_contact.contact_id] = fetched_contact

            # Prepare to fetch next batch.
            if (len(response_dict['data']) == FetchContactsOperation._MAX_FETCH_COUNT
                    and 'paging' in response_dict
                    and 'next' in response_dict['paging']):
                retries = 0
                url = response_dict['paging']['next']
            else:
                break
Example #20
@gen.coroutine
def _get_next_id():
    response = yield httpclient.AsyncHTTPClient().fetch(DB_USERS)
    data = json.loads(response.body.decode('utf-8'))
    i = int(data['doc_count']) + 1000
    raise gen.Return(str(i).zfill(9))
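
raise gen.Return(value) is how a generator-based coroutine returns a value on Python 2; a native coroutine can simply return. A sketch of the equivalent, taking the DB_USERS URL as a parameter:

import json
from tornado import httpclient

async def get_next_id(db_users_url):
    response = await httpclient.AsyncHTTPClient().fetch(db_users_url)
    data = json.loads(response.body.decode('utf-8'))
    return str(int(data['doc_count']) + 1000).zfill(9)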
Example #21
    os.remove('url.txt')
f = open('url.txt', 'a')
urllies = ['']
while n > 0:
    url = raw_input("what url would you like to test: ")
    req = Request(url)
    try:
        response = urlopen(req)
    except URLError, e:
        if hasattr(e, 'reason'):
            print(url + ' failed for this reason: ' + str(e.reason))
        elif hasattr(e, 'code'):
            print(url + ' failed because of code: ' + str(e.code))
    else:
        urllies.append(str(url))
        f.write(url + '\n')
    n -= 1
urllies.pop(0)
f.close()
print("test started at: " + str(time.time()))

httpc = httpclient.AsyncHTTPClient()
i = 0  # index into urllies; the original initialization was cut from this fragment
while m > 0:
    for url in open('url.txt'):
        print(urllies[i] + " at " + str(time.time()) + " responds with:")
        i += 1
        httpc.fetch(url.strip(), handle_request, method='HEAD')
    m -= 1
    ioloop.IOLoop.instance().start()
print("test ended at: " + str(time.time()))
Example #22
 @gen.coroutine
 def _get_records_count(self):
     response = yield httpclient.AsyncHTTPClient().fetch(DB_RECORDS)
     dat = json.loads(response.body.decode('utf-8'))
     return int(dat['doc_count']) + 1000
Example #23
        id = obj[id_field]
        temp = '{ "index" : {  "_id" : "' + id + '" } }\n' + json.dumps(
            obj) + '\n'
        body = body + temp

i = 0


def handle_request(response):
    print('%d %s' % (response.code, response.body))
    global i
    i -= 1
    if i == 0:
        ioloop.IOLoop.instance().stop()


http_client = httpclient.AsyncHTTPClient()

postHeaders = {"Content-Type": "application/json"}

#for future implementation
for iterator in range(1):
    i += 1
    print("sending request")
    http_client.fetch(request_url,
                      handle_request,
                      method='POST',
                      headers=postHeaders,
                      body=body)

ioloop.IOLoop.instance().start()
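
The body accumulated above is the newline-delimited action/document format expected by Elasticsearch-style bulk endpoints. A small sketch of building it without repeated string concatenation (id_field as in the fragment above):

import json

def build_bulk_body(docs, id_field='id'):
    # One action line plus one document line per record, newline-terminated.
    lines = []
    for doc in docs:
        lines.append(json.dumps({'index': {'_id': doc[id_field]}}))
        lines.append(json.dumps(doc))
    return '\n'.join(lines) + '\n'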
Example #24
    def twitter_request(self,
                        path,
                        callback,
                        access_token=None,
                        post_args=None,
                        **args):
        """Fetches the given API path, e.g., "/statuses/user_timeline/btaylor"

        The path should not include the format (we automatically append
        ".json" and parse the JSON output).

        If the request is a POST, post_args should be provided. Query
        string arguments should be given as keyword arguments.

        All the Twitter methods are documented at
        http://apiwiki.twitter.com/Twitter-API-Documentation.

        Many methods require an OAuth access token which you can obtain
        through authorize_redirect() and get_authenticated_user(). The
        user returned through that process includes an 'access_token'
        attribute that can be used to make authenticated requests via
        this method. Example usage::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.TwitterMixin):
                @tornado.web.authenticated
                @tornado.web.asynchronous
                def get(self):
                    self.twitter_request(
                        "/statuses/update",
                        post_args={"status": "Testing Tornado Web Server"},
                        access_token=user["access_token"],
                        callback=self.async_callback(self._on_post))

                def _on_post(self, new_entry):
                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        self.authorize_redirect()
                        return
                    self.finish("Posted a message!")

        """
        # Add the OAuth resource request signature if we have credentials
        url = "http://api.twitter.com/1" + path + ".json"
        if access_token:
            all_args = {}
            all_args.update(args)
            all_args.update(post_args or {})
            consumer_token = self._oauth_consumer_token()
            method = "POST" if post_args is not None else "GET"
            oauth = self._oauth_request_parameters(url,
                                                   access_token,
                                                   all_args,
                                                   method=method)
            args.update(oauth)
        if args: url += "?" + urllib.urlencode(args)
        callback = self.async_callback(self._on_twitter_request, callback)
        http = httpclient.AsyncHTTPClient()
        if post_args is not None:
            http.fetch(url,
                       method="POST",
                       body=urllib.urlencode(post_args),
                       callback=callback)
        else:
            http.fetch(url, callback=callback)
Example #25
def checkForUpdates(url):
    http_client = httpclient.AsyncHTTPClient()
    http_client.fetch(url, callback=checkVersion)
Example #26
    def friendfeed_request(self,
                           path,
                           callback,
                           access_token=None,
                           post_args=None,
                           **args):
        """Fetches the given relative API path, e.g., "/bret/friends"

        If the request is a POST, post_args should be provided. Query
        string arguments should be given as keyword arguments.

        All the FriendFeed methods are documented at
        http://friendfeed.com/api/documentation.

        Many methods require an OAuth access token which you can obtain
        through authorize_redirect() and get_authenticated_user(). The
        user returned through that process includes an 'access_token'
        attribute that can be used to make authenticated requests via
        this method. Example usage::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FriendFeedMixin):
                @tornado.web.authenticated
                @tornado.web.asynchronous
                def get(self):
                    self.friendfeed_request(
                        "/entry",
                        post_args={"body": "Testing Tornado Web Server"},
                        access_token=self.current_user["access_token"],
                        callback=self.async_callback(self._on_post))

                def _on_post(self, new_entry):
                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        self.authorize_redirect()
                        return
                    self.finish("Posted a message!")

        """
        # Add the OAuth resource request signature if we have credentials
        url = "http://friendfeed-api.com/v2" + path
        if access_token:
            all_args = {}
            all_args.update(args)
            all_args.update(post_args or {})
            consumer_token = self._oauth_consumer_token()
            method = "POST" if post_args is not None else "GET"
            oauth = self._oauth_request_parameters(url,
                                                   access_token,
                                                   all_args,
                                                   method=method)
            args.update(oauth)
        if args: url += "?" + urllib.urlencode(args)
        callback = self.async_callback(self._on_friendfeed_request, callback)
        http = httpclient.AsyncHTTPClient()
        if post_args is not None:
            http.fetch(url,
                       method="POST",
                       body=urllib.urlencode(post_args),
                       callback=callback)
        else:
            http.fetch(url, callback=callback)
Example #27
def download(url):
    http_client = httpclient.AsyncHTTPClient()
    http_client.fetch(url, callback=decompress)
Example #28
    def _get_http_client(self):
        """Returns an asynchronous web client object

        The object is actually of type SimpleAsyncHTTPClient
        """
        return httpclient.AsyncHTTPClient()
Example #29
def get_query_records(sql, query_limit):
    '''
        periodic query records function
    '''
    logging.info('a little brain-dead recollection of records')
    record_list = []

    http_client = httpclient.AsyncHTTPClient()

    def handle_record_uuid(response):
        '''
            Request Handler Record UUID
        '''
        if response.error:
            logging.error(response.error)
        else:
            logging.info(response.body)

    def handle_request(response):
        '''
            Request Handler
        '''
        if response.error:
            logging.error(response.error)
        else:
            logging.info(response.body)

            result = json.loads(response.body)

            request_id = result.get('uuid', None)
            if request_id:
                request_id = request_id.get('uuid')

            http_client.fetch('http://iofun.io/records/{0}'.format(request_id),
                              headers={"Content-Type": "application/json"},
                              method='GET',
                              callback=handle_record_uuid)

    try:
        # Get SQL database from system settings
        # PostgreSQL insert new sip account query
        query = '''
            SELECT
                DISTINCT ON (uniqueid) uniqueid,
                calldate as start,
                date(calldate) as strdate,
                clid as callerid,
                src as source,
                dst as destination,
                dcontext as destination_context,
                channel,
                dstchannel as destination_channel,
                duration,
                billsec,
                billsec as seconds,
                disposition,
                checked 
            FROM cdr 
            WHERE checked = false
            ORDER BY uniqueid DESC
            LIMIT {0};
        '''.format(query_limit)
        result = yield sql.query(query)

        logging.warning('Getting {0} rows from PostgreSQL'.format(len(result)))

        for x in result:
            record_list.append(x)

        result.free()

        # TODO: Still need to check the following exceptions with the new queries module.
        #except (psycopg2.Warning, psycopg2.Error) as e:
        #    logging.exception(e)
        #    raise e

    except Exception, e:
        logging.exception(e)
        raise e
    def get(self):
        # http://localhost:port/search?idx_job=0
        idx_job = self.get_argument('idx_job')
        docID_words = pickle.load(
            open('RelevanceAnalysis/relevance_jobs/%s.out' % idx_job, 'rb'))

        # format of term_relevance_docs cache: ['term'] = ['docID1', 'docID2']
        """
        cache_dict = defaultdict(list)
        if os.path.exists('RelevanceAnalysis/cache_jobs/cache'):
            cache = open('RelevanceAnalysis/cache_jobs/cache',"r")
            for line in cache:
                key = json.loads(line)[0]
                value = json.loads(line)[1]
                cache_dict[key] = value
        """

        docID_releventDocs = defaultdict(list)
        count = 0

        for docID in docID_words:
            releventDocs = []
            storedDocs = []
            storedDocs.append(docID)
            for i in range(len(docID_words[docID])):
                query = docID_words[docID][i][0]
                print(query)
                """
                if query in cache_dict.keys():
                    docIDs = cache_dict[query]
                    for i in range(len(docIDs)):
                        releventDocs.append((docIDs[i], query))
                else:
                """

                httpclient.AsyncHTTPClient.configure(
                    None, defaults={'connect_timeout': 300,
                                    'request_timeout': 300})
                http_client_index = httpclient.AsyncHTTPClient()
                indexServerRequest = yield [
                    http_client_index.fetch(server + query)
                    for server in inventory.indexServers
                ]

                docIDs = []
                for request in indexServerRequest:
                    docIDs += json.loads(request.body.decode())["postings"]

                docIDs.sort(key=lambda x: -x[1])

                num_result = min(MAX_RESULT_PER_WORD, len(docIDs))
                docIDs_query = []

                releventDocs_per_query = []
                for d in docIDs:
                    if d[0] in storedDocs:
                        continue
                    else:
                        releventDocs_per_query.append((d[0], query))
                        storedDocs.append(d[0])
                        # cache_dict[query].append(d[0])
                    if len(releventDocs_per_query) == num_result:
                        break

                releventDocs += releventDocs_per_query

                #append_cache = open('RelevanceAnalysis/cache_jobs/cache',"a")
                #append_cache.write(json.dumps((query, cache_dict[query])) + "\n")

            count += 1
            print("%s has extracted %d docs" % (idx_job, count))

            for i in range(len(releventDocs)):
                print(releventDocs[i])

            docID_releventDocs[docID] = releventDocs

        pickle.dump(
            docID_releventDocs,
            open(
                'IndexingRetrieval/relevance_docs/tmpDocs/relevantDocs' +
                str(idx_job), 'wb'))
        self.finish()
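
AsyncHTTPClient.configure sets process-wide defaults and only needs to run once, so calling it inside the per-word loop above is redundant. A minimal sketch of configuring the timeouts once at startup:

from tornado import httpclient

# Configure once; every AsyncHTTPClient() created afterwards shares
# these defaults.
httpclient.AsyncHTTPClient.configure(
    None, defaults=dict(connect_timeout=300, request_timeout=300))
client = httpclient.AsyncHTTPClient()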