Example #1
def validate_token(access_token):
    '''Verifies that an access-token is valid and
    meant for this app.

    Returns None on fail, and an e-mail on success'''
    h = Http()
    resp, cont = h.request("https://www.googleapis.com/oauth2/v2/userinfo",
                           headers={'Host': 'www.googleapis.com',
                                    'Authorization': access_token})

    if resp['status'] != '200':
        return None

    try:
        data = json.loads(cont)
    except TypeError:
        # httplib2 returns byte objects
        data = json.loads(cont.decode())

    email = data['email']
    users = current_app.config.get('FEEDER_USERS', {})
    if current_app.config.get('FEEDER_ALLOW_ANY_GOOGLE', False):
        return email
    elif email in users:
        return email
    else:
        return None
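
A minimal usage sketch for the helper above (not part of the original snippet): the token value is a placeholder, and the 'Bearer ' prefix is an assumption about the scheme the userinfo endpoint expects. The call must run inside a Flask application context, since validate_token reads current_app.config.

email = validate_token('Bearer ya29.EXAMPLE')  # placeholder token
if email is None:
    print('token rejected')
else:
    print('authenticated as %s' % email)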
Example #2
def fetch_bizrate_page(url_part, data=None):
    h = Http()
    req_url = 'http://www.bizrate.co.uk/' + url_part
    if data:
        return h.request(req_url, 'POST', data)
    else:
        return h.request(req_url, 'GET')
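
A hedged usage sketch for fetch_bizrate_page: the path and form fields are placeholders, and the caller urlencodes the body itself because httplib2 does not encode POST bodies.

from urllib import urlencode  # urllib.parse.urlencode on Python 3

resp, content = fetch_bizrate_page('kettles')  # plain GET
resp, content = fetch_bizrate_page('search', data=urlencode({'q': 'kettle'}))  # POST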
Example #3
 def close_job(self, job_id):
     doc = self.create_close_job_doc()
     http = Http()
     url = self.endpoint + "/job/%s" % job_id
     resp, content = http.request(url, "POST", headers=self.headers(),
                                  body=doc)
     self.check_status(resp, content)
Example #4
    def check(self, instance):
        url = instance['url']
        default_timeout = self.init_config.get('default_timeout', 5)
        timeout = float(instance.get('timeout', default_timeout))

        dimensions = self._set_dimensions(None, instance)

        try:
            h = Http(timeout=timeout)
            resp, content = h.request(url, "GET")

        except socket.timeout:
            return

        except socket.error:
            return

        except HttpLib2Error:
            return

        stats = json.loads(content)

        [self.gauge("riak." + k, stats[k], dimensions=dimensions) for k in self.keys if k in stats]

        coord_redirs_total = stats["coord_redirs_total"]
        if self.prev_coord_redirs_total > -1:
            count = coord_redirs_total - self.prev_coord_redirs_total
            self.gauge('riak.coord_redirs', count, dimensions=dimensions)

        self.prev_coord_redirs_total = coord_redirs_total
Example #5
def GetDocument(query=None,url=None,fields=None,filter=None,sort=None):
    data = {}
    flag = 0
    if fields is not None:
        data['fields'] = fields
        flag = 1
    if query is not None:
        data['query'] = query
    if filter is not None:
        data['filter'] = filter
    if sort is not None:
        data['sort'] = sort
    h = Http()
    data = jsonpickle.encode(data,unpicklable=True)
    print data
    resp,content = h.request(url,"GET",data)
    print content
    print resp
    if resp.status == 200:
        if flag == 0:
            content = jsonpickle.decode(content)
            result = []
            for res in content['hits']['hits']:
                result.append(res['_source'])
            return result
        else:
            return content
    else:
        return resp
Example #6
    def themes_errors(self):
        from c2cgeoportal.models import DBSession, Interface

        settings = self.settings.get("themes", {})

        url = self.request.route_url("themes")
        h = Http()
        default_params = settings.get("default", {}).get("params", {})
        for (interface,) in DBSession.query(Interface.name).all():
            params = {}
            params.update(default_params)
            params.update(settings.get(interface, {}).get("params", {}))
            params["interface"] = interface
            interface_url = add_url_params(url, params)

            interface_url, headers = build_url("Check the theme", interface_url, self.request)

            resp, content = h.request(interface_url, headers=headers)

            if resp.status != httplib.OK:
                self.set_status(resp.status, resp.reason)
                return self.make_response(content)

            result = loads(content)

            if len(result["errors"]) != 0:
                self.set_status(500, "Theme with error")

                return self.make_response(
                    "Theme with error for interface '%s'\n%s" % (Interface.name, "\n".join(result["errors"]))
                )

        return self.make_response("OK")
Example #7
	def request(self, resource, body=None, method='POST', allow_error=False):
		http = Http()

		if not body:
			body = {}

		body['player_id'] = self.game['id']
		body['secret'] = self.secret
		body = json.dumps(body)

		response, data = http.request("%s/%s" % (self.game['endpoint'], resource), method=method, body=body, headers={"Content-type": "application/json"})

		if not allow_error and response.status != 200:
			print "Error ", data
			return False

		try:
			data = json.loads(data)
			if 'player' in data:
				player = data['player']
				self.resources = player['resources']
				self.generators = player['generators']
				self.improved_generators = player['improved_generators']
				del data['player']
		except ValueError:
			pass

		if allow_error:
			return response, data
		return data
Example #8
 def savedsearch(self,q=""):
     """
     Runs one of your saved searches
     """
     query=Http(timeout=10)
     query.add_credentials(self.username,self.password)
     resp, cont=query.request("http://"+self.subdomain+".loggly.com/api/savedsearches","GET")
     content=loads(cont)
     saved=None
     for search in content:
         if search['name']==q:
            saved=search
     if saved is None:
         raise ValueError("Your account does not have a search of that name,\
         please go to "+self.subdomain+".loggly.com to check your saved searches")
     params=saved['context']
     opts={}
     inputs=""
     devices=""
     for x in params:
         if x!="terms" and x!="inputs" and x!="devices":
             opts[self.ssdict[x]]=params[x]
     if params['inputs']:
         inputs+=" AND (inputname:"+" OR inputname:".join(params['inputs'])+")"
     if params['devices']:
         devices+=" AND (ip:"+" OR ip:".join(params['devices'])+")"
     return self.search(q=params['terms']+inputs+devices,**opts)
Example #9
 def findsavedsearchnames(self):
     query=Http(timeout=10)
     query.add_credentials(self.username,self.password)
     resp, cont=query.request("http://"+self.subdomain+".loggly.com/api/savedsearches","GET")
     content=loads(cont)
     names=[x['name'] for x in content]
     return names
Example #10
def purge_version(version, mainsite=False, subdomain=False, cname=False):
    varnish_servers = getattr(settings, 'VARNISH_SERVERS', None)
    h = Http()
    if varnish_servers:
        for server in varnish_servers:
            if subdomain:
                #Send a request to the Server, to purge the URL of the Host.
                host = "%s.readthedocs.org" % version.project.slug
                headers = {'Host': host}
                url = "/en/%s/*" % version.slug
                to_purge = "http://%s%s" % (server, url)
                print "Purging %s on %s" % (url, host)
                ret = h.request(to_purge, method="PURGE", headers=headers)
            if mainsite:
                headers = {'Host': "readthedocs.org"}
                url = "/docs/%s/en/%s/*" % (version.project.slug, version.slug)
                to_purge = "http://%s%s" % (server, url)
                print "Purging %s on readthedocs.org" % url
                ret = h.request(to_purge, method="PURGE", headers=headers)
            if cname:
                redis_conn = redis.Redis(**settings.REDIS)
                for cnamed in redis_conn.smembers('rtd_slug:v1:%s' % version.project.slug):
                    headers = {'Host': cnamed}
                    url = "/en/%s/*" % version.slug
                    to_purge = "http://%s%s" % (server, url)
                    print "Purging %s on %s" % (url, cnamed)
                    ret = h.request(to_purge, method="PURGE", headers=headers)
                    print ret
Example #11
def makeRequest(url,nocache=False,overrideCache=False):
    # check cache
    if(DO_CACHING and not nocache and not overrideCache):
        QUERY_CACHE = sqlite3.connect("query_cache.db")
        try:
            result = QUERY_CACHE.execute("SELECT * FROM cache WHERE url=?",(url,))
        except sqlite3.OperationalError:
            # database isn't set up yet
            setupCacheDB()
            result = QUERY_CACHE.execute("SELECT * FROM cache WHERE url=?",(url,))
        cachedResponse = result.fetchone()
        if(cachedResponse is not None):   
            logQuery(url,"CACHE")
            QUERY_CACHE.commit()
            QUERY_CACHE.close()
            return str(cachedResponse[1])
    
    # not in cache/don't cache
    h = Http()
    response = h.request(url, headers={'Accept':'application/xml'})
    
    logQuery(url,"QUERY")
    
    # update cache
    if(DO_CACHING and not nocache):
        QUERY_CACHE.execute("DELETE FROM cache WHERE url=?",(url,))
        QUERY_CACHE.execute("INSERT INTO cache VALUES (?,?)",(url,sqlite3.Binary(response[1])))
        QUERY_CACHE.commit()
        QUERY_CACHE.close()
    
    return response[1]
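
A hedged usage sketch showing the three cache modes of makeRequest; the URL is a placeholder and DO_CACHING is assumed to be enabled at module level.

url = 'http://api.example.com/items?id=1'     # placeholder
xml = makeRequest(url)                        # served from cache when present
fresh = makeRequest(url, overrideCache=True)  # skip the cache read, then store
once = makeRequest(url, nocache=True)         # never read or write the cache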
Example #12
class Client(object):

    def __init__(self, server_name):
        self.http = Http()
        self.server_name = 'https://' + server_name

    def get(self, action_path, *values):
        path = action_path + '/'.join([quote(unicode(value)) for value in values])
        url = ''.join((self.server_name, path))
        response, content = self.http.request(url)

        if response['status'] == '500':
            raise FailedApiCall(GetResponse(content).error, url)
        if response['status'] != '200':
            msg = 'Server `%s` answered %s status for `%s`.\n%s'
            raise BadResponse(msg % (self.server_name,
                                     response['status'],
                                     url,
                                     content))
        if response['content-type'] != 'text/xml':
            msg = 'Server `%s` answered %s content-type for `%s`.\n%s'
            raise BadResponse(msg % (self.server_name,
                                     response['content-type'],
                                     url,
                                     content))
        response = GetResponse(content)
        return response

    def post(self, path, data):
        url = self.server_name + path
        response, content = self.http.request(url, 'POST', data.encode('latin1', 'ignore'))
        return PostResponse(content)
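
A hypothetical caller for the Client class above; the server name, path segments, and payload are placeholders.

client = Client('api.example.com')
# GET https://api.example.com/widgets/42/details
response = client.get('/widgets/', 42, 'details')
# POST a latin1-encodable payload
client.post('/widgets/42/rename', u'name=kettle')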
Example #13
def ogcproxy(request):
    url = request.params.get("url")
    if url is None:
        return HTTPBadRequest()

    # check for full url
    parsed_url = urlparse(url)
    if not parsed_url.netloc or parsed_url.scheme not in ("http", "https"):
        return HTTPBadRequest()

    # forward request to target (without Host Header)
    http = Http(disable_ssl_certificate_validation=True)
    h = dict(request.headers)
    h.pop("Host", None)
    try:
        resp, content = http.request(url, method=request.method,
                                     body=request.body, headers=h)
    except Exception:
        return HTTPBadGateway()

    # check for allowed content types
    if resp.has_key("content-type"):
        ct = resp["content-type"]
        if not ct.split(";")[0] in allowed_content_types:
            # allow any content type from allowed hosts (any port)
            if not parsed_url.netloc in allowed_hosts:
                return HTTPForbidden()
    else:
        return HTTPNotAcceptable()

    response = Response(content, status=resp.status,
                        headers={"Content-Type": ct})

    return response
Example #14
def compile_html():
    # load sources from disk
    with open("../src/index.html", "r") as f:
        src_html = f.read()
    with open("../src/js/picedit.js", "r") as f:
        src_js = f.read()
    # extract html code
    phtm = re.compile(r'<!-- begin_picedit_box -->.+<!-- end_picedit_box -->', re.IGNORECASE | re.DOTALL)
    pouthtm = re.findall(phtm, src_html)
    # minify the html
    url = 'http://www.willpeavy.com/minifier/'   
    body = {'html': pouthtm[0]}
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    http = Http()
    resp, content = http.request(url, "POST", headers=headers, body=urlencode(body))
    outhtml = re.findall('(<textarea.+?>)(.+?)(</textarea)', content)
    #comment and uncomment source and dist code
    unc = re.compile('(\/\*unhide_in_prod\*\/.*?)(\/\*)(.+?)(\*\/)(.*\/\*unhide_in_prod\*\/)', re.IGNORECASE | re.DOTALL)
    src_js = unc.sub(r'\1 \3 \5', src_js)
    unc = re.compile('(\/\*hide_in_prod\*\/)(.+)(\/\*hide_in_prod\*\/)', re.IGNORECASE | re.DOTALL)
    src_js = unc.sub(r'\1 /* \2 */ \3', src_js)
    #apply compiled html
    unc = re.compile('compiled_template_markup', re.DOTALL)
    src_js = unc.sub(outhtml[0][1], src_js)
    # save pre-processed js to the dist folder
    with open("../dist/js/picedit.js", "w") as f:
        f.write(src_js)
Example #15
    def submit(self):
        #Body content.
        flattened = list(itertools.chain(*self.parts))
        flattened.append('--' + self.boundary + '--')
        flattened.append('')
        body = str('\r\n'.join(flattened))

        #Header content.
        authheader = "Basic %s" % base64.encodestring('%s:%s' % (self.user, self.password))[:-1]
        headerType = 'multipart/form-data; boundary=%s' % self.boundary
        headers = {}
        headers['Authorization'] = authheader
        headers['Content-type'] = headerType

        defaultTimeout = socket.getdefaulttimeout()
        socket.setdefaulttimeout (30)
        try:
            h = Http()
            response, content = h.request(self.url, "POST",  body=body, headers=headers)

            print ("httpPost Response " + str(response))
            if response.get("status") == "401":
                response, content = h.request(self.url, "POST",  body=body, headers=headers)
                print ("httpPost Unauthorized, retryPost response " + str(response))

            # store cookie used for the furture _httpPost
            #cookie = response.get('cache-control')

        except urllib2.HTTPError, e:
            print ('HTTP Response Code: %s' % e.code)
            raise e
Example #16
    def _make_request(self, url, method, payload={}, headers={}):
        """
        A wrapper around httplib2.Http.request.
        
        Required Arguments:
        
            url
                The url of the request.
                
            method
                The method of the request. I.e. 'GET', 'PUT', 'POST', 'DELETE'.
            
        Optional Arguments:
            
            payload
                The urlencoded parameters.
            
            headers
                Additional headers of the request.
                
        """
        try:
            if self._meta.ignore_ssl_validation:
                http = Http(disable_ssl_certificate_validation=True)
            else:
                http = Http()

            if self._auth_credentials:
                http.add_credentials(self._auth_credentials[0], self._auth_credentials[1])

            return http.request(url, method, payload, headers=headers)
        except socket.error as e:
            raise exc.dRestAPIError(e.args[1])
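
A hedged sketch of calling the wrapper, assuming `api` is an instance of the surrounding class with _meta and _auth_credentials already set up; the URL and payload are placeholders.

from urllib import urlencode  # urllib.parse.urlencode on Python 3

payload = urlencode({'name': 'kettle'})
response, content = api._make_request('http://api.example.com/widgets', 'POST',
                                      payload=payload,
                                      headers={'Content-Type': 'application/x-www-form-urlencoded'})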
Example #17
def build_http_connection(config, timeout=120):
    """
    @config: dict like, proxy and account information are in the following
             format {
                 "username": xx,
                 "password": yy,
                 "proxy_url": zz,
                 "proxy_port": aa,
                 "proxy_username": bb,
                 "proxy_password": cc,
             }
    @return: Http2.Http object
    """

    proxy_info = None
    if config.get("proxy_url") and config.get("proxy_port"):
        if config.get("proxy_username") and config.get("proxy_password"):
            proxy_info = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
                                   proxy_host=config["proxy_url"],
                                   proxy_port=config["proxy_port"],
                                   proxy_user=config["proxy_username"],
                                   proxy_pass=config["proxy_password"])
        else:
            proxy_info = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP,
                                   proxy_host=config["proxy_url"],
                                   proxy_port=config["proxy_port"])
    http = Http(proxy_info=proxy_info, timeout=timeout,
                disable_ssl_certificate_validation=True)
    if config.get("username") and config.get("password"):
        http.add_credentials(config["username"], config["password"])
    return http
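
A usage sketch under stated assumptions: every value below is a placeholder, and the dict layout follows the docstring above. Omitting the proxy keys yields a direct connection; omitting the proxy credentials yields an unauthenticated proxy.

config = {
    "username": "monitor",
    "password": "s3cret",
    "proxy_url": "proxy.example.com",
    "proxy_port": 3128,
    "proxy_username": "proxyuser",
    "proxy_password": "proxypass",
}
http = build_http_connection(config, timeout=30)
resp, content = http.request("https://api.example.com/health", "GET")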
Example #18
def search(cid, phrase,
    titles = etree.XPath('//*/n:title/text()', namespaces = NAMESPACES),
    host = SAS_BASE + '/product_search'):
    
    params = {
        'search_conf_pid_weight' : '7',
        'search_conf_field_weight' : '10',
        'skip' : '0',
        'rev_config' : '3:250,250,250',
        'search_conf_plugin' : '28',
        'append' : 'csp_data',
        'relevancy_config' : '1',
        'backfill' : '0:40',
        'search_meta' : True,
        'top' : MAX_FETCH,
        'keyword' : phrase,
        'cid' : cid,
        #'ds' : 'astrobds001.shopzilla.laxhq:7005',
    }

    sas_search = '?'.join((host, urlencode(params)))
    h = Http()
    res, content = h.request(sas_search, headers=STD_HEADERS)

    if res.status > 400:
        raise Exception("Invalid response from search request: {%s} response: {%s}" %(sas_search, content))

    root = etree.fromstring(content)
    return sas_search, titles(root)
Example #19
def login(user=None, passwd=None, auth_url=None):
    if user is None or passwd is None or auth_url is None:
        return False
    h = Http()
    data = {"DDDDD":user, "upass":passwd, "save_me":1, "R1":0}
    resp, content = h.request(auth_url, "POST", urlencode(data))
    return test()
Example #20
  def get(self):
    self.response.headers['Content-Type'] = 'application/json'
    code = self.request.get('code')
    from httplib2 import Http
    from urllib import urlencode
    

    h = Http()
    data = dict(redirect_uri='http://localhost:8080/githubauthback', client_id='baccad0153a85f91041f',
                code=code, client_secret='aad9fc6fbd5f9cca609c76a7b693d3f14bf56bae')
    headers = {'Accept': 'application/json'}
    resp, data = h.request('https://github.com/login/oauth/access_token', 'POST', headers=headers, body=urlencode(data))
    data = json.loads(data)
    self.response.write('Git Hub :\n\n')
    self.response.write(resp)
    self.response.write('\n\n')
    self.response.write(str(data))

    headers = {'Authorization': 'token ' + data['access_token']}
    resp, data = h.request('https://api.github.com/user', 'GET', headers=headers)
    data = json.loads(data)
    self.response.write('\n\n')
    self.response.write(resp)
    self.response.write('\n\n')
    self.response.write(str(data))
Example #21
def getstatsoffields(sellerid, categoryid, problem, field):
    try:
        http = Http()
        tempfield = replace_special_char_for_solr([field])[0]
        url = config.SOLR_BASE_URL + "/select?rows=20&indent=true&wt=json&stats=true&q=" 
        condition = "sellerId:%s AND categoryId:%s AND " % (sellerid, categoryid)
        condition = urllib.quote_plus(condition) + "%s:%s" % (urllib.quote_plus(tempfield), urllib.quote_plus(problem))
        url += condition + "&group=true&group.field=itemId&group.limit=1&group.ngroups=true"
        
        
        response, rawBody = solrcallwrapper(url=url + "&rows=0", method="GET", body=None, headers=None)
        data = json.loads(rawBody)
        
        totalcount = data['grouped']['itemId']['ngroups']
        result = {}
        for fielddd in field_list:
            result[fielddd] = 0
        for i in range(0, totalcount, 20):
            response, rawBody = http.request(uri=url + "&start=" + str(i))
            tempdata = json.loads(rawBody)
            for group in tempdata['grouped']['itemId']['groups']:
                for fielddd in field_list:
                    result[fielddd] += group['doclist']['docs'][0][fielddd]

        
        return result
    except Exception, e:
        logger.exception(e)
Example #22
def proxy(request):
    """Pass an HTTP request on to another server."""

    # TODO: don't hardcode http
    uri = "http://" + HOST + request.META['PATH_INFO']
    if request.META['QUERY_STRING']:
        uri += '?' + request.META['QUERY_STRING']

    headers = {}
    for name, val in six.iteritems(request.environ):
        if name.startswith('HTTP_'):
            name = header_name(name)
            headers[name] = val

    # TODO: try/except
    http = Http()
    http.follow_redirects = False
    logger.debug("GET for: %s" % uri)
    info, content = http.request(uri, 'GET', headers=headers)
    response = HttpResponse(content, status=info.pop('status'))

    for name, val in info.items():
        if not is_hop_by_hop(name):
            response[name] = val
    logger.info("PROXY to: %s" % uri)
    return response
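
For completeness, a hedged sketch of wiring the view into a URLconf; the catch-all pattern and the old-style django.conf.urls.url import are assumptions about the surrounding Django version.

from django.conf.urls import url

urlpatterns = [
    url(r'^', proxy),  # forward everything under this prefix
]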
Example #23
def login(user, password, use_cache=True):
    """Sign in to renren.com. Return and save the cookie on success."""
    # TODO:
    # 1. deal with timeout
    # 2. random useragent
    # 3. more accurate headers
    # 4. deal with verification code and password errors

    if use_cache:
        cookie = __get_cookie(user)
        if cookie:
            return cookie

    headers = headers_template.copy()
    # url
    url = "http://www.renren.com/PLogin.do"
    home = "http://www.renren.com/home"
    # body
    login_data = {"email": user, "password": password, "origURL": home, "domain": "renren.com"}
    body = urllib.urlencode(login_data)

    h = Http()

    rsp, content = h.request(url, "POST", headers=headers, body=body)  # response 302

    if rsp.get("location", None) == home:  # redirect to home if success
        cookie = rsp["set-cookie"]
        __save_cookie(user, cookie)
        return cookie
    else:  # redirect to login page again if failed
        with open(os.path.join(base_path, "login_failed_{}.html".format(user)), "w") as f:
            f.write(content)
        return None
Example #24
    def _send_request(self, method, path, body='', headers={}):
        if TIMEOUTS_AVAILABLE:
            url = self.url.replace(self.path, '')            
            http = Http(timeout=self.timeout)
            headers, response = http.request(url + path, method=method, body=body, headers=headers)
            
            if int(headers['status']) != 200:
                raise SolrError(self._extract_error(headers, response))
            
            return response
        else:
            if headers is None:
                headers = {}
            
            conn = HTTPConnection(self.host, self.port)            
            conn.request(method, path, body, headers)
            

            
            response = conn.getresponse()
            
            if response.status != 200:
                raise SolrError(self._extract_error(dict(response.getheaders()), response.read()))
            
            return response.read()
Example #25
 def submit(self, method, request_uri, content=None, content_type=None,
                  content_length=None, chunked=False):
     headers = { 'Connection' : 'Keep-Alive'
               , 'Keep-Alive' : '300'
               }
     repository = self.getRepositoryURL()
     url = self.getRequestURL(request_uri)
     #
     http = Http()
     #http.add_credentials(self.auth_user, self.auth_pwd, self.domain)
     auth = base64.encodestring("%s:%s" % (self.auth_user, self.auth_pwd)).strip()
     headers['Authorization'] = 'Basic ' + auth
     if content is None:
         self._last_request = '%s ' % method + url
         response, body = http.request(url, method, headers=headers)
     else:
         self._last_request = '%s (%s) ' % (method, content_type)  + url
         headers['Content-Type'] = content_type
         if content_length:
             headers['Content-Length'] = str(content_length)
         response, body = http.request(url, method, body=content,
                                       headers=headers)
     response = FCRepoResponse(repository, method, request_uri,
                               response, body)
     return response
Example #26
def login(website, username, password):
    """auto login and return cookie if success

    return Error code if fails:
    '1': failed to load config
    '2': timeout
    '3': cookie not found in headers or response
    """
    cfg_data = load_config(website)
    if cfg_data is None:  # error
        return '1'

    method = 'POST'
    headers = cfg_data['headers']
    # body
    login_data = cfg_data.get('login_data', dict())
    login_data[cfg_data['field_user']] = username
    login_data[cfg_data['field_password']] = password
    authcode_name = cfg_data['field_authcode']
    if authcode_name:
        headers['Cookie'], login_data[authcode_name] = get_authcode(cfg_data['url_authcode'])
    body = urllib.urlencode(login_data)

    h = Http(timeout=T_TIMEOUT)
    try:
        rsp, content = h.request(cfg_data['url_login'], method, headers=headers, body=body)  # response 302
    except socket.timeout:
        return '2'

    # parse cookie
    clean_cookie_meth = getattr(clean_cookie, website, clean_cookie.raw)
    return clean_cookie_meth(rsp, content) or headers.get('Cookie', '3')
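
A hypothetical caller that maps the return codes documented in the docstring; the site name and credentials are placeholders.

result = login('example_forum', 'alice', 's3cret')
errors = {'1': 'failed to load config', '2': 'timeout', '3': 'cookie not found'}
if result in errors:
    print(errors[result])
else:
    print('got cookie: %s' % result)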
Example #27
    def post(self, application=None, event=None, description=None, priority=0):
        # Create the http object
        h = Http()
        
        # Set User-Agent
        headers = {'User-Agent': "Prowlpy/%s" % str(__version__)}
        
        # Perform the request and get the response headers and content
        data = {
            'apikey': self.apikey,
            'application': application,
            'event': event,
            'description': description,
            'priority': priority

        }
        headers["Content-type"] = "application/x-www-form-urlencoded"
        resp,content = h.request("%s/add/" % API_DOMAIN, "POST", headers=headers, body=urlencode(data))
        
        if resp['status'] == '200':
            return True
        elif resp['status'] == '401': 
            raise Exception("Auth Failed: %s" % content)
        else:
            raise Exception("Failed")
Example #28
    def _request(self, action, method="GET", args={}):
        query = args.copy()
        data = None
        headers = {}
        if method == "GET":
            url = self._url(action)
        if method == "POST":
            url = self._url(action, scheme="https")
            query["name"] = self.user
            query["pass"] = self.password
            data = urlencode(query)
            headers['Content-type'] = 'application/x-www-form-urlencoded'

        h = Http(cache=None, timeout=self.timeout)
        try:
            #print "%s %s\n> |%s|" % (method, url, data)
            resp, content = h.request(url, method, headers=headers, body=data)
            #print "< %s (%s)" % (content, resp)
            if resp.status == 200:
                data = json.loads(content)
                if "error" in data:
                    raise UserError(data["error"])
                else:
                    return data 
            else:
                raise ServerError(content)
        except AttributeError, e: # 'NoneType' object has no attribute 'makefile'
            raise ServerError("timeout/refused")
Example #29
    def check(self, instance):
        url = instance['url']
        default_timeout = self.init_config.get('default_timeout', 5)
        timeout = float(instance.get('timeout', default_timeout))
        tags = instance.get('tags', [])
        service_check_tags = ['url:%s' % url]

        try:
            h = Http(timeout=timeout)
            resp, content = h.request(url, "GET")
        except (socket.timeout, socket.error, HttpLib2Error) as e:
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                               message="Unable to fetch Riak stats: %s" % str(e),
                               tags=service_check_tags)
            raise

        if resp.status != 200:
            self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
                               tags=service_check_tags,
                               message="Unexpected status of %s when fetching Riak stats, "
                               "response: %s" % (resp.status, content))

        stats = json.loads(content)
        self.service_check(
            self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)

        [self.gauge("riak." + k, stats[k], tags=tags) for k in self.keys if k in stats]

        coord_redirs_total = stats["coord_redirs_total"]
        if self.prev_coord_redirs_total > -1:
            count = coord_redirs_total - self.prev_coord_redirs_total
            self.gauge('riak.coord_redirs', count)

        self.prev_coord_redirs_total = coord_redirs_total
Example #30
  def get(self):
    self.response.headers['Content-Type'] = 'application/json'

    self.response.write('Fb data\n\nCode : ')
    code = self.request.get('code')
    self.response.write(code)

    from httplib2 import Http
    from urllib import urlencode, unquote
    import json

    h = Http()    

    resp, data = h.request('https://graph.facebook.com/oauth/access_token?' + 'client_id=532876683483897&redirect_uri=http://localhost:8080/fbauthback&client_secret=73694db112e9ecf4b4fd455e28533705&code=' + code, 'GET')
    self.response.write('\n\nResponse : ')
    self.response.write(str(resp) + '\n\nData : ')
    self.response.write(data)

    
    try:
      data = json.loads(data)
    except Exception, e:
      logging.info('=======================')
      logging.info(e)
      logging.info('=======================')
Example #31
FILE_MASTERLIST = '../data/covid-19_IT.xlsx'
OUTPUT_DIRECTORY = 'Output/'
df = pd.read_excel(FILE_MASTERLIST)


# --------------------------------
# GDrive API: GDrive Authorization
# --------------------------------

SCOPES = ('https://www.googleapis.com/auth/drive',
          'https://www.googleapis.com/auth/spreadsheets')
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets(CLIENT_SECRET, SCOPES)
    creds = tools.run_flow(flow, store)
SERVICE = build('drive', 'v3', http=creds.authorize(Http()))
SS_SERVICE = build('sheets', 'v4', http=creds.authorize(Http()))


PARENT_FOLDER = '1MtOHnLPRJfWJM28DyFjhNN7ef8VUmTnn'


# ------------------------------------
# GDrive API: Check if Filename exists
# ------------------------------------
def fileInGDrive(filename):
    results = SERVICE.files().list(q="mimeType='application/vnd.google-apps.spreadsheet' and name='"+filename+"' and trashed = false and parents in '"+PARENT_FOLDER+"'",fields="nextPageToken, files(id, name)").execute()
    items = results.get('files', [])
    if items:
        return True
    else:
        return False
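
A hypothetical check before uploading; the filename is a placeholder.

filename = 'covid-19_IT_sheet'  # placeholder
if fileInGDrive(filename):
    print('sheet already exists in the folder, skipping upload')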
Example #32
class HttpTransport(object):
    def __init__(self, url, headers_factory):
        self._api_url = url
        self._headers_factory = headers_factory
        self._supported_methods = ("GET", "POST", "PUT", "HEAD", "DELETE",)
        self._attribute_stack = []
        self._method = "GET"
        self._posts = []
        self._http = Http()
        self._params = {}
        self._url_template = '%(domain)s/%(generated_url)s'
        self._stack_collapser = "/".join
        self._params_template = '?%s'

    def __call__(self, *args, **kwargs):
        self._attribute_stack += [str(a) for a in args]
        self._params = kwargs

        headers = self._headers_factory()

        if 'url' not in kwargs:
            url = self.get_url()
        else:
            url = self.get_url(kwargs['url'])

        if (self._method == "POST" or self._method == "PUT") and 'type' not in kwargs:
            headers.update({'content-type': 'application/json'})
            # Not sure if this will always work, but for validate/verify nothing else was working:
            body = json.dumps(kwargs)
        elif 'type' in kwargs:
            if kwargs['type'] == 'multipart/form-data':
                body, new_headers = multipart_encode(kwargs['body'])
                body = "".join(body)
                headers.update(new_headers)
            else:
                body = kwargs['body']
                headers.update({'content-type': kwargs['type']})
        else:
            body = self._generate_body()  # hack
        response, data = self._http.request(url, self._method, body=body, headers=headers)

        self._attribute_stack = []
        handler = kwargs.get('handler', _handle_response)
        return handler(response, data)

    def _generate_params(self, params):
        body = self._params_template % urlencode(params)
        if body is None:
            return ''
        return body

    def _generate_body(self):
        if self._method == 'POST':
            internal_params = self._params.copy()

            if 'GET' in internal_params:
                del internal_params['GET']

            return self._generate_params(internal_params)[1:]

    def _clear_content_type(self):
        """Clear content-type"""
        if 'content-type' in self._headers:
            del self._headers['content-type']

    def _clear_headers(self):
        """Clear all headers"""
        self._headers = {}

    def get_url(self, url=None):
        if url is None:
            url = self._url_template % {
                "domain": self._api_url,
                "generated_url": self._stack_collapser(self._attribute_stack),
            }
        else:
            url = self._url_template % {
                'domain': self._api_url,
                'generated_url': url[1:]
            }
            del self._params['url']

        if len(self._params):
            internal_params = self._params.copy()

            if 'handler' in internal_params:
                del internal_params['handler']

            if self._method == 'POST' or self._method == "PUT":
                if "GET" not in internal_params:
                    return url
                internal_params = internal_params['GET']
            url += self._generate_params(internal_params)
        return url

    def __getitem__(self, name):
        self._attribute_stack.append(name)
        return self

    def __getattr__(self, name):
        if name in self._supported_methods:
            self._method = name
        elif not name.endswith(')'):
            self._attribute_stack.append(name)
        return self
Example #33
                    self.json_files.append(self.base_url + attr[1])

    def handle_endtag(self, tag):
        pass

    def handle_data(self, data):
        pass

stig_html = urlopen("https://www.stigviewer.com/stigs").read().decode('utf-8', 'ignore')

stig_parser = STIGParser()
stig_parser.feed(clean_html(stig_html))
stig_parser.first_run = False
# Remove strange link that I can't access in loop
stig_parser.stig_links.remove("https://www.stigviewer.com/stig/symantec_antivirus_managed_client/")

h = Http()
for link in list(stig_parser.stig_links):  # iterate over a copy; the loop removes items
    response = h.request(link, "HEAD")[0]
    if response.status == 200:
        logger.info("Successfully reached {}".format(link))
    else:
        logger.info("Failed! {} does not exist. Removing.".format(link))
        stig_parser.stig_links.remove(link)

for link in stig_parser.stig_links:
    logger.info(link + " is being added")
    json_html = urlopen(link).read().decode('utf-8', 'ignore')

    stig_parser.feed(clean_html(json_html))
Example #34
"""
Shows basic usage of the Drive v3 API.

Creates a Drive v3 API service and prints the names and ids of the last 10 files
the user has access to.
"""
from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools

# Setup the Drive v3 API
SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
service = build('drive', 'v3', http=creds.authorize(Http()))

# Call the Drive v3 API
results = service.files().list(
    pageSize=10, fields="nextPageToken, files(id, name)").execute()
items = results.get('files', [])
if not items:
    print('No files found.')
else:
    print('Files:')
    for item in items:
        print('{0} ({1})'.format(item['name'], item['id']))
Example #35
                val = dict.get(self, key, default)

            if not val:
                break

            return val


SCOPES = 'https://www.googleapis.com/auth/drive.readonly.metadata'
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_id.json', SCOPES)
    creds = tools.run_flow(flow, store)

service = discovery.build('drive', 'v3', http=creds.authorize(Http()))
results = service.files().list(
    pageSize=1000,
    fields=
    "nextPageToken, files(name, permissions/emailAddress, owners/emailAddress)"
).execute()
token = results.get('nextPageToken', None)
items = results.get('files', [])

while token is not None:
    results = service.files().list(
        pageSize=1000,
        pageToken=token,
        fields=
        "nextPageToken, files(name, permissions/emailAddress, owners/emailAddress)"
    ).execute()
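    # The snippet is cut off mid-loop here; without re-reading nextPageToken the
    # while loop above would never terminate. A hedged completion:
    items.extend(results.get('files', []))
    token = results.get('nextPageToken', None)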
Example #36
def purge_version(version, mainsite=False, subdomain=False, cname=False):
    varnish_servers = getattr(settings, 'VARNISH_SERVERS', None)
    h = Http()
    if varnish_servers:
        for server in varnish_servers:
            if subdomain:
                #Send a request to the Server, to purge the URL of the Host.
                host = "%s.readthedocs.org" % version.project.slug
                headers = {'Host': host}
                url = "/en/%s/*" % version.slug
                to_purge = "http://%s%s" % (server, url)
                log.info("Purging %s on %s" % (url, host))
                h.request(to_purge, method="PURGE", headers=headers)
            if mainsite:
                headers = {'Host': "readthedocs.org"}
                url = "/docs/%s/en/%s/*" % (version.project.slug, version.slug)
                to_purge = "http://%s%s" % (server, url)
                log.info("Purging %s on readthedocs.org" % url)
                h.request(to_purge, method="PURGE", headers=headers)
                root_url = "/docs/%s/" % version.project.slug
                to_purge = "http://%s%s" % (server, root_url)
                log.info("Purging %s on readthedocs.org" % root_url)
                h.request(to_purge, method="PURGE", headers=headers)
            if cname:
                redis_conn = redis.Redis(**settings.REDIS)
                for cnamed in redis_conn.smembers('rtd_slug:v1:%s' %
                                                  version.project.slug):
                    headers = {'Host': cnamed}
                    url = "/en/%s/*" % version.slug
                    to_purge = "http://%s%s" % (server, url)
                    log.info("Purging %s on %s" % (url, cnamed))
                    h.request(to_purge, method="PURGE", headers=headers)
                    root_url = "/"
                    to_purge = "http://%s%s" % (server, root_url)
                    log.info("Purging %s on %s" % (root_url, cnamed))
                    h.request(to_purge, method="PURGE", headers=headers)
Example #37
from urllib import urlencode
from httplib2 import Http
import json
import sys
import base64

print "Running Endpoint Tester....\n"
address = raw_input(
    "Please enter the address of the server you want to access, \n If left blank the connection will be set to 'http://localhost:5000':   "
)
if address == '':
    address = 'http://localhost:5000'

#TEST 1: TRY TO REGISTER A NEW USER
try:
    h = Http()
    url = address + '/users'
    data = dict(username="******", password="******")
    data = json.dumps(data)
    resp, content = h.request(url,
                              'POST',
                              body=data,
                              headers={"Content-Type": "application/json"})
    if resp['status'] != '201' and resp['status'] != '200':
        raise Exception('Received an unsuccessful status code of %s' %
                        resp['status'])

except Exception as err:
    print "Test 1 FAILED: Could not make a new user"
    print err.args
    sys.exit()
Example #38
class RenrenHandler(object):
    userId = None

    def __init__(self,
                 username=None,
                 password=None,
                 serveraddr="http://www.renren.com/"):
        self.__username = username
        self.__password = password
        self.__serveraddr = serveraddr
        self.__headerTemplate = {
            'Accept':
            'application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
            'Accept-Charset':
            'UTF-8,*;q=0.5',
            'Accept-Encoding':
            'gzip,deflate,sdch',
            'Accept-Language':
            'zh-CN,zh;q=0.8',
            'Cache-Control':
            'max-age=0',
            'Connection':
            'keep-alive',
            'Host':
            'www.renren.com',
            'Referer':
            'http://www.renren.com/Home.do',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.65 Safari/534.24',
        }

        self.__Login()  # log in

    def __Login(self):
        self.myHttpHandler = Http()
        self.myHttpHandler.follow_redirects = False  # disable redirects
        login_header = self.__headerTemplate.copy()
        login_header['Content-type'] = "application/x-www-form-urlencoded"
        login_data = {
            "email": self.__username,
            "password": self.__password,
            "origURL": self.__serveraddr + "home",
            "domain": "renren.com",
        }
        login_url = self.__serveraddr + "PLogin.do"
        try:
            resp, content = self.myHttpHandler.request(
                login_url,
                "POST",
                headers=login_header,
                body=urllib.urlencode(login_data))
        except Exception, e:
            print e
            time.sleep(5)
            sys.exit()

        self.header = self.__headerTemplate.copy()
        self.header['Cookie'] = resp['set-cookie']

        tmptarget = re.search(" id=\d+[^;]", resp['set-cookie'])
        if tmptarget:
            self.userId = tmptarget.group(0)[4:]  #obtain userId
Example #39
def updateCalender():
    textToAnalyze_json = request.get_json()
    textToAnalyze = json.loads(
        json.dumps([
            textToAnalyze_json['selection'], textToAnalyze_json['sentence'],
            textToAnalyze_json['minutes']
        ]))
    # print(textToAnalyze['selection'])
    # print(textToAnalyze['sentence'])
    # print(textToAnalyze['minutes'])
    print(textToAnalyze)

    def read():
        # Call the Calendar API
        now = datetime.datetime.utcnow().isoformat(
        ) + 'Z'  # 'Z' indicates UTC time
        print('Getting the upcoming 10 events')
        events_result = service.events().list(calendarId='primary',
                                              timeMin=now,
                                              maxResults=10,
                                              singleEvents=True,
                                              orderBy='startTime').execute()
        events = events_result.get('items', [])

        if not events:
            print('No upcoming events found.')
        for event in events:
            start = event['start'].get('dateTime', event['start'].get('date'))
            print(start, event['summary'])
        return events

    def order(t):
        work = dict({
            "assignment": "1",
            "project": "1",
            "work": "1",
            "homework": "1",
            "lab": "1",
            "report": "1",
            "paper": "1",
            "math": "1",
            "engineering": "1",
            "biology": "1",
            "physics": "1",
            "boring": "1",
            "job": "1",
            "computer": "1",
            "science": "1",
            "journal": "1",
            "lecture": "1",
            "tutorial": "1",
            "exam": "1",
            "assessment": "1",
            "test": "1"
        })
        costs = np.zeros(7)
        #either work or other
        now = datetime.datetime.utcnow()
        counter = 0
        current_day = now.replace(hour=0, minute=0, second=0,
                                  microsecond=0) + relativedelta(days=1)
        current_day_limit = current_day + relativedelta(days=1)
        while (counter < 7):
            events_result = service.events().list(
                calendarId='primary',
                timeMin=(current_day.isoformat() + 'Z'),
                timeMax=(current_day_limit.isoformat() + 'Z'),
                singleEvents=True,
                orderBy='startTime').execute()
            events = events_result.get('items', [])
            for event in events:
                description = ''
                if (event.get('summary')):
                    description += event.get('summary') + ' : '
                if (event.get('description')):
                    description += event.get('description')
                is_work = False
                for word in description.split():
                    if word in work.keys():
                        is_work = True
                        break
                if ((is_work and t == 'work')
                        or (not is_work and t != 'work')):
                    costs[counter] += 1
            current_day = current_day_limit
            current_day_limit = current_day_limit + relativedelta(days=1)
            counter += 1

        lists = [1]
        for i in range(1, 7):
            counter = 0
            while (costs[i] > costs[counter]):
                counter += 1
            lists.insert(counter, i + 1)
        #for i in range(0, 7):
        #print(lists[i])
        return lists

    def analyze(order, duration):
        options = ['', '', '']
        number_options = 0
        for day in order:
            current_day = datetime.datetime.utcnow() + relativedelta(days=day)
            current_day = current_day.replace(hour=9,
                                              minute=0,
                                              second=0,
                                              microsecond=0)
            #current_day_limit = current_day + relativedelta(days=1)
            current_day_limit = current_day.replace(
                hour=23, minute=0, second=0,
                microsecond=0) - relativedelta(minutes=duration)
            current_time = current_day
            events_result = service.events().list(
                calendarId='primary',
                timeMin=current_day.isoformat() + 'Z',
                timeMax=current_day_limit.isoformat() + 'Z',
                singleEvents=True,
                orderBy='startTime').execute()
            events = events_result.get('items', [])
            if not events:
                #print('No upcoming events found.')
                return current_time
            while (current_time <= current_day_limit):
                #start = event['start'].get('dateTime')
                #start = datetime.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-04:00')
                #end = event['end'].get('dateTime')
                #end = datetime.datetime.strptime(end, '%Y-%m-%dT%H:%M:%S-04:00')
                #print(datetime.datetime.now())
                #print(datetime.datetime.now().replace(hour=23, microsecond=0).isoformat())
                #test = relativedelta(minutes=15)
                #start1 = end + relativedelta(minutes=15)
                #end1 = start1 + relativedelta(minutes=duration+15)
                #end2 = start - relativedelta(minutes=15)
                #start2 = end2 - relativedelta(minutes=duration+15)

                start = current_time - relativedelta(minutes=15)
                end = current_time + relativedelta(minutes=duration + 15)
                #start = current_time
                #end = current_time + relativedelta(minutes=duration)

                body = {
                    "timeMin": start.isoformat() + '-04:00',
                    "timeMax": end.isoformat() + '-04:00',
                    "timeZone": 'America/New_York',
                    "items": [{
                        "id": '*****@*****.**'
                    }]
                }
                eventsResult = service.freebusy().query(body=body).execute()
                #print('The event result is: ')
                #print(start)
                #print(end)
                #print(eventsResult)
                #print(eventsResult[u'calendars'])
                calendar_state = eventsResult[u'calendars']
                #print(test)
                email_state = calendar_state[u'*****@*****.**']
                #print(test)
                busy_state = email_state[u'busy']
                #print(test)
                #print('end')
                if (not busy_state):
                    options[number_options] = current_time.strftime(
                        "%Y-%m-%d %H:%M")
                    number_options += 1
                    current_time = current_time.replace(hour=23)
                    if (number_options == 3):
                        return options
                #cal_dict = eventsResult[u'calendars']
                #for cal_name in cal_dict:
                #    print(cal_name, cal_dict[cal_name])
                current_time = current_time + relativedelta(minutes=15)
        return options

    def insert(name, duration, t):
        day_order = order(t)
        suggestions = analyze(day_order, duration)
        #for s in range(0, len(suggestions)):
        #   print(suggestions[s])
        #json_dump = json.dumps(suggestions, default=json_serial)
        #json_dump=json.dumps(suggestions, indent=4, sort_keys=True, default=str)
        json_dump = json.dumps(
            {
                "0": suggestions[0],
                "1": suggestions[1],
                "2": suggestions[2]
            },
            sort_keys=True)
        #print(json_dump)
        print(json_dump)
        return json_dump

    def schedule(name, duration, t, suggestion):
        #edit the parsing method below based on what the result of suggestion is expected to be
        suggestion = datetime.datetime.strptime(suggestion, '%Y-%m-%d %H:%M')
        #ask front end to pick which time they want
        suggestion_end = suggestion + relativedelta(minutes=duration)
        event = {
            'summary': name,
            'description': t,
            'start': {
                'dateTime': suggestion.isoformat() + '-04:00',
                'timeZone': 'America/New_York',
            },
            'end': {
                'dateTime': suggestion_end.isoformat() + '-04:00',
                'timeZone': 'America/New_York',
            },
            'reminders': {
                'useDefault':
                False,
                'overrides': [
                    {
                        'method': 'email',
                        'minutes': 24 * 60
                    },
                    {
                        'method': 'popup',
                        'minutes': 10
                    },
                ],
            },
        }

        event = service.events().insert(calendarId='primary',
                                        body=event).execute()
        print('Event created: %s' % (event.get('htmlLink')))

    # Setup the Calendar API
    SCOPES = 'https://www.googleapis.com/auth/calendar'
    store = file.Storage('credentials.json')
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
        creds = tools.run_flow(flow, store)
    service = discovery.build('calendar', 'v3', http=creds.authorize(Http()))
    work = dict({
        "assignment": "1",
        "project": "1",
        "work": "1",
        "homework": "1",
        "lab": "1",
        "report": "1",
        "paper": "1",
        "math": "1",
        "engineering": "1",
        "biology": "1",
        "physics": "1",
        "boring": "1",
        "job": "1",
        "computer": "1",
        "science": "1",
        "journal": "1",
        "lecture": "1",
        "tutorial": "1",
        "exam": "1",
        "assessment": "1",
        "test": "1"
    })
    isWork = False
    sentence = textToAnalyze[1]
    for word in sentence.split():
        if word in work:
            isWork = True
    if isWork:
        schedule(sentence, int(textToAnalyze[2]), 'work', textToAnalyze[0])
    else:
        schedule(sentence, int(textToAnalyze[2]), 'other', textToAnalyze[0])

    #See if it's in work.

    responses = jsonify(response=['Done!'])
    responses.status_code = 200
    return (responses)
Example #40
    tweet.retweet()


# how to post tweets

api.update_status("This is a test!")

# sheets stuff
# Setup the Sheets API
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
service = build('sheets', 'v4', http=creds.authorize(Http()))

# Call the Sheets API
SPREADSHEET_ID = '1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms'  # TODO: change
RANGE_NAME = 'Class Data!A2:E'  # TODO: change
result = service.spreadsheets().values().get(spreadsheetId=SPREADSHEET_ID,
                                             range=RANGE_NAME).execute()
values = result.get('values', [])
if not values:
    print('No data found.')
else:
    print('Name, Major:')
    for row in values:
        # Print columns A and E, which correspond to indices 0 and 4.
        print('%s, %s' % (row[0], row[4]))
Example #41
            new_path = "/" + inj_value
    return new_path


print path_inject("", "est.php")

from engine.engine_lib.HttpRequest import HttpRequest

config = {}
url = '''http://www.zjyhxx.com'''
url_parse = urlparse.urlparse(url)
http = HttpRequest({
    'domain': url_parse.netloc,
    'timeout': 20,
    'follow_redirects': True
})
url_404 = "%s://%s/%s.abc" % (
    url_parse.scheme, url_parse.netloc, 'xgegoighig321hihi'
)  # build a nonexistent URL from the current timestamp and a random number, with an .abc suffix
header_404 = {'Host': url_parse.netloc}
# http = HttpRequest({'domain': url_parse.netloc, 'timeout': 20, 'follow_redirects':False})
try:
    http = Http()
    http.follow_redirects = False
    res, config['404_content'] = http.request(url_404,
                                              redirections=5,
                                              headers=header_404)
    config['404_status'] = int(res.get('status', 0))
except Exception, e:
    print e
    config['404_status'] = 200
Example #42
from apiclient.discovery import build  #google api library
import base64
from email.mime.text import MIMEText
from oauth2client import file, client, tools
from httplib2 import Http

store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets(
        'credentials.json', 'https://www.googleapis.com/auth/gmail.compose')
    # this scope only allows sending, not reading
    # to read mail as well, use the 'https://www.googleapis.com/auth/gmail.readonly' scope instead
    creds = tools.run_flow(flow, store)
service = build('gmail', 'v1', http=creds.authorize(Http()))


def create_message(sender, to, subject, message_text):
    """Create a message for an email.

  Args:
    sender: Email address of the sender.
    to: Email address of the receiver.
    subject: The subject of the email message.
    message_text: The text of the email message.

  Returns:
    An object containing a base64url encoded email object.
  """
    message = MIMEText(message_text)
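    # Hedged completion -- the snippet is truncated here. The canonical Gmail
    # quickstart fills in the headers and base64url-encodes the MIME message
    # (on Python 2, use message.as_string() without the decode):
    message['to'] = to
    message['from'] = sender
    message['subject'] = subject
    return {'raw': base64.urlsafe_b64encode(message.as_bytes()).decode()}

A hypothetical send call using the service built above; users().messages().send is the documented Gmail API method, and the addresses are placeholders.

body = create_message('me', 'to@example.com', 'Test', 'Hello from the API')
service.users().messages().send(userId='me', body=body).execute()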
Example #43
    def _check(self, instance):
        addr, username, password, timeout, headers, response_time, disable_ssl_validation, pattern, use_keystone = self._load_conf(
            instance)
        config = cfg.Config()
        api_config = config.get_config('Api')
        content = ''

        dimensions = self._set_dimensions({'url': addr}, instance)

        start = time.time()
        done = False
        retry = False
        while not done or retry:
            if use_keystone:
                key = keystone.Keystone(api_config)
                token = key.get_token()
                if token:
                    headers["X-Auth-Token"] = token
                    headers["Content-type"] = "application/json"
                else:
                    self.log.warning("""Unable to get token. Keystone API server may be down.
                                     Skipping check for {0}""".format(addr))
                    return
            try:
                self.log.debug("Connecting to %s" % addr)
                if disable_ssl_validation:
                    self.warning(
                        "Skipping SSL certificate validation for %s based on configuration" % addr)
                h = Http(timeout=timeout, disable_ssl_certificate_validation=disable_ssl_validation)
                if username is not None and password is not None:
                    h.add_credentials(username, password)
                resp, content = h.request(addr, "GET", headers=headers)

            except socket.timeout as e:
                length = int((time.time() - start) * 1000)
                self.log.info(
                    "%s is DOWN, error: %s. Connection failed after %s ms" % (addr, str(e), length))
                self.gauge('http_status', 1, dimensions=dimensions)
                return services_checks.Status.DOWN, "%s is DOWN, error: %s. Connection failed after %s ms" % (
                    addr, str(e), length)

            except HttpLib2Error as e:
                length = int((time.time() - start) * 1000)
                self.log.info(
                    "%s is DOWN, error: %s. Connection failed after %s ms" % (addr, str(e), length))
                self.gauge('http_status', 1, dimensions=dimensions)
                return services_checks.Status.DOWN, "%s is DOWN, error: %s. Connection failed after %s ms" % (
                    addr, str(e), length)

            except socket.error as e:
                length = int((time.time() - start) * 1000)
                self.log.info("%s is DOWN, error: %s. Connection failed after %s ms" % (
                    addr, repr(e), length))
                self.gauge('http_status', 1, dimensions=dimensions)
                return services_checks.Status.DOWN, "%s is DOWN, error: %s. Connection failed after %s ms" % (
                    addr, str(e), length)

            except httplib.ResponseNotReady as e:
                length = int((time.time() - start) * 1000)
                self.log.info("%s is DOWN, error: %s. Network is not routable after %s ms" % (
                    addr, repr(e), length))
                self.gauge('http_status', 1, dimensions=dimensions)
                return services_checks.Status.DOWN, "%s is DOWN, error: %s. Network is not routable after %s ms" % (
                    addr, str(e), length)

            except Exception as e:
                length = int((time.time() - start) * 1000)
                self.log.error(
                    "Unhandled exception %s. Connection failed after %s ms" % (str(e), length))
                self.gauge('http_status', 1, dimensions=dimensions)
                return services_checks.Status.DOWN, "%s is DOWN, error: %s. Connection failed after %s ms" % (
                    addr, str(e), length)

            if response_time:
                # Stop the timer as early as possible
                running_time = time.time() - start
                self.gauge('http_response_time', running_time, dimensions=dimensions)

            # TODO(dschroeder): Save/send content data when supported by API

            if int(resp.status) >= 400:
                if use_keystone and int(resp.status) == 401:
                    if retry:
                        self.log.error("%s is DOWN, unable to get a valid token to connect with" % (addr))
                        return services_checks.Status.DOWN, "%s is DOWN, unable to get a valid token to connect with" % (
                            addr)
                    else:
                        # Get a new token and retry
                        self.log.info("Token expired, getting new token and retrying...")
                        retry = True
                        key.refresh_token()
                        continue
                else:
                    self.log.info("%s is DOWN, error code: %s" % (addr, str(resp.status)))
                    self.gauge('http_status', 1, dimensions=dimensions)
                    return services_checks.Status.DOWN, "%s is DOWN, error code: %s" % (addr, str(resp.status))

            if pattern is not None:
                if re.search(pattern, content, re.DOTALL):
                    self.log.debug("Pattern match successful")
                else:
                    self.log.info("Pattern match failed! '%s' not in '%s'" % (pattern, content))
                    self.gauge('http_status', 1, dimensions=dimensions)
                    return services_checks.Status.DOWN, "Pattern match failed! '%s' not in '%s'" % (
                        pattern, content)

            self.log.debug("%s is UP" % addr)
            self.gauge('http_status', 0, dimensions=dimensions)
            done = True
            return services_checks.Status.UP, "%s is UP" % addr
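
# A distilled sketch of the retry-once-on-401 pattern used above. `key` stands
# in for any token provider with get_token()/refresh_token() methods (the
# names mirror the keystone helper shown here, not a public API).
def request_with_token_retry(http, url, key, headers):
    for attempt in range(2):
        headers["X-Auth-Token"] = key.get_token()
        resp, content = http.request(url, "GET", headers=headers)
        if int(resp.status) == 401 and attempt == 0:
            key.refresh_token()  # token likely expired; refresh and retry once
            continue
        return resp, content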
Example #44
0
def apicall():
    """API Call

    Pandas dataframe (sent as a payload) from API Call
    """
    try:
        # test_json = request.get_json()
        # test = pd.read_json(test_json, orient='records')

        # #To resolve the issue of TypeError: Cannot compare types 'ndarray(dtype=int64)' and 'str'
        # test['Dependents'] = [str(x) for x in list(test['Dependents'])]

        # #Getting the Loan_IDs separated out
        # loan_ids = test['Loan_ID']
        textToAnalyze_json = request.get_json()
        textToAnalyzeWhole = json.loads(json.dumps(textToAnalyze_json['text']))
        textToAnalyze = json.loads(json.dumps(textToAnalyze_json['text']))
        #Split text into array of sentences
        #textToAnalyze = textToAnalyze.split('.')
        #   regular expressions are easiest (and fastest)
        sentenceEnders = re.compile('[.!?]')
        textToAnalyze = sentenceEnders.split(textToAnalyze)
        #print(textToAnalyze)

    except Exception as e:
        raise e

    clf = 'emotion_classifier.pk'
    if not textToAnalyze or textToAnalyze == ['']:  # the split above yields a list, so compare against an empty list, not ""
        return (bad_request())
    else:

        #Load the count vectorizer

        #print("Loading the count vectorizer...")
        count_vect = None
        with open('models/countVectorizer.pk', 'rb') as f:
            count_vect = pickle.load(f)
        #Load the saved model
        #print("Loading the model...")
        loaded_model = None
        with open('models/emotion_classifier.pk', 'rb') as f2:
            loaded_model = pickle.load(f2)

        #print("The model has been loaded...doing predictions now...")
        predictions = []
        sequence = []  #better formatting.
        print("Text", textToAnalyze)
        for sentence in textToAnalyze:
            if sentence != " ":
                predictionIs = loaded_model.predict(
                    (count_vect.transform([sentence])))
                predictions.append(predictionIs)
                sequence.append(predictionIs[0])

        ############################################################################
        #JERRY'S CODE
        ############################################################################
        def read():
            # Call the Calendar API
            now = datetime.datetime.utcnow().isoformat(
            ) + 'Z'  # 'Z' indicates UTC time
            print('Getting the upcoming 10 events')
            events_result = service.events().list(
                calendarId='primary',
                timeMin=now,
                maxResults=10,
                singleEvents=True,
                orderBy='startTime').execute()
            events = events_result.get('items', [])

            if not events:
                print('No upcoming events found.')
            for event in events:
                start = event['start'].get('dateTime',
                                           event['start'].get('date'))
                print(start, event['summary'])
            return events

        def order(t):
            work = dict({
                "assignment": "1",
                "project": "1",
                "work": "1",
                "homework": "1",
                "lab": "1",
                "report": "1",
                "paper": "1",
                "math": "1",
                "engineering": "1",
                "biology": "1",
                "physics": "1",
                "boring": "1",
                "job": "1",
                "computer": "1",
                "science": "1",
                "journal": "1",
                "lecture": "1",
                "tutorial": "1",
                "exam": "1",
                "assessment": "1",
                "test": "1"
            })
            costs = np.zeros(7)
            #either work or other
            now = datetime.datetime.utcnow()
            counter = 0
            current_day = now.replace(
                hour=0, minute=0, second=0,
                microsecond=0) + relativedelta(days=1)
            current_day_limit = current_day + relativedelta(days=1)
            while (counter < 7):
                events_result = service.events().list(
                    calendarId='primary',
                    timeMin=(current_day.isoformat() + 'Z'),
                    timeMax=(current_day_limit.isoformat() + 'Z'),
                    singleEvents=True,
                    orderBy='startTime').execute()
                events = events_result.get('items', [])
                for event in events:
                    description = ''
                    if (event.get('summary')):
                        description += event.get('summary') + ' : '
                    if (event.get('description')):
                        description += event.get('description')
                    is_work = False
                    for word in description.split():
                        if word in work.keys():
                            is_work = True
                            break
                    if ((is_work and t == 'work')
                            or (not is_work and t != 'work')):
                        costs[counter] += 1
                current_day = current_day_limit
                current_day_limit = current_day_limit + relativedelta(days=1)
                counter += 1

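            # Rank the next seven days by event count (ascending) via a simple
            # insertion sort; `lists` ends up holding day offsets 1..7, least
            # busy first.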
            lists = [1]
            for i in range(1, 7):
                counter = 0
                while (costs[i] > costs[counter]):
                    counter += 1
                lists.insert(counter, i + 1)
            #for i in range(0, 7):
            #print(lists[i])
            return lists

        def analyze(order, duration):
            options = ['', '', '']
            number_options = 0
            for day in order:
                current_day = datetime.datetime.utcnow() + relativedelta(
                    days=day)
                current_day = current_day.replace(hour=9,
                                                  minute=0,
                                                  second=0,
                                                  microsecond=0)
                #current_day_limit = current_day + relativedelta(days=1)
                current_day_limit = current_day.replace(
                    hour=23, minute=0, second=0,
                    microsecond=0) - relativedelta(minutes=duration)
                current_time = current_day
                events_result = service.events().list(
                    calendarId='primary',
                    timeMin=current_day.isoformat() + 'Z',
                    timeMax=current_day_limit.isoformat() + 'Z',
                    singleEvents=True,
                    orderBy='startTime').execute()
                events = events_result.get('items', [])
                if not events:
                    #print('No upcoming events found.')
                    return current_time
                while (current_time <= current_day_limit):
                    #start = event['start'].get('dateTime')
                    #start = datetime.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-04:00')
                    #end = event['end'].get('dateTime')
                    #end = datetime.datetime.strptime(end, '%Y-%m-%dT%H:%M:%S-04:00')
                    #print(datetime.datetime.now())
                    #print(datetime.datetime.now().replace(hour=23, microsecond=0).isoformat())
                    #test = relativedelta(minutes=15)
                    #start1 = end + relativedelta(minutes=15)
                    #end1 = start1 + relativedelta(minutes=duration+15)
                    #end2 = start - relativedelta(minutes=15)
                    #start2 = end2 - relativedelta(minutes=duration+15)

                    start = current_time - relativedelta(minutes=15)
                    end = current_time + relativedelta(minutes=duration + 15)
                    #start = current_time
                    #end = current_time + relativedelta(minutes=duration)

                    body = {
                        "timeMin": start.isoformat() + '-04:00',
                        "timeMax": end.isoformat() + '-04:00',
                        "timeZone": 'America/New_York',
                        "items": [{
                            "id": '*****@*****.**'
                        }]
                    }
                    eventsResult = service.freebusy().query(
                        body=body).execute()
                    #print('The event result is: ')
                    #print(start)
                    #print(end)
                    #print(eventsResult)
                    #print(eventsResult[u'calendars'])
                    calendar_state = eventsResult[u'calendars']
                    #print(test)
                    email_state = calendar_state[u'*****@*****.**']
                    #print(test)
                    busy_state = email_state[u'busy']
                    #print(test)
                    #print('end')
                    if (not busy_state):
                        options[number_options] = current_time.strftime(
                            "%Y-%m-%d %H:%M")
                        number_options += 1
                        current_time = current_time.replace(hour=23)
                        if (number_options == 3):
                            return options
                    #cal_dict = eventsResult[u'calendars']
                    #for cal_name in cal_dict:
                    #    print(cal_name, cal_dict[cal_name])
                    current_time = current_time + relativedelta(minutes=15)
            return options
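
        # Distilled shape of the free/busy probe used in analyze() (sketch):
        # a window is free when the 'busy' list for the probed calendar id
        # comes back empty. The real calendar id is masked in the source, so
        # cal_id here is a placeholder.
        def window_is_free(cal_id, start, end):
            body = {"timeMin": start.isoformat() + 'Z',
                    "timeMax": end.isoformat() + 'Z',
                    "items": [{"id": cal_id}]}
            result = service.freebusy().query(body=body).execute()
            return not result['calendars'][cal_id]['busy']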

        def insert(name, duration, t):
            day_order = order(t)
            suggestions = analyze(day_order, duration)
            #for s in range(0, len(suggestions)):
            #   print(suggestions[s])
            #json_dump = json.dumps(suggestions, default=json_serial)
            #json_dump=json.dumps(suggestions, indent=4, sort_keys=True, default=str)
            #json_dump=json.dumps({"0": suggestions[0], "1": suggestions[1], "2": suggestions[2]}, sort_keys=True)
            suggestions = {
                "0": [suggestions[0]],
                "1": [suggestions[1]],
                "2": [suggestions[2]]
            }
            # #print(json_dump)
            # print(json_dump)
            #return json_dump
            return suggestions

        def schedule(name, duration, t, suggestion):
            #edit the parsing method below based on what the result of suggestion is expected to be
            suggestion = datetime.datetime.strptime(suggestion,
                                                    '%Y-%m-%dT%H:%M:%S-04:00')
            #ask front end to pick which time they want
            answer = input(
                'Would you like to have an event put on your calendar called '
                + name + ' on ' + suggestion.strftime("%Y-%m-%d at %H:%M") +
                ' oclock for ' + str(duration) + ' minutes? ')
            if (answer != 'no'):
                suggestion_end = suggestion + relativedelta(minutes=duration)
                event = {
                    'summary': name,
                    'description': t,
                    'start': {
                        'dateTime': suggestion.isoformat() + '-04:00',
                        'timeZone': 'America/New_York',
                    },
                    'end': {
                        'dateTime': suggestion_end.isoformat() + '-04:00',
                        'timeZone': 'America/New_York',
                    },
                    'reminders': {
                        'useDefault':
                        False,
                        'overrides': [
                            {
                                'method': 'email',
                                'minutes': 24 * 60
                            },
                            {
                                'method': 'popup',
                                'minutes': 10
                            },
                        ],
                    },
                }

                event = service.events().insert(calendarId='primary',
                                                body=event).execute()
                print('Event created: %s' % (event.get('htmlLink')))

        def neural_network_model(data):

            layer_1 = tf.add(tf.matmul(data, hidden_1_layer['weights']),
                             hidden_1_layer['biases'])
            # now goes through an activation function - sigmoid function
            layer_1 = tf.nn.relu(layer_1)
            # input for layer 2 = result of activ_func for layer 1
            layer_2 = tf.add(tf.matmul(layer_1, hidden_2_layer['weights']),
                             hidden_2_layer['biases'])
            layer_2 = tf.nn.relu(layer_2)

            layer_3 = tf.add(tf.matmul(layer_2, hidden_3_layer['weights']),
                             hidden_3_layer['biases'])
            layer_3 = tf.nn.relu(layer_3)

            output = tf.matmul(
                layer_3, output_layer['weights']) + output_layer['biases']

            return output

        def use_neural_network(input_data):
            prediction = neural_network_model(x)
            with open('models/lexicon.pickle', 'rb') as f:
                lexicon = pickle.load(f)

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                saver.restore(sess, "models/model.ckpt")
                # import the inspect_checkpoint library
                from tensorflow.python.tools import inspect_checkpoint as chkp

                # print all tensors in checkpoint file
                #chkp.print_tensors_in_checkpoint_file("./model.ckpt", tensor_name='', all_tensors=True)
                #saver.restore(sess,tf.train.latest_checkpoint('./'))

                current_words = word_tokenize(input_data.lower())
                current_words = [
                    lemmatizer.lemmatize(i) for i in current_words
                ]
                features = np.zeros(len(lexicon))

                for word in current_words:
                    if word.lower() in lexicon:
                        index_value = lexicon.index(word.lower())
                        # OR DO +=1, test both
                        features[index_value] += 1

                features = np.array(list(features))
                # pos: [1,0] , argmax: 0
                # neg: [0,1] , argmax: 1
                test = prediction.eval(feed_dict={x: [features]})
                print(test)
                test = test[0]
                difference = abs(test[0] - test[1])
                # ad-hoc confidence cutoff on the raw logits; closer scores fall through to the 0.5 ("neutral") return below
                if (difference >= 50):
                    result = (sess.run(
                        tf.argmax(prediction.eval(feed_dict={x: [features]}),
                                  1)))
                    if result[0] == 0:
                        #print('Positive:',input_data)
                        return 0
                    elif result[0] == 1:
                        #print('Negative:',input_data)
                        return 1
                return 0.5

        lemmatizer = WordNetLemmatizer()
        n_nodes_hl1 = 500
        n_nodes_hl2 = 500
        n_nodes_hl3 = 500

        n_classes = 2
        hm_data = 2000000

        batch_size = 32
        hm_epochs = 10
        x = tf.placeholder('float')
        y = tf.placeholder('float')

        current_epoch = tf.Variable(1)

        hidden_1_layer = {
            'f_fum': n_nodes_hl1,
            'weights': tf.Variable(tf.random_normal([205, n_nodes_hl1])),
            'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))
        }

        hidden_2_layer = {
            'f_fum': n_nodes_hl2,
            'weights': tf.Variable(tf.random_normal([n_nodes_hl1,
                                                     n_nodes_hl2])),
            'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))
        }

        hidden_3_layer = {
            'weights':
            tf.Variable(
                tf.truncated_normal([n_nodes_hl2, n_nodes_hl3], stddev=0.1)),
            'biases':
            tf.Variable(tf.constant(0.1, shape=[n_nodes_hl3]))
        }

        output_layer = {
            'weights':
            tf.Variable(
                tf.truncated_normal([n_nodes_hl3, n_classes], stddev=0.1)),
            'biases':
            tf.Variable(tf.constant(0.1, shape=[n_classes])),
        }
        saver = tf.train.import_meta_graph('models/model.ckpt.meta')

        sentiment = use_neural_network(textToAnalyzeWhole)

        ############################################################
        '''
        Find key action words: 
        '''
        powerWords = {
            'frustrated': 'Relax',
            'frustrating': 'Relax',
            'chill': 'Relax',
            'me': 'Depressed',
            'much': 'Relax',
            'my': 'Relax',
            'not': 'Relax',
            'overwhelmed': 'Relax',
            'vacation': 'Relax',
            'crazy': 'Relax',
            'stress': 'Relax',
            'stressed': 'Relax',
            'too': 'Relax',
            'sleep': 'Depressed',
            'burnt': 'Relax',
            'food': 'Relax',
            'control': 'Relax',
            'work': 'Action2',
            'relax': 'Relax',
            'relaxation': 'Relax',
            'hesistate': 'Procrast',
            'lazy': 'Procrast',
            'prolong': 'Procrast',
            'slow': 'Procrast',
            'apathetic': 'Procrast',
            'bored': 'Procrast',
            'boring': 'Procrast',
            'tedium': 'Procrast',
            'anime': 'Procrast',
            'netflix': 'Procrast',
            'movies': 'Procrast',
            'waste': 'Procrast',
            'ice cream': 'Procrast',
            'snack': 'Procrast',
            'binge': 'Procrast',
            'tv': 'Procrast',
            'game': 'Procrast',
            'video': 'Procrast',
            'facebook': 'Procrast',
            'twitter': 'Procrast',
            'instagram': 'Procrast',
            'twitch': 'Procrast',
            'league': 'Procrast',
            'guilt': 'Procrast',
            'shame': 'Procrast',
            'procrastinate': 'Procrast',
            'procrastination': 'Procrast',
            'procrastinated': 'Procrast',
            'wasted': 'Procrast',
            'time': 'Procrast',
            'hesitated': 'Procrast',
            'prolonged': 'Procrast',
            'procrastinating': 'Procrast',
            'wasting': 'Procrast',
            'dark': 'Depressed',
            'destroy': 'Depressed',
            'die': 'Depressed',
            'hate': 'Depressed',
            'kill': 'Depressed',
            'life': 'Depressed',
            'murder': 'Depressed',
            'myself': 'Depressed',
            'revenge': 'Depressed',
            'someone': 'Depressed',
            'understand': 'Depressed',
            'cry': 'Depressed',
            'worst': 'Depressed',
            'enemy': 'Depressed',
            'hurts': 'Depressed',
            'broken': 'Depressed',
            'erase': 'Depressed',
            'evil': 'Depressed',
            'pain': 'Depressed',
            'emotion': 'Depressed',
            'world': 'Depressed',
            'numb': 'Depressed',
            'emotions': 'Depressed',
            'supposed': 'Depressed',
            'strength': 'Depressed',
            'alone': 'Depressed',
            'depression': 'Depressed',
            'depressed': 'Depressed',
            'suicide': 'Depressed',
            'tears': 'Depressed',
            'tear': 'Depressed',
            'hole': 'Depressed',
            'chasm': 'Depressed',
            'burden': 'Depressed',
            'sad': 'Depressed',
            'died': 'Depressed',
            'cried': 'Depressed',
            'asleep': 'Depressed',
            'haze': 'Depressed',
            'energy': 'Depressed',
            'dreading': 'Depressed',
            'do': 'Action2',
            'find': 'Action1',
            'go': 'Action2',
            'need': 'Action1',
            'have': 'Action1',
            'play': 'Action1',
            'want': 'Action1',
            'must': 'Action1',
            'require': 'Action1',
            'required': 'Action1',
            'wish': 'Action1',
            'crave': 'Action1',
            'miss': 'Action1',
            'accomplish': 'Action2',
            'finish': 'Action2',
            'ace': 'Action2',
            'pass': 'Action2',
            'earn': 'Action2',
            'build': 'Action2',
            'achieve': 'Action2',
            'win': 'Action2',
            'create': 'Action2',
            'implement': 'Action2',
            'perform': 'Action2',
            'soon': 'Action2',
            'possible': 'Action2',
            'buy': 'Action2',
            'complete': 'Action2',
            'start': 'Action2',
            'exercise': 'Action2',
            'visit': 'Action2',
            'more': 'Action2',
            'use': 'Action2',
            'make': 'Action2',
            'try': 'Action2',
            'study': 'Action2',
            'accomplishing': 'Action2',
            'finishing': 'Action2',
            'aceing': 'Action2',
            'passing': 'Action2',
            'earning': 'Action2',
            'building': 'Action2',
            'achieveing': 'Action2',
            'wining': 'Action2',
            'createing': 'Action2',
            'implementing': 'Action2',
            'doing': 'Action2',
            'performing': 'Action2',
            'sooning': 'Action2',
            'possibleing': 'Action2',
            'buying': 'Action2',
            'completeing': 'Action2',
            'starting': 'Action2',
            'working': 'Action2',
            'exerciseing': 'Action2',
            'visiting': 'Action2',
            'moreing': 'Action2',
            'useing': 'Action2',
            'makeing': 'Action2',
            'trying': 'Action2',
            'studying': 'Action2',
            'going': 'Action2',
            'learn': 'Action2'
        }
        workWords = dict({
            "assignment": "1",
            "project": "1",
            "work": "1",
            "homework": "1",
            "lab": "1",
            "report": "1",
            "paper": "1",
            "math": "1",
            "engineering": "1",
            "biology": "1",
            "physics": "1",
            "boring": "1",
            "job": "1",
            "computer": "1",
            "science": "1",
            "journal": "1",
            "lecture": "1",
            "tutorial": "1",
            "exam": "1",
            "assessment": "1",
            "test": "1"
        })

        procrastinateCount = 0
        depressionCount = 0
        needRelaxCount = 0
        actionToggled = False
        actionSentence = ''
        sentenceAdded = False
        isWork = False
        workState = False
        i = 0
        for sentence in textToAnalyze:
            sentenceAdded = False
            if (sentence != " "):
                words = re.sub(r"[^\w]", " ", sentence).split()
                #See if words are in the hashmap
                for word in words:
                    if word in workWords:
                        isWork = True
                    if word in powerWords:
                        if powerWords[word] == 'Depressed' and predictions[i][
                                0] != "joy":
                            depressionCount += 1
                        if powerWords[word] == 'Relax' and predictions[i][
                                0] != "joy":
                            needRelaxCount += 1
                        if powerWords[word] == 'Procrast' and predictions[i][
                                0] != "joy":
                            procrastinateCount += 1
                        if powerWords[word] == 'Action1' or powerWords[
                                word] == 'Action2':
                            if actionToggled == True:
                                if sentenceAdded is False:
                                    actionSentence = sentence
                                    sentenceAdded = True
                                    workState = isWork
                            else:
                                actionToggled = True

            if i < len(predictions):
                i += 1

        print(workState)
        #Evaluate: pick the dominant negative state if it covers more than 40% of the sentences
        pain = max(depressionCount, needRelaxCount, procrastinateCount)
        if pain > int(len(textToAnalyze) * 0.4):
            if procrastinateCount >= pain:
                state = "procrastinate"
            if needRelaxCount >= pain:
                state = "relax"
            if depressionCount >= pain:
                state = "depression"
        else:
            state = "neutral"

        #Fallback pass: the keyword scan above can miss words, so also check the classifier labels.
        counter = 0
        for label in sequence:
            if label == "sadness":
                counter += 1

        if counter >= int(len(textToAnalyze) * 0.6) and counter >= 3:
            state = "depression"

        print(procrastinateCount)
        print(needRelaxCount)
        print(depressionCount)
        print(state)

        SCOPES = 'https://www.googleapis.com/auth/calendar'
        store = file.Storage('credentials.json')
        creds = store.get()
        if not creds or creds.invalid:
            flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
            creds = tools.run_flow(flow, store)
        service = discovery.build('calendar',
                                  'v3',
                                  http=creds.authorize(Http()))
        #j = 0

        if (actionSentence != ""):
            suggestions = insert(actionSentence, 60, 'work')
            # for sentence in actionSentences:
            #     if str(j) in suggestions:
            #         suggestions[str(j)].append(sentence)
            #     j += 1

        else:
            suggestions = {}

        # prediction_series = list(pd.Series(predictions))

        # final_predictions = pd.DataFrame(list(zip(loan_ids, prediction_series)))
        """We can be as creative in sending the responses.
           But we need to send the response codes as well.
        """
        #print(predictions)

        #Create dictionary with everything that I need to return.

        responses = jsonify(predictions=sequence,
                            sentence=actionSentence,
                            mindState=state,
                            calSuggestions=suggestions,
                            sentimentState=sentiment)
        responses.status_code = 200

        return (responses)
Example #45
0
sending through google-api-python-client
to a spreadsheet in Google Drive from the above-mentioned Google account.
'''

import os
import sys
import math
import time
from oauth2client.service_account import ServiceAccountCredentials
from httplib2 import Http
from googleapiclient.discovery import build

scopes = ['https://spreadsheets.google.com/feeds']
credentials = ServiceAccountCredentials.from_json_keyfile_name('/home/jan/klima2_Grpi_secret.json', scopes=scopes)
spreadsheetID = '1fJ5-2CCK3b5yTaENa4srdhkgqqIhx8pj9psYG9SijFo'
http_auth = credentials.authorize(Http())
service = build('sheets', 'v4', http=http_auth)

# reading from a spreadsheet just to see whether communication works
result = service.spreadsheets().values().get(
    spreadsheetId=spreadsheetID,
    range='b1:b3').execute()
print(result)

def add_row_to_google_sheet():
    # request to add one Row at the end of the spreadsheet
    requests = [{
        "appendDimension": {
            "sheetId": 0,
            "dimension": "ROWS",
            "length": 1}}]
Example #46
0
from __future__ import print_function
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools

SCOPES = ('https://www.googleapis.com/auth/apps.groups.settings ' +
          'https://www.googleapis.com/auth/admin.directory.group.readonly')

# Build the connections for 1) Admin SDK Directory API and 2) Groups Settings API
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
groups_service = build('admin', 'directory_v1', http=creds.authorize(Http()))
settings_service = build('groupssettings', 'v1', http=creds.authorize(Http()))

# Initialize the page iterators
group_nextPageToken = None
user_nextPageToken = None

# Iterate over each group
while True:
    group_results = groups_service.groups().list(customer='my_customer', maxResults='100', orderBy='email', pageToken=group_nextPageToken).execute()
    groups = group_results.get('groups', [])
    group_nextPageToken = group_results.get('nextPageToken', None)

    if not groups:
        print('No groups in the domain.')
    else:
        for group in groups:
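            # (the source is truncated here) A plausible continuation, as a
            # sketch: look up each group's settings through the Groups
            # Settings service built above.
            settings = settings_service.groups().get(
                groupUniqueId=group['email']).execute()
            print('{0}: whoCanJoin={1}'.format(group['email'],
                                               settings.get('whoCanJoin')))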
Example #47
0
class RequestHandler(meta.MetaMixin):
    """
    Generic class that handles HTTP requests.  Uses the Json Serialization
    handler by default, but only 'deserializes' response content.

    Optional Arguments / Meta:

        debug
            Boolean.  Toggle debug console output.  Default: False.

        ignore_ssl_validation
            Boolean.  Whether or not to ignore ssl validation errors.
            Default: False

        response_handler
            An un-instantiated Response Handler class used to return
            responses to the caller.  Default: drest.response.ResponseHandler.

        serialization_handler
            An un-instantiated Serialization Handler class used to
            serialize/deserialize data.
            Default: drest.serialization.JsonSerializationHandler.

        serialize
            Boolean.  Whether or not to serialize data before sending
            requests.  Default: False.

        deserialize
            Boolean.  Whether or not to deserialize data before returning
            the Response object.  Default: True.

        trailing_slash
            Boolean.  Whether or not to append a trailing slash to the
            request url.  Default: True.

        timeout
            The amount of seconds where a request should timeout.
            Default: None

    """
    class Meta:
        debug = False
        ignore_ssl_validation = False
        response_handler = response.ResponseHandler
        serialization_handler = serialization.JsonSerializationHandler
        serialize = False
        deserialize = True
        trailing_slash = True
        allow_get_body = False
        timeout = None

    def __init__(self, **kw):
        super(RequestHandler, self).__init__(**kw)
        self._extra_params = {}
        self._extra_url_params = {}
        self._extra_headers = {}
        self._auth_credentials = ()
        self._http = None

        if 'DREST_DEBUG' in os.environ and \
           os.environ['DREST_DEBUG'] in [1, '1']:
            self._meta.debug = True

        response.validate(self._meta.response_handler)
        if self._meta.serialization_handler:
            serialization.validate(self._meta.serialization_handler)
            self._serialization = self._meta.serialization_handler(**kw)
            headers = self._serialization.get_headers()
            for key in headers:
                self.add_header(key, headers[key])
        else:
            self._meta.serialize = False
            self._meta.deserialize = False

    def _serialize(self, data):
        if self._meta.serialize:
            return self._serialization.serialize(data)
        else:
            return data

    def _deserialize(self, data):
        if self._meta.deserialize:
            return self._serialization.deserialize(data)
        else:
            return data

    def set_auth_credentials(self, user, password):
        """
        Set the authentication user and password that will be used for
        HTTP Basic and Digest Authentication.

        Required Arguments:

            user
                The authentication username.

            password
                That user's password.

        """
        self._auth_credentials = (user, password)
        self._clear_http()

    def add_param(self, key, value):
        """
        Adds a key/value to self._extra_params, which is sent with every
        request.

        Required Arguments:

            key
                The key of the parameter.

            value
                The value of 'key'.

        """
        self._extra_params[key] = value

    def add_url_param(self, key, value):
        """
        Adds a key/value to self._extra_url_params, which is sent with every
        request (in the URL).

        Required Arguments:

            key
                The key of the parameter.

            value
                The value of 'key'.

        """
        self._extra_url_params[key] = value

    def add_header(self, key, value):
        """
        Adds a key/value to self._extra_headers, which is sent with every
        request.

        Required Arguments:

            key
                The key of the parameter.

            value
                The value of 'key'.

        """
        self._extra_headers[key] = value

    def _get_http(self):
        """
        Returns either the existing (cached) httplib2.Http() object, or
        a new instance of one.

        """
        if self._http is None:
            if self._meta.ignore_ssl_validation:
                self._http = Http(disable_ssl_certificate_validation=True,
                                  timeout=self._meta.timeout)
            else:
                self._http = Http(timeout=self._meta.timeout)

            if self._auth_credentials:
                self._http.add_credentials(self._auth_credentials[0],
                                           self._auth_credentials[1])
        return self._http

    def _clear_http(self):
        self._http = None

    def _make_request(self, url, method, payload=None, headers=None):
        """
        A wrapper around httplib2.Http.request.

        Required Arguments:

            url
                The url of the request.

            method
                The method of the request. I.e. 'GET', 'PUT', 'POST', 'DELETE'.

        Optional Arguments:

            payload
                The urlencoded parameters.

            headers
                Additional headers of the request.

        """
        if payload is None:
            if self._meta.serialize:
                payload = self._serialize({})
            else:
                payload = urlencode({})
        if headers is None:
            headers = {}

        try:
            http = self._get_http()
            return http.request(url, method, payload, headers=headers)

        except socket.error as e:
            # Try again just in case there was an issue with the cached _http
            try:
                self._clear_http()
                return self._get_http().request(url,
                                                method,
                                                payload,
                                                headers=headers)
            except socket.error as e:
                raise exc.dRestAPIError(e)

        except ServerNotFoundError as e:
            raise exc.dRestAPIError(e.args[0])

    def _get_complete_url(self, method, url, params):
        url = "%s%s" % (url.strip('/'),
                        '/' if self._meta.trailing_slash else '')

        if method == 'GET':
            url_params = dict(self._extra_url_params, **params)
        else:
            url_params = self._extra_url_params

        if url_params:
            url = "%s?%s" % (url, urlencode(url_params))

        return url

    def make_request(self, method, url, params=None, headers=None):
        """
        Make a call to a resource based on path, and parameters.

        Required Arguments:

            method
                One of HEAD, GET, POST, PUT, PATCH, DELETE, etc.

            url
                The full url of the request (without any parameters).  Any
                params (with GET method) and self.extra_url_params will be
                added to this url.

        Optional Arguments:

            params
                Dictionary of additional (one-time) keyword arguments for the
                request.

            headers
                Dictionary of additional (one-time) headers of the request.

        """
        if params is None:
            params = {}
        if headers is None:
            headers = {}
        params = dict(self._extra_params, **params)
        headers = dict(self._extra_headers, **headers)
        url = self._get_complete_url(method, url, params)

        if self._meta.serialize:
            payload = self._serialize(params)
        else:
            payload = urlencode(params)

        if self._meta.debug:
            print('DREST_DEBUG: method=%s url=%s params=%s headers=%s' % \
                   (method, url, payload, headers))

        if method == 'GET' and not self._meta.allow_get_body:
            payload = ''
            if self._meta.debug:
                print("DREST_DEBUG: suppressing body for GET request")

        res_headers, data = self._make_request(url,
                                               method,
                                               payload,
                                               headers=headers)
        unserialized_data = data
        serialized_data = None
        if self._meta.deserialize:
            serialized_data = data
            data = self._deserialize(data)

        return_response = response.ResponseHandler(
            int(res_headers['status']),
            data,
            res_headers,
        )

        return self.handle_response(return_response)

    def handle_response(self, response_object):
        """
        A simple wrapper to handle the response.  By default raises
        exc.dRestRequestError if the response code is within 400-499, or 500.
        Must return the original, or modified, response object.

        Required Arguments:

            response_object
                The response object created by the request.

        """
        response = response_object
        if (400 <= response.status <= 499) or (response.status == 500):
            msg = "Received HTTP Code %s - %s" % (
                response.status, httplib.responses[int(response.status)])
            raise exc.dRestRequestError(msg, response=response)
        return response
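
# Hypothetical usage sketch for the handler above; the endpoint URL and
# credentials are placeholders, not from the original source.
handler = RequestHandler(serialize=True, deserialize=True)
handler.set_auth_credentials('api_user', 'secret')
handler.add_header('X-Client', 'docs-example')
resp = handler.make_request('GET', 'http://localhost:8000/api/items/')
print(resp.status, resp.data)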
Example #48
0
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import file, client, tools
from facebook import *

SCOPES = 'https://www.googleapis.com/auth/calendar'
store = file.Storage(
    'token.pickle'
)  #File that stores user tokens -- create it beforehand; it can start empty
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('credentials.json',
                                          SCOPES)  # file from google
    creds = tools.run_flow(flow, store)
GCAL = discovery.build('calendar', 'v3', http=creds.authorize(Http()))

#currently looping from 0-5 (6 times)
for x in range(0, len(events_Array)):
    EVENT = {
        'summary': events_Array[x]['name'],
        'start': {
            'dateTime': events_Array[x]['start_time']
        },
        'end': {
            'dateTime': events_Array[x]['end_time']
        },
        'location': events_Array[x]['place']['name'],
        'description': events_Array[x]['description'],
    }
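    # The scraped loop builds EVENT but never submits it; presumably each one
    # is inserted into the calendar like this (sketch):
    GCAL.events().insert(calendarId='primary', body=EVENT).execute()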
from oauth2client import file, client, tools
import StringIO
import random

import apiclient
from apiclient.discovery import build
from email import Utils
from email import MIMEText

# Setup the Admin SDK Reports API
SCOPES = 'https://www.googleapis.com/auth/admin.reports.audit.readonly'
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
service = build('admin', 'reports_v1', http=creds.authorize(Http()))

print('Getting the last 10 login events')
results = service.activities().list(userKey='all',
                                    applicationName='login',
                                    maxResults=10).execute()
activities = results.get('items', [])

if not activities:
    print('No logins found.')
else:
    print('Logins:')
    for activity in activities:
        print('{0}: {1} ({2})'.format(activity['id']['time'],
                                      activity['actor']['email'],
                                      activity['events'][0]['name']))
def test_another_request():
    global host
    http = Http()
    response, content = http.request('http://%s/foobar' % host)
    assert response.status == 200
    assert 'WSGI intercept successful!' in content.decode('utf-8')
Example #51
0
import json
import sys
import base64
from httplib2 import Http


print "Running Endpoint Tester....\n"
address = raw_input("Please enter the address of the server you want to access, \n If left blank the connection will be set to 'http://localhost:5000':   ")
if address == '':
    address = 'http://localhost:5000'


#TEST 1 TRY TO MAKE A NEW USER
try:
    url = address + '/users'
    h = Http()
    #h.add_credentials('TinnyTim', 'Udacity')
    data = dict(username="******", password="******")
    data = json.dumps(data)
    resp, content = h.request(url, 'POST', body=data, headers={"Content-Type": "application/json"})
    print resp, content
    if resp['status'] != '201' and resp['status'] != '200':
        raise Exception('Received an unsuccessful status code of %s' % resp['status'])
except Exception as err:
    print "Test 1 FAILED: Could not make a new user"
    print err.args
    sys.exit()
else:
    print "Test 1 PASS: Successfully made a new user"

#TEST 2 ADD NEW BAGELS TO THE DATABASE
Example #52
0
    def __init__(self):
        super(Main, self).__init__()
        self.setupUi(self)

        try:
            self.gsm = GSM(self)
            self.gsm.set_port("COM7")
            self.gsm.set_baud_rate(9600)
            self.gsm.connect()
            self.gsm.start()
        except Exception as error:
            print(error)
        try:
            print("[INDICATOR] Connecting indicators...")
            self.indicator = Serial("COM3", 9600)
        except Exception as error:
            print(error)

        # Available Camera Slots
        self.camWindow_array.append(self.camWindow1)
        self.camWindow_array.append(self.camWindow2)
        self.camWindow_array.append(self.camWindow3)
        self.camWindow_array.append(self.camWindow4)

        self.btn_RegisterIdentity.clicked.connect(self.show_RegistrationForm)
        self.btn_EraseIdentity.clicked.connect(self.show_EraseIdentityForm)
        self.tabs.currentChanged.connect(self.onChangeTab)

        self.upload_manager = threading.Thread(target=self.uploadManager)
        self.upload_manager.daemon = True
        self.upload_manager.start()

        self.camera_limit = len(self.camWindow_array)

        rtsp_mode = QMessageBox.question(self, 'AL-TECHNOLOGIES',
                                         'Do you want to use RTSP Feed?',
                                         QMessageBox.Yes, QMessageBox.No)
        if rtsp_mode == QMessageBox.Yes:
            print("Connecting RTSP Feed...")
            self.mode = True
            for i in range(self.camera_limit):
                cam = Camera(self)
                cam.set_index(i)
                cam.set_mode(self.mode)
                cam.set_hwnd(self.camWindow_array[i].winId())
                cam.result_callback.connect(self.onNewResult)
                cam.start()
                self.camera_feeds.append(cam)
        else:
            print("Connecting Local Feeds...")
            self.mode = False
            for i in range(self.camera_limit):
                cam = Camera(self)
                cam.set_index(i)
                cam.set_mode(self.mode)
                cam.result_callback.connect(self.onNewResult)
                cam.start()
                self.camera_feeds.append(cam)

        scope = "https://www.googleapis.com/auth/drive"
        storage = "token.json"
        credentials = "credentials.json"

        try:
            store = File.Storage(storage)
            creds = store.get()
            if not creds or creds.invalid:
                flow = client.flow_from_clientsecrets(credentials, scope)
                creds = tools.run_flow(flow, store)
            self.service = build('drive', 'v3', http=creds.authorize(Http()))
            self.cloud_directory_service = build('drive',
                                                 'v3',
                                                 http=creds.authorize(Http()))
            self.local_directory_service = build('drive',
                                                 'v3',
                                                 http=creds.authorize(Http()))
            self.root = "1Fr8j9SfgZ7uDvX6i7gHHrjYV7GFq_0mO"
        except Exception as error:
            print(error)
            self.save_local = 1
            print("No internet connection.")
            print("Will be saving on local.")
Example #53
0
async def upload_google_photos(event):
    if event.fwd_from:
        return

    input_str = event.pattern_match.group(2)
    logger.info(input_str)

    if not event.reply_to_msg_id and not input_str:
        await event.edit(
            "©️ <b>[DCLXVI]</b>\nNo one gonna help you 🤣🤣🤣🤣", parse_mode="html"
        )
        return

    token_file = TOKEN_FILE_NAME
    is_cred_exists, creds = await check_creds(token_file, event)
    if not is_cred_exists:
        await event.edit("😏 <code>.gpsetup</code> first 😡😒😒", parse_mode="html")
        return
    service = build("photoslibrary", "v1", http=creds.authorize(Http()))

    # create directory if not exists
    if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(TEMP_DOWNLOAD_DIRECTORY)

    file_path = None

    if input_str and os.path.exists(input_str):
        file_path = input_str

    elif not input_str:
        media_message = await event.client.get_messages(
            entity=event.chat_id, ids=event.reply_to_msg_id
        )

        c_time = time.time()
        file_path = await media_message.download_media(
            file=TEMP_DOWNLOAD_DIRECTORY,
            progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
                progress(d, t, event, c_time, "[DOWNLOADING]")
            ),
        )

    logger.info(file_path)

    if not file_path:
        await event.edit("<b>[Stop Spamming]</b>", parse_mode="html")
        return

    file_name, mime_type, file_size = file_ops(file_path)
    await event.edit("`File downloaded, " "gathering upload informations.`")

    async with aiohttp.ClientSession() as session:
        headers = {
            "Content-Length": "0",
            "X-Goog-Upload-Command": "start",
            "X-Goog-Upload-Content-Type": mime_type,
            "X-Goog-Upload-File-Name": file_name,
            "X-Goog-Upload-Protocol": "resumable",
            "X-Goog-Upload-Raw-Size": str(file_size),
            "Authorization": "Bearer " + creds.access_token,
        }
        # Step 1: Initiating an upload session
        step_one_response = await session.post(
            f"{PHOTOS_BASE_URI}/v1/uploads", headers=headers,
        )

        if step_one_response.status != 200:
            await event.edit((await step_one_response.text()))
            return

        step_one_resp_headers = step_one_response.headers
        logger.info(step_one_resp_headers)
        # Step 2: Saving the session URL

        real_upload_url = step_one_resp_headers.get("X-Goog-Upload-URL")
        upload_granularity = int(
            step_one_resp_headers.get("X-Goog-Upload-Chunk-Granularity")
        )
        number_of_req_s = int((file_size / upload_granularity))

        async with aiofiles.open(file_path, mode="rb") as f_d:
            for i in range(number_of_req_s):
                current_chunk = await f_d.read(upload_granularity)

                headers = {
                    "Content-Length": str(len(current_chunk)),
                    "X-Goog-Upload-Command": "upload",
                    "X-Goog-Upload-Offset": str(i * upload_granularity),
                    "Authorization": "Bearer " + creds.access_token,
                }
                logger.info(i)
                logger.info(headers)
                response = await session.post(
                    real_upload_url, headers=headers, data=current_chunk
                )
                logger.info(response.headers)

                # sequential reads already advance the file offset, so no
                # explicit seek is needed between chunks
            current_chunk = await f_d.read(upload_granularity)

            logger.info(number_of_req_s)
            headers = {
                "Content-Length": str(len(current_chunk)),
                "X-Goog-Upload-Command": "upload, finalize",
                "X-Goog-Upload-Offset": str(number_of_req_s * upload_granularity),
                "Authorization": "Bearer " + creds.access_token,
            }
            logger.info(headers)
            response = await session.post(
                real_upload_url, headers=headers, data=current_chunk
            )
            logger.info(response.headers)

        final_response_text = await response.text()
        logger.info(final_response_text)

    await event.edit("`Uploaded to Google Photos, " "Getting FILE URI`")

    response_create_album = (
        service.mediaItems()
        .batchCreate(
            body={
                "newMediaItems": [
                    {
                        "description": file_name,
                        "simpleMediaItem": {
                            "fileName": file_name,
                            "uploadToken": final_response_text,
                        },
                    }
                ]
            }
        )
        .execute()
    )
    logger.info(response_create_album)

    try:
        photo_url = (
            response_create_album.get("newMediaItemResults")[0]
            .get("mediaItem")
            .get("productUrl")
        )
        await event.edit(f"`[SUCCESS]`\n\nUploaded to Google Photo [View Photo]({photo_url})")
    except Exception as e:
        await event.edit(str(e))
Example #54
0
from random import randint, choice
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import datetime
from googletrans import Translator
import aiohttp
import wolframalpha

#set up the calendar API
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
store = file.Storage('credentials_calendar.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))

client = wolframalpha.Client(open('WA_KEY').readline().rstrip())


class Api():
    def __init__(self, bot):
        self.bot = bot
        self.menus = {
            "almoço": "almoço",
            "jantar": "jantar",
            "almoço veg": "almoço vegetariano",
            "veg": "almoço vegetariano",
            "jantar veg": "jantar vegetariano"
        }
Example #55
0
def main(args):
    if not args.admin_email:
        print('admin_email must be specified.')
        sys.exit(1)

    if not args.service_account_key_path:
        print('service_account_key_path must be specified.')
        sys.exit(1)

    # Load the json format key that you downloaded from the Google API
    # Console when you created your service account. For p12 keys, use the
    # from_p12_keyfile method of ServiceAccountCredentials and specify the
    # service account email address, p12 keyfile, and scopes.
    service_credentials = Credentials.from_service_account_file(
        args.service_account_key_path,
        scopes=[
            'https://www.googleapis.com/auth/admin.directory.device.chromebrowsers.readonly'
        ],
        subject=args.admin_email)

    try:
        http = google_auth_httplib2.AuthorizedHttp(service_credentials,
                                                   http=Http())
        extensions_list = {}
        base_request_url = 'https://admin.googleapis.com/admin/directory/v1.1beta1/customer/my_customer/devices/chromebrowsers'
        request_parameters = ''
        browsers_processed = 0
        while True:
            print('Making request to server ...')

            retrycount = 0
            while retrycount < 5:
                response = http.request(
                    base_request_url + '?' + request_parameters, 'GET')[1]

                if isinstance(response, bytes):
                    response = response.decode('utf-8')
                data = json.loads(response)
                if 'browsers' not in data:
                    print('Response error, retrying...')
                    time.sleep(3)
                    retrycount += 1
                else:
                    break

            if 'browsers' not in data:
                # All retries failed; stop instead of raising a KeyError below.
                break

            browsers_in_data = len(data['browsers'])
            print('Request returned %s results, analyzing ...' %
                  (browsers_in_data))
            ComputeExtensionsList(extensions_list, data)
            browsers_processed += browsers_in_data

            if 'nextPageToken' not in data or not data['nextPageToken']:
                break

            print('%s browsers processed.' % (browsers_processed))

            if (args.max_browsers_to_process is not None
                    and args.max_browsers_to_process <= browsers_processed):
                print('Stopping at %s browsers processed.' %
                      (browsers_processed))
                break

            request_parameters = ('pageToken={}').format(data['nextPageToken'])
    finally:
        print('Analyze results ...')
        ExtensionListAsCsv(extensions_list, args.extension_list_csv)
        print("Results written to '%s'" % (args.extension_list_csv))
Example #56
0
def buildGoogleApiService(credentials):
    return build('calendar', 'v3', http=credentials.authorize(Http()))
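
# A minimal usage sketch (hypothetical file name; assumes oauth2client-style
# stored credentials, as in the other calendar examples in this collection):
from oauth2client import file as oauth_file

creds = oauth_file.Storage('credentials_calendar.json').get()
service = buildGoogleApiService(creds)
upcoming = service.events().list(calendarId='primary', maxResults=5).execute()
print(upcoming.get('items', []))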
Example #57
0
def get_collected(event_id):
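    # httplib2's Http.request returns a (response, content) tuple.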
    response = Http().request('http://sandbox.hortonworks.com:8000/events/' + str(event_id) + '/r:collected', 'GET',
                              headers={'Accept': 'application/octet-stream'})
    status = response[0]['status']
    return int(response[1]) if status == '200' else 0
Example #58
0
from bs4 import BeautifulSoup
import re
import time
import dateutil.parser as parser
import datetime
import csv

from apiclient import discovery
from httplib2 import Http
from oauth2client import file, client, tools

# Creating a storage.JSON file with authentication details
SCOPES = 'https://www.googleapis.com/auth/gmail.modify'  # we are using modify and not readonly, as we will be marking the messages Read
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('gmail.json', SCOPES)
    creds = tools.run_flow(flow, store)
GMAIL = discovery.build('gmail', 'v1', http=creds.authorize(Http()))

user_id = 'me'
label_id_one = 'INBOX'
label_id_two = 'UNREAD'

# Getting all the unread messages from Inbox
# labelIds can be changed accordingly
#unread_msgs = GMAIL.users().messages().list(userId='me', labelIds=[label_id_one, label_id_two]).execute()
unread_msgs = GMAIL.users().messages().list(userId='me',
                                            maxResults=10).execute()

# We get a dictionary. Now read the values for the key 'messages'.
mssg_list = unread_msgs.get('messages', [])  # key is absent when there are no results

print("Total unread messages in inbox: ", str(len(mssg_list)))
Example #59
0
def put_collected(event_id, collected):
    return Http().request('http://sandbox.hortonworks.com:8000/events/' + str(event_id) + '/r:collected', 'PUT',
                           body=str(collected), headers={'content-type': 'application/octet-stream'})
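
# A hedged usage sketch of the two helpers (get_collected is defined in
# Example #57, and event id 42 is hypothetical): read the current counter,
# then write back an incremented value.
current = get_collected(42)
put_collected(42, current + 1)
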
from apiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools

SCOPES = 'https://www.googleapis.com/auth/gmail.readonly'
CLIENT_SECRET = 'client_secret.json'
store = file.Storage('credentials.json')
creds = store.get()
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
GMAIL = build('gmail', 'v1', http=creds.authorize(Http()))

threads = GMAIL.users().threads().list(
    userId='me', q='from:[email protected]').execute().get('threads', [])

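# For each matching thread, fetch its messages and print the first message's
# Subject header (Gmail returns headers as a list of name/value dicts).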
for thread in threads:
    tdata = GMAIL.users().threads().get(userId='me', id=thread['id']).execute()
    nmsgs = len(tdata['messages'])

    msg = tdata['messages'][0]['payload']
    subject = ''
    for header in msg['headers']:
        if header['name'] == 'Subject':
            subject = header['value']
            break
    if subject:
        print(subject)