def get_authorized_client(self, oauth_verifier, oauth_token):
    """Finish the OAuth 1.0a dance: trade the verifier/token pair for access
    credentials and return an authorized oauth2.Client.

    Returns a 2-tuple of (oauth2.Client, access-token dict); the dict holds
    at least 'oauth_token' and 'oauth_token_secret'.
    """
    import urlparse
    resp, body = self.get_access_token(oauth_verifier, oauth_token)
    # The access-token endpoint answers with a form-encoded body.
    credentials = dict(urlparse.parse_qsl(body))
    user_token = oauth2.Token(credentials["oauth_token"],
                              credentials["oauth_token_secret"])
    user_token.set_verifier(oauth_verifier)
    authorized = oauth2.Client(self.get_consumer(), user_token)
    return authorized, credentials
def getFeedChannel():
    '''
    Get channel list from Amiko receiver and find last channel by freq
    (the channel stored just before the one whose urlencode() contains
    'freq=610000', i.e. before DVB-T2) and return [channel name, url],
    both strings.  Returns None implicitly when no such channel exists.

    BUG FIX: feedname was unbound if the very first channel matched
    'freq=610000' (NameError); both accumulators are now initialised.
    Also removed the unused local `s` and narrowed bare `except:`
    clauses to KeyError (the only plausible failure: missing dict keys).
    '''
    url = 'http://{}:8080/cfg/get_channel_list.cgi'.format(ip)
    data = requests.get(url).text
    transponders = {}          # tponid -> transponder parameter dict
    channels = []
    channelsUrls = []
    for line in data.split('\n'):
        if line == '':
            continue
        currentDict = dict(parse.parse_qsl(line))
        if 'tpfreq' in currentDict:
            # Transponder record: remember it keyed by its onid.
            transponders[currentDict['tponid'].strip()] = currentDict
        elif 'svrnam' in currentDict:
            # Service record: merge with its transponder.
            # NOTE(review): stored keys are .strip()ed but this lookup is
            # not -- a padded tponid would raise KeyError; confirm feed format.
            d = merge_two_dicts(transponders[currentDict['tponid']], currentDict)
            if d['tppola'] == '0':
                d['pol'] = 'h'
            elif d['tppola'] == '1':
                d['pol'] = 'v'
            channels.append(d)
    for ch in channels:
        tempDict = {}
        try:
            tempDict['freq'] = ch['tpfreq']
            tempDict['pol'] = ch['pol']
            # Modulation defaults to '8psk' when the receiver omits it.
            tempDict['mtype'] = ch.get('tpmodu', '8psk')
            tempDict['sr'] = ch['tpsymb']
            APID = ch['audpid'].split(',')[0]
            VPID = ch['vidpid']
            PMT = ch['pmtpid']
            tempDict['name'] = ch['svrnam']
            tempDict['pids'] = ['0', '17', '18', VPID, APID, PMT]
            channelsUrls.append(tempDict)
        except KeyError:
            # Channel lacks a mandatory field -- skip it (original behavior).
            pass
    feedname = ''
    feedUrl = ''
    for i in channelsUrls:
        tempPids = ','.join(i['pids'])
        name = i['name']
        del i['pids']
        del i['name']
        if 'freq=610000' in urlencode(i):
            # Return the channel seen just BEFORE the 610000-MHz one.
            return [feedname, feedUrl]
        feedname = name
        feedUrl = urlencode(i) + '&pids=' + tempPids
def add_params(url, params):
    """Return *url* with the key/value pairs in *params* merged into its
    query string (existing keys are overwritten by *params*).

    BUG FIX: the function imported the Python-2-only `urlparse` and
    `urllib.urlencode` names and therefore crashed with ImportError /
    AttributeError under Python 3.  The imports are now version-agnostic;
    behavior under Python 2 is unchanged.
    """
    try:  # Python 2
        from urlparse import urlparse, parse_qsl, urlunparse
        from urllib import urlencode
    except ImportError:  # Python 3
        from urllib.parse import urlparse, parse_qsl, urlunparse, urlencode
    url_parts = list(urlparse(url))
    query = dict(parse_qsl(url_parts[4]))  # index 4 is the query component
    query.update(params)
    url_parts[4] = urlencode(query)
    return urlunparse(url_parts)
def index1(request):
    """Django view: request a LinkedIn OAuth request token, stash the
    consumer and token pieces in the session, and serve jsp.js with the
    'myvar' placeholder replaced by the token.

    BUG FIX: the jsp.js file handle was never closed (resource leak) --
    now opened with a context manager.  The dead local `authorize` was
    removed.
    """
    consumer = oauth.Consumer(consumer_key, consumer_secret)
    request.session['consumer'] = consumer
    client = oauth.Client(consumer)
    request_token_url = 'https://api.linkedin.com/uas/oauth/requestToken'
    resp, content = client.request(request_token_url, "POST")
    # Response body is form-encoded: oauth_token=...&oauth_token_secret=...
    request_token = dict(urlparse.parse_qsl(content))
    oauth_token = request_token['oauth_token']
    request.session['oauth_token'] = oauth_token
    oauth_token_secret = request_token['oauth_token_secret']
    request.session['oauth_token_secret'] = oauth_token_secret
    # Only the first line of jsp.js is used (original behavior).
    with open('jsp.js', 'rb') as fopen:
        a = fopen.readline()
    b = a.replace('myvar', oauth_token)
    return HttpResponse(b)
def index1(request):
    """Django view: request a LinkedIn OAuth request token, stash the
    consumer and token pieces in the session, and serve jsp.js with the
    'myvar' placeholder replaced by the token.

    BUG FIX: the jsp.js file handle was never closed (resource leak) --
    now opened with a context manager.  The dead local `authorize` was
    removed.
    """
    consumer = oauth.Consumer(consumer_key, consumer_secret)
    request.session['consumer'] = consumer
    client = oauth.Client(consumer)
    request_token_url = 'https://api.linkedin.com/uas/oauth/requestToken'
    resp, content = client.request(request_token_url, "POST")
    # Response body is form-encoded: oauth_token=...&oauth_token_secret=...
    request_token = dict(urlparse.parse_qsl(content))
    oauth_token = request_token['oauth_token']
    request.session['oauth_token'] = oauth_token
    oauth_token_secret = request_token['oauth_token_secret']
    request.session['oauth_token_secret'] = oauth_token_secret
    # Only the first line of jsp.js is used (original behavior).
    with open('jsp.js', 'rb') as fopen:
        a = fopen.readline()
    b = a.replace('myvar', oauth_token)
    return HttpResponse(b)
def get_params(url, specify_params=False):
    """Return the query parameters of *url* as a list of (key, value) tuples.

    If *specify_params* is a comma-separated string of keys, those keys are
    excluded from the result.

    BUG FIX: the specify_params branch referenced an undefined name
    `parseurl` (guaranteed NameError) and threw its set-difference result
    away.  It now applies the apparent intent -- parameters minus the
    specified keys -- to the parsed query.  The default path (the only one
    that previously worked) is byte-for-byte behavior-compatible.
    The urlparse imports are also made Python 2/3 agnostic.
    """
    try:  # Python 2
        from urlparse import urlparse, parse_qsl
    except ImportError:  # Python 3
        from urllib.parse import urlparse, parse_qsl
    parsed = urlparse(url)
    params = list(parse_qsl(parsed.query))
    if specify_params:
        excluded = set(specify_params.split(','))
        params = [pair for pair in params if pair[0] not in excluded]
    return params
def index(request):
    """Django view: complete the LinkedIn OAuth 1.0a handshake using the
    pieces stored in the session by index1, fetch the member's profile,
    persist profile/groups/education/skills/positions/company updates into
    the 'linkedin' database alias, then close the browser popup.

    NOTE(review): reconstructed from a collapsed source line -- the nesting
    of the company-updates section is ambiguous; confirm against history.
    """
    # Session state written by the companion index1 view.
    a = request.session['consumer']
    b = request.session['oauth_token']
    c = request.session['oauth_token_secret']
    q = request.GET.get('oauth_verifier', 'refused')
    oauth_verifier = q
    # NOTE(review): the fallback 'refused' is a truthy string, so this
    # branch runs even when the user denied access -- confirm intended.
    if oauth_verifier:
        access_token_url = 'https://api.linkedin.com/uas/oauth/accessToken'
        token = oauth.Token(b, c)
        token.set_verifier(oauth_verifier)
        client = oauth.Client(a, token)
        resp, content = client.request(access_token_url, "POST")
        # Form-encoded response body -> dict of credentials.
        access_token = dict(urlparse.parse_qsl(content))
        USER_TOKEN = access_token['oauth_token']
        USER_SECRET = access_token['oauth_token_secret']
        RETURN_URL = 'http://127.0.0.1:8000/lkdn/'
        auth = linkedin.LinkedInDeveloperAuthentication(
            consumer_key, consumer_secret, USER_TOKEN, USER_SECRET,
            RETURN_URL, permissions=linkedin.PERMISSIONS.enums.values())
        app = linkedin.LinkedInApplication(auth)
        x = app.get_profile(selectors=[
            'id', 'firstName', 'lastName', 'location', 'numConnections',
            'skills', 'educations', 'group_memberships', 'interests',
            'positions'
        ])
        ky = x.keys()
        count = 0
        # Fill in "null" defaults for selectors missing from the profile.
        # NOTE(review): `count` is never reset between the four scans below,
        # so once one key is found the remaining defaults are never applied
        # -- looks like a latent bug; confirm.
        for i in range(len(ky)):
            if ky[i] == "skills":
                count = 1
                break
        if count != 1:
            updat_dict = {'skills': "null"}
            x.update(updat_dict)
        for i in range(len(ky)):
            if ky[i] == "educations":
                count = 1
                break
        if count != 1:
            updat_dict = {'educations': "null"}
            x.update(updat_dict)
        for i in range(len(ky)):
            if ky[i] == "location":
                count = 1
                break
        if count != 1:
            updat_dict = {'location': "null"}
            x.update(updat_dict)
        for i in range(len(ky)):
            if ky[i] == "distance":
                count = 1
                break
        if count != 1:
            updat_dict = {'distance': "null"}
            x.update(updat_dict)
        # Helper defined elsewhere in this project; presumably normalises
        # the education records -- TODO confirm.
        edu_key(x['educations']['values'])
        # Persist group memberships (per-user link row + group row).
        grplen = x['groupMemberships']['_count']
        for i in range(grplen):
            grpid = x['groupMemberships']['values'][i]['group']['id']
            grpname = x['groupMemberships']['values'][i]['group']['name']
            p1 = usergroup(Userids=x['id'], groupid=grpid, groupname=grpname)
            p1.save(using="linkedin")
            p5 = groups(groupid=grpid, groupname=grpname)
            p5.save(using="linkedin")
        # Persist education records; malformed entries are skipped.
        edulen = x['educations']['_total']
        for i in range(edulen):
            try:
                deg = x['educations']['values'][i]['degree']
                schl = x['educations']['values'][i]['schoolName']
                eduid = x['educations']['values'][i]['id']
                startDate = x['educations']['values'][i]['startDate']
                endDate = x['educations']['values'][i]['endDate']
                fieldofstudy = x['educations']['values'][i]['fieldOfStudy']
                p2 = usereducation(Userids=x['id'], eduid=eduid,
                                   fieldofstudy=fieldofstudy, school=schl,
                                   degree=deg, startDate=startDate,
                                   endDate=endDate)
                p2.save(using="linkedin")
            except:
                # NOTE(review): bare except silently drops the record.
                print "sorry"
        # Persist skills.
        skillen = x['skills']['_total']
        for i in range(skillen):
            try:
                skill = x['skills']['values'][i]['skill']['name']
                skillid = x['skills']['values'][i]['id']
                p3 = Userskill(Userids=x['id'], skillid=skillid,
                               skillname=skill)
                p3.save(using="linkedin")
            except:
                print "sorry"
        # Persist positions (company links).
        poslen = x['positions']['_total']
        for i in range(poslen):
            try:
                compid = x['positions']['values'][i]['company']['id']
                compname = x['positions']['values'][i]['company']['name']
                p6 = userposition(ids=x['id'], companyid=compid,
                                  compname=compname)
                p6.save(using="linkedin")
            except:
                print "sorry"
        # NOTE(review): uses `compid` left over from the LAST loop iteration,
        # so only one company's updates are fetched (and this raises
        # NameError if the profile has no positions) -- confirm intent.
        x1 = app.get_company_updates(compid)
        a = x1['values']
        company_key(a)
        for j in range(x1['_count']):
            try:
                updateKey = x1['values'][j]['updateKey']
                timestamp = x1['values'][j]['timestamp']
                numLikes = x1['values'][j]['numLikes']
                updatecontent = x1['values'][j]['updateContent'][
                    'companyJobUpdate']['job']['description']
                updatecomments = x1['values'][j]['updateComments']
                p7 = company(updateKey=updateKey,
                             updatecontent=updatecontent,
                             companyid=compid, timestamp=timestamp,
                             numLikes=numLikes,
                             updatecomments=updatecomments)
                p7.save(using="linkedin")
            except:
                print "sorry"
        # Finally persist the profile row itself.
        p5 = userprofile(firstname=x['firstName'], lastname=x['lastName'],
                         Userids=x['id'], location=x['location'],
                         numconnections=x['numConnections'],
                         interests=x['interests'])
        p5.save(using="linkedin")
        # Close the OAuth popup window.
        return HttpResponse(
            "<script type='text/javascript'>d=window.open('http://www.google.com','_self');d.close();</script>"
        )
    else:
        return HttpResponse(
            "<script type='text/javascript'>d=window.open('http://www.google.com','_self');d.close();</script>"
        )
def __init__(self, durl, mode="r+", username="", password="",
             architecture=platform.machine(), **kwargs):
    """
    data is a switcher, which allows to use the same interface for many
    different possible data storage mechanisms and formats.

    Parameters:
      durl         : data URL; the scheme selects the back end ("file" when empty)
      mode         : "r+", "w" or "ro"; may also be set via a ?mode=... URL query
      username     : overrides any username embedded in the URL
      password     : overrides any password embedded in the URL
      architecture : caller architecture (default: this machine)
      kwargs       : back-end options (compress, npcompress, parallel,
                     maxbuffersize, autocorrection, autodefragmentation)

    Raises TypeError for non-str mode/username/password and ValueError for
    directories, read-only files opened for writing, unknown modes, and
    unimplemented URL schemes.

    BUG FIXES: `dist(...)` -> `dict(...)` (NameError); `self.path = dburl`
    -> `durl` (NameError); `self.dbtype` -> `self.dtype` in the unsupported-
    scheme branch (the attribute set above is `dtype`); URL parsing now uses
    an explicit local import so it works whether the module imported the
    `urlparse` function or the module.
    """
    self.logger = logging.getLogger("simtoolkit.data.data")
    self.durl = durl
    self.mode = "w+"  # provisional; normally overridden by query or `mode` arg
    # Version-agnostic URL helpers, independent of module-level import style.
    try:  # Python 2
        from urlparse import urlparse as _urlparse, parse_qsl as _parse_qsl
    except ImportError:  # Python 3
        from urllib.parse import urlparse as _urlparse, parse_qsl as _parse_qsl
    up = _urlparse(durl)
    self.dtype = "file" if up.scheme == "" else up.scheme
    if up.query != "":
        # BUG FIX: was `dist(...)` -- an undefined name.
        upq = dict(_parse_qsl(up.query))
        if "mode" in upq:
            self.mode = upq['mode']
    # BUG FIX: was the undefined name `dburl`; fall back to the raw URL.
    self.path = durl if up.path == "" else up.path
    self.username = up.username
    self.password = up.password
    if type(mode) is str:
        if mode != "":
            self.mode = mode.lower()
    else:
        self.logger.error(
            "----------------------------------------------------")
        self.logger.error(" DATA ERROR in __init__")
        self.logger.error(
            " Incorrect type of mode argument. It should be a str. {} is given"
            .format(type(mode)))
        self.logger.error(
            "----------------------------------------------------")
        raise TypeError(
            "Incorrect type of mode argument. It should be a str. {} is given"
            .format(type(mode)))
    if type(username) is str:
        if username != "":
            self.username = username
    else:
        self.logger.error(
            "----------------------------------------------------")
        self.logger.error(" DATA ERROR in __init__")
        self.logger.error(
            " Incorrect type of username argument. It should be a str. {} is given"
            .format(type(username)))
        self.logger.error(
            "----------------------------------------------------")
        raise TypeError(
            "Incorrect type of username argument. It should be a str. {} is given"
            .format(type(username)))
    if type(password) is str:
        if password != "":
            self.password = password
    else:
        self.logger.error(
            "----------------------------------------------------")
        self.logger.error(" DATA ERROR in __init__")
        self.logger.error(
            " Incorrect type of password argument. It should be a str. {} is given"
            .format(type(password)))
        self.logger.error(
            "----------------------------------------------------")
        raise TypeError(
            "Incorrect type of password argument. It should be a str. {} is given"
            .format(type(password)))
    # Default values
    if self.dtype == "":
        self.dtype = "file"
    if self.dtype == "file":
        if os.path.isdir(self.path):
            self.logger.error(
                "----------------------------------------------------")
            self.logger.error(" DATA ERROR in __init__")
            self.logger.error(" The {} is a directory".format(self.path))
            self.logger.error(
                "----------------------------------------------------")
            raise ValueError("The {} is a directory".format(self.path))
        cmd = {}
        # Copy and use only the parameters relevant to data_file.
        for i in 'compress', 'npcompress', 'parallel', 'maxbuffersize', 'autocorrection', 'autodefragmentation':
            if i in kwargs:
                cmd[i] = kwargs[i]
        if self.mode == "r+" or self.mode == "a" or self.mode == "wr" or self.mode == "rw":
            # Fall back to read-only when the file exists but is not writable.
            if os.path.exists(
                    self.path) and not os.access(self.path, os.W_OK):
                self.logger.warning(
                    "----------------------------------------------------")
                self.logger.warning(" DATABASE ERROR in __init__")
                self.logger.warning(
                    " File {} is read-only - open in ro mode".format(
                        self.path))
                self.logger.warning(
                    "----------------------------------------------------")
                self.data = data_file(self.path, mode="ro", **cmd)
            else:
                self.data = data_file(self.path, mode="r+", **cmd)
        elif self.mode == "w":
            if os.path.exists(self.path):
                if not os.access(self.path, os.W_OK):
                    self.logger.error(
                        "----------------------------------------------------"
                    )
                    self.logger.error(" DATA ERROR in __init__")
                    self.logger.error(
                        " The file {} is read-only. Cannot open it in 'w' mode"
                        .format(self.path))
                    self.logger.error(
                        "----------------------------------------------------"
                    )
                    raise ValueError(
                        "The file {} is read-only. Cannot open it in 'w' mode"
                        .format(self.path))
            self.data = data_file(self.path, mode="w", **cmd)
        elif self.mode == "ro":
            self.data = data_file(self.path, mode="ro", **cmd)
        else:
            self.logger.error(
                "----------------------------------------------------")
            self.logger.error(" DATA ERROR in __init__")
            self.logger.error(" Unknown mode {}".format(self.mode))
            self.logger.error(" mode should be 'r+', 'w', or 'ro'")
            self.logger.error(
                "----------------------------------------------------")
            raise ValueError("Unknown mode {}".format(self.mode))
    #elif self.dtype == "hdf5"
    #elif self.dtype == "data-server"
    #elif self.dtype == "something-else-to-think-about"
    else:
        # BUG FIX: both messages referenced the nonexistent self.dbtype.
        self.logger.error(
            "----------------------------------------------------")
        self.logger.error(" DATA ERROR in __init__")
        self.logger.error(
            " Data base connector for {} isn't implemented yet".format(
                self.dtype))
        self.logger.error(
            "----------------------------------------------------")
        raise ValueError(
            "Data base connector for {} isn't implemented yet".format(
                self.dtype))
    # Redirection to the data class.
    # NOTE(review): assigning dunders on the *instance* does not affect
    # operator dispatch (Python looks dunders up on the type); only direct
    # calls like obj.__getitem__(k) use these -- confirm callers.
    self.__enter__ = self.data.__enter__
    self.__exit__ = self.data.__exit__
    self.sync = self.data.sync
    self.__len__ = self.data.__len__
    self.__add__ = self.data.__add__
    self.__iadd__ = self.data.__iadd__
    self.__setitem__ = self.data.__setitem__
    self.__getitem__ = self.data.__getitem__
    self.__delitem__ = self.data.__delitem__
    self.__call__ = self.data.__call__
    self.__contains__ = self.data.__contains__
    self.__iter__ = self.data.__iter__
    self.aggregate = self.data.aggregate
    self.dict = self.data.dict
    self.defragmentation = self.data.defragmentation
def download_file(datafile, scraped_title, book, page, maxpage):
    """Build the Audible download URL from the saved metadata file, probe it
    with a HEAD request (retrying on transient failures), and download the
    audio file unless an identically-sized copy already exists.

    Returns True after downloading, False when the file was already present
    with the expected size.

    BUG FIX: the socket.error handler re-raised `e_head`, a name bound only
    by the *other* except clause -- a NameError if a socket error struck
    before any HTTPError.  Both handlers now use a bare `raise`.
    """
    with open(datafile) as f:
        logging.info("Parsing %s, creating download url" % datafile)
        lines = f.readlines()
    dw_options = parse_qs(lines[0])
    title = dw_options["title"][0]
    if title != scraped_title:
        logging.info("Found real title: %s" % (title,))
    logging.info("Parsed data for book '%s'" % (title,))
    url = dw_options["assemble_url"][0]
    params = {}
    for param in ["user_id", "product_id", "codec", "awtype", "cust_id"]:
        # The server rejects the long codec name; shorten it.
        if dw_options[param][0] == "LC_64_22050_stereo":
            params[param] = "LC_64_22050_ster"
        else:
            params[param] = dw_options[param][0]
    url_parts = list(urlparse.urlparse(url))
    query = dict(urlparse.parse_qsl(url_parts[4]))
    query.update(params)
    url_parts[4] = urlencode(query)
    url = urlparse.urlunparse(url_parts)
    logging.info("Book URL: %s" % url)
    logging.info("Downloading file data")
    request_head = HeadRequest(url)
    # Audible only serves the file to its own download manager UA.
    request_head.add_header(
        'User-Agent',
        'Audible ADM 6.6.0.19;Windows Vista Service Pack 1 Build 7601')
    tries = 0
    head_ok = False
    # Retry the HEAD probe up to 5 times, a minute apart.
    while not head_ok:
        try:
            head = urllib2.urlopen(request_head)
            head_ok = True
        except urllib2.HTTPError:
            if tries < 5:
                tries = tries + 1
                time.sleep(60)
            else:
                raise
        except socket.error:
            if tries < 5:
                tries = tries + 1
                time.sleep(60)
            else:
                raise  # BUG FIX: was `raise e_head` (possibly unbound)
    val, par = cgi.parse_header(head.info().dict['content-disposition'])
    # Keep only the leading token of the server filename plus its extension.
    filename = par['filename'].split("_")[0]
    filename = filename + "." + par['filename'].split(".")[-1]
    size = head.info().dict['content-length']
    logging.info("Filename: %s" % filename)
    logging.info("Size: %s" % size)
    path = "%s%s" % (options.dw_dir, filename)
    logging.info("Book %s of 20 on page %s of %s" % (book, page, maxpage))
    if os.path.isfile(path):
        logging.info("File %s exist, checking size", path)
        if int(size) == os.path.getsize(path):
            logging.info("File %s has correct size, not downloading" % (path,))
            time.sleep(60)  # sleep a minute to not be throttled
            return False
        else:
            logging.warning("File %s had unexpected size, downloading" % (path,))
    else:
        logging.info("File %s does not exist, downloading" % (path,))
    if True:  # debug switch: flip to False to dry-run without downloading
        opener = LyingFancyURLopener()
        local_filename, headers = opener.retrieve(url, path,
                                                  reporthook=print_progress)
        logging.info("Completed download of '%s' to %s" % (title, path))
    else:
        logging.info("Completed download of '%s' to %s (not really)" % (title, path))
    return True
import requests
from urlparse import urlparse
# NOTE(review): this import SHADOWS the `urlparse` function imported above;
# after it, the name `urlparse` is the module (which is what the code below
# uses via urlparse.parse_qsl) -- the function import is dead.
import oauth2 as oauth
import urlparse
from linkedin import linkedin
from lnkdin.models import project
import json
import webbrowser

# NOTE(review): OAuth credentials hard-coded in source -- should be moved to
# settings/environment.
consumer_key = "75nanr9qwylt2d"
consumer_secret = "UpORpgoDH1iUl2kL"
authorize=True
consumer = oauth.Consumer(consumer_key, consumer_secret)
client = oauth.Client(consumer)
request_token_url = 'https://api.linkedin.com/uas/oauth/requestToken'
# NOTE(review): a network request at module import time -- the request token
# is fetched once per process start, not per user; confirm intended.
resp, content = client.request(request_token_url, "POST")
request_token = dict(urlparse.parse_qsl(content))
oauth_token=request_token['oauth_token']
oauth_token_secret =request_token['oauth_token_secret']
# The first line of jsp.js, with 'myvar' replaced by the request token.
# NOTE(review): the file handle is never closed.
fopen=open('jsp.js','rb')
a=fopen.readline()
b=a.replace('myvar',oauth_token)


def index1(request):
    # Serve the token-injected jsp.js line prepared at import time.
    # NOTE(review): HttpResponse is not imported in this module -- this
    # raises NameError when called; confirm missing django import.
    return HttpResponse(b)


def index(request):
    # OAuth callback: exchange the verifier for an access token.
    q=request.GET.get('oauth_verifier', 'refused')
    oauth_verifier =q
    # NOTE(review): the 'refused' fallback is truthy, so this branch runs
    # even on denial; and the function builds `client` but never returns a
    # response -- looks unfinished.
    if oauth_verifier:
        access_token_url = 'https://api.linkedin.com/uas/oauth/accessToken'
        token = oauth.Token(request_token['oauth_token'],
            request_token['oauth_token_secret'])
        token.set_verifier(oauth_verifier)
        client = oauth.Client(consumer, token)
def index(request):
    """Django view: complete the LinkedIn OAuth handshake using session
    state from index1, fetch the member's profile and persist profile,
    groups, education, skills, positions and company updates into the
    'linkedin' database alias, then close the popup.

    NOTE(review): reconstructed from a collapsed source line -- the nesting
    of the company-updates section is ambiguous; confirm against history.
    Unlike the sibling version of this view, the persistence loops here
    have no try/except, so one malformed record aborts the whole request.
    """
    # Session state written by the companion index1 view.
    a=request.session['consumer']
    b=request.session['oauth_token']
    c=request.session['oauth_token_secret']
    q=request.GET.get('oauth_verifier', 'refused')
    oauth_verifier =q
    # NOTE(review): 'refused' is truthy, so this branch also runs on denial.
    if oauth_verifier:
        access_token_url = 'https://api.linkedin.com/uas/oauth/accessToken'
        token = oauth.Token(b,c)
        token.set_verifier(oauth_verifier)
        client = oauth.Client(a, token)
        resp, content = client.request(access_token_url, "POST")
        # Form-encoded response body -> dict of credentials.
        access_token = dict(urlparse.parse_qsl(content))
        USER_TOKEN=access_token['oauth_token']
        USER_SECRET=access_token['oauth_token_secret']
        RETURN_URL = 'http://127.0.0.1:8000/lkdn/'
        auth = linkedin.LinkedInDeveloperAuthentication(consumer_key,
            consumer_secret,USER_TOKEN,USER_SECRET,RETURN_URL,
            permissions=linkedin.PERMISSIONS.enums.values())
        app = linkedin.LinkedInApplication(auth)
        x = app.get_profile(selectors=['id', 'firstName', 'lastName',
            'location', 'numConnections', 'skills', 'educations',
            'group_memberships','interests','positions'])
        ky=x.keys()
        count=0
        # Fill in "null" defaults for selectors missing from the profile.
        # NOTE(review): `count` is never reset between the four scans, so
        # once one key is found the remaining defaults are skipped -- looks
        # like a latent bug; confirm.
        for i in range(len(ky)):
            if ky[i]=="skills":
                count=1
                break
        if count!=1:
            updat_dict={'skills':"null"}
            x.update(updat_dict)
        for i in range(len(ky)):
            if ky[i]=="educations":
                count=1
                break
        if count!=1:
            updat_dict={'educations':"null"}
            x.update(updat_dict)
        for i in range(len(ky)):
            if ky[i]=="location":
                count=1
                break
        if count!=1:
            updat_dict={'location':"null"}
            x.update(updat_dict)
        for i in range(len(ky)):
            if ky[i]=="distance":
                count=1
                break
        if count!=1:
            updat_dict={'distance':"null"}
            x.update(updat_dict)
        # Persist group memberships (per-user link row + group row).
        grplen= x['groupMemberships']['_count']
        for i in range(grplen):
            grpid = x['groupMemberships']['values'][i]['group']['id']
            grpname = x['groupMemberships']['values'][i]['group']['name']
            p1=usergroup(Userids=x['id'],groupid = grpid, groupname=grpname)
            p1.save(using="linkedin")
            p5 = groups(groupid=grpid,groupname=grpname)
            p5.save(using="linkedin")
        # Persist education records (fieldOfStudy deliberately omitted).
        edulen=x['educations']['_total']
        for i in range(edulen):
            deg = x['educations']['values'][i]['degree']
            schl = x['educations']['values'][i]['schoolName']
            eduid = x['educations']['values'][i]['id']
            startDate = x['educations']['values'][i]['startDate']
            endDate = x['educations']['values'][i]['endDate']
            #fieldofstudy = x['educations']['values'][i]['fieldOfStudy']
            p2=usereducation(Userids = x['id'],eduid=eduid,school=schl,
                degree=deg,startDate=startDate,endDate=endDate)
            p2.save(using="linkedin")
        # Persist skills.
        skillen = x['skills']['_total']
        for i in range(skillen):
            skill=x['skills']['values'][i]['skill']['name']
            skillid=x['skills']['values'][i]['id']
            p3=Userskill(Userids = x['id'],skillid=skillid,skillname=skill)
            p3.save(using="linkedin")
        # Persist positions (company links).
        poslen = x['positions']['_total']
        for i in range(poslen):
            compid = x['positions']['values'][i]['company']['id']
            compname = x['positions']['values'][i]['company']['name']
            p6=userposition(ids=x['id'],companyid=compid,compname=compname)
            p6.save(using="linkedin")
        # NOTE(review): uses `compid` from the LAST loop iteration, so only
        # one company's updates are fetched (NameError when the profile has
        # no positions) -- confirm intent.
        x1= app.get_company_updates(compid)
        for j in range(x1['_count']):
            updateKey = x1['values'][j]['updateKey']
            timestamp = x1['values'][j]['timestamp']
            numLikes = x1['values'][j]['numLikes']
            updatecontent=x1['values'][j]['updateContent']['companyJobUpdate']['job']['description']
            updatecomments=x1['values'][j]['updateComments']
            # NOTE(review): `updatecontent` is computed but never passed to
            # company(...) -- the sibling version of this view does pass it;
            # confirm which is intended.
            p7 = company(updateKey=updateKey,companyid=compid,
                timestamp=timestamp,numLikes=numLikes,
                updatecomments=updatecomments)
            p7.save(using="linkedin")
        # Finally persist the profile row itself.
        p5 = userprofile(firstname = x['firstName'], lastname = x['lastName'],
            Userids = x['id'], location = x['location'],
            numconnections = x['numConnections'],interests=x['interests'])
        p5.save(using="linkedin")
        # Close the OAuth popup window.
        return HttpResponse("<script type='text/javascript'>d=window.open('http://www.google.com','_self');d.close();</script>")
    else:
        return HttpResponse("<script type='text/javascript'>d=window.open('http://www.google.com','_self');d.close();</script>")