def init(config):
    """Configure the OAuth helper, open the local HTTPS auth page, and wait for it to finish."""
    OAuth.config = config
    # TLS cert/key ship next to this module — the local callback server is HTTPS.
    OAuth.cert = os.path.join(os.path.dirname(__file__), 'script_interface.crt')
    OAuth.key = os.path.join(os.path.dirname(__file__), 'script_interface.key')
    if not OAuth.start():
        # NOTE(review): reconstructed from a collapsed line — the exact nesting of
        # these statements is ambiguous in the original; confirm against history.
        url = "https://127.0.0.1:5555/"
        webbrowser.open(url, new=1, autoraise=True)
    return OAuth.stop()
def __init__(self, downloading_match_file_keywords=None, update_mister_tuple=None):
    """Store the download filters and target repositories, then load cached OAuth credentials.

    downloading_match_file_keywords -- keywords used to filter which files get downloaded
    update_mister_tuple             -- repositories to upgrade (tuple/list of repo specs)
    """
    super(Updater, self).__init__()
    self.downloading_match_file_keywords = downloading_match_file_keywords
    self.update_mister_tuple = update_mister_tuple
    # Load any previously saved GitHub token so API calls are less rate-limited.
    self.oauth = OAuth()
    self.oauth.load()
    print2ln('Prepare download files...')
def twitter_logged_in(blueprint, token):
    """Flask-Dance callback: log a user in after the Twitter OAuth handshake.

    Fetches the Twitter profile, finds (or creates) the matching local OAuth
    and User rows, logs the user in, and redirects to the index page.

    blueprint -- the Flask-Dance Twitter blueprint that completed the dance
    token     -- the OAuth token dict (falsy on failure)
    """
    if not token:
        flash("Failed to log in.", category="error")
        return redirect(url_for('main.index'))
    resp = blueprint.session.get("account/verify_credentials.json?include_email=true")
    if not resp.ok:
        msg = "Failed to fetch user info."
        flash(msg, category="error")
        return redirect(url_for('main.index'))
    info = resp.json()
    user_id = info.get("id", None)
    query = OAuth.query.filter_by(
        provider=blueprint.name,
        provider_user_id=str(user_id))
    try:
        oauth = query.one()
    except NoResultFound:
        # FIX: store the provider id as str so it matches the
        # filter_by(provider_user_id=str(user_id)) lookup above on the next
        # login; previously the raw int was stored and never matched again.
        oauth = OAuth(
            provider=blueprint.name,
            provider_user_id=str(user_id),
            token=token)
    if oauth.user:
        user = oauth.user
    else:
        # Create a new local user account for this user
        username = info.get("name", "No name")
        email = info.get(
            "email",
            "Check request email address from users in your twitter app"
        )
        user = User(
            username=username.lower(),
            email=email,
            created=dt.now(),
            token=token_urlsafe(),
            token_expiration=dt.now()
        )
        # Random throwaway password: the account is reached via OAuth.
        password_generated = get_random_password_string(10)
        user.set_password(password_generated)
        # Associate the new local user account with the OAuth token
        oauth.user = user
        db.session.add_all([user, oauth])
        db.session.commit()
    flash(_l("Successfully twitter connection"), 'success')
    login_user(user)
    return redirect(url_for('main.index'))
def get_language_counts(start=0, end=3, file_end=100):
    """Aggregate per-language line and repository counts over the crawled dumps.

    Reads ../repos/part<idir>/repos_<n>.json, queries each repository's
    languages_url on the GitHub API, and writes a `language,lines,repos` CSV
    (sorted by repo count, descending) to report_file().  Python 2 only.
    """
    totals = {}
    with open(report_file(), "w") as output:
        for idir in range(start, end):
            dir_name = "../repos/part{}".format(idir)
            for ifile in range(0, file_end):
                file_number = idir * 100 + ifile
                file_name = "repos_{}.json".format(file_number)
                with open(dir_name + "/" + file_name) as f:
                    repo_page = json.loads(f.read())
                for repo in repo_page:
                    url = "{}?access_token={}".format(repo["languages_url"], OAuth.token())
                    lang_counts = json.loads(urllib.urlopen(url).read())
                    if u'message' in lang_counts:
                        # some repos may have been deleted since the repos were recorded
                        continue
                    for lang, count in lang_counts.iteritems():
                        if lang in totals:
                            totals[lang]["lines"] += int(count)
                            totals[lang]["repos"] += 1
                        else:
                            totals[lang] = {"lines": int(count), "repos": 1}
        ranked = sorted(totals.items(), key=lambda x: x[1]["repos"], reverse=True)
        print >> output, "language,lines,repos"
        for lang, counts in ranked:
            print >> output, "{},{},{}".format(lang, counts["lines"], counts["repos"])
def get_from_url(url): results = {} url = "{}?per_page=100".format(url) page=1 next_url = "{}&page={}".format(url,page) events_json = urllib.urlopen(next_url).read() events = json.loads(events_json) # for progress bar last_page = __get_last_page(next_url) if last_page>0: bar = __get_progressbar(last_page) print "Getting issue event history. About {} API calls will be made".format(last_page) bar.start() # read page by page while type(events) is list and len(events)>0: for event in events: results[event["id"]]=[event["id"], event["created_at"]] if last_page>0: bar.update(page) page += 1 next_url = "{}&access_token={}&page={}".format(url,OAuth.token(),page) events_json = urllib.urlopen(next_url).read() events = json.loads(events_json) if last_page>0: bar.finish() return results
def get_from_url(url): results = {} url = "{}?per_page=100".format(url) commits_json = urllib.urlopen(url).read() commits = json.loads(commits_json) print "Getting commit history" # read page by page while type(commits) is list and len(commits)>1: for commit in commits: results[commit["sha"]]=commit["commit"]["author"]["date"] next_url = "{}&access_token={}&sha={}".format(url,OAuth.token(),commits[-1]["sha"]) commits_json = urllib.urlopen(next_url).read() commits = json.loads(commits_json) return results
def bandura(): global p global g_p global g_7digital_cur global g_7digital_nexts global g_song_features global g_nexts_features global g_next_choice print "bandura" context = packContext( 0 ); if 1: #request.method == "POST": if request.method == "POST": print "'bandura' called by POST" updateParamsFromSite( request.form ) context = packContext( 0 ); if g_7digital_nexts: print "cur song id exists" print g_7digital_nexts g_7digital_cur = g_7digital_nexts[0][g_next_choice] g_song_features = g_nexts_features[g_next_choice] song_url = OAuth.get_song_url(g_7digital_cur); g_7digital_nexts = MX_traverse.pickNextSongsWrapped( g_7digital_cur ); g_nexts_features = [MX_traverse.get_song_by_7digital( x ) for x in g_7digital_nexts[0]] if g_p['save_playlist']: MX_traverse.addSongToPlaylist( g_p['playlist_file'], g_song_features, song_url ) context['song_url'] = song_url+"#t="+str(g_p['play_interval'][0])+","+str(g_p['play_interval'][1]) context['id_7digital'] = g_7digital_cur context['title'] = g_song_features[p['invkey']['title']] context['artist'] = g_song_features[p['invkey']['artist_name']] for i in range(len(g_nexts_features)): context['n'+str(i)+'_id_7digital'] = g_nexts_features[i][p['invkey']['track_7digitalid']] context['n'+str(i)+'_title'] = g_nexts_features[i][p['invkey']['title']] context['n'+str(i)+'_artist'] = g_nexts_features[i][p['invkey']['artist_name']] else: print "cur song id does NOT exists" else: print "'bandura' called by GET" print "rendering bandura" return render_template("bandura.html", **context);
def play_test(): if request.method == "POST": # if request.form["user_play"] == "PlayThis": print "this!" # echonest_id = request.form["user_play"] # id_7digital = get_7digital_id( echonext_id ) # song_url = OAuth.get_song_url(id_7digital) # print song_url # return render_template("play_specific.html", api_data=song_url) # else: print "next! " + request.form["user_play"]; # echonest_id = "SOBOAQC12A8C13E3E9"#request.form["user_play"] # echonest_id_next = MX_traverse.pickNextSongWrapped( echonest_id ) #echonext_id # print echonest_id, echonest_id_next id_7digital = request.form["user_play"]#MX_traverse.get_7digital_id( echonest_id_next ) song_url = OAuth.get_song_url(id_7digital); # print song_url id_7digital_next = MX_traverse.pickNextSongWrapped( id_7digital );#echonext_id request.form["user_play"] = id_7digital_next return render_template("play_specific.html", api_data=song_url) else: print "moshe" return render_template("play_test.html")
def search_raw(q, limit=10, sort="", order="desc"):
    """Search GitHub repositories; return the total count and up to `limit` raw items.

    Returns {"total_count": int, "items": list of raw repo dicts}.  Python 2 only.
    """
    url = "https://api.github.com/search/repositories?access_token={}&q={}&sort={}&order={}".format(OAuth.token(), q, sort, order)
    results = json.loads(urllib.urlopen(url).read())
    return {"total_count": results["total_count"],
            "items": results["items"][:limit]}
# -*- coding: utf-8 -*- """ Created on Tue Sep 23 12:21:18 2014 @author: eyalshiv """ #import sys #from flask import Flask, render_template, request sys.path.append('/users/eyalshiv/DI/musixplore/src/7digital') import OAuth id_7digital = "123456" song_url = OAuth.get_song_url(id_7digital) print song_url
__author__ = 'Aguarate - @aguarate'
# -*- coding:UTF-8
import tweepy, OAuth, URLs, os
import time
import random

# Bot: replies to #gatogordo tweets with a random cat URL and phrase.
auth = OAuth.login()
api = tweepy.API(auth)

# File persisting the id of the last tweet we replied to.
ID_sv = os.path.expanduser('~/.botijo_lastid')
if not os.path.exists(ID_sv):
    since = "533935298606952448"
else:
    f = open(ID_sv, 'r')
    since = f.readline().strip()
    f.close()

frases = ("miaaau", "toma, tu gato gordo", "MIAU!", "prrr prrrr")

while 1:
    twts = api.search(q=["#gatogordo"], since_id=since)
    for status in twts:
        print (status.user.screen_name + ">> " + status.text + " ID: {}".format(status.id))
        api.update_status("@" + status.user.screen_name + " " +
                          URLs.gatos[random.randrange(0, URLs.gatos.__len__(), 1)] + " " +
                          frases[random.randrange(0, frases.__len__(), 1)],
                          in_reply_to_status_id=status.id)
        since = "{}".format(status.id)
    # FIX: persist the last-seen id — it was read at startup but never written
    # back, so the bot re-replied to the same tweets after every restart...
    f = open(ID_sv, 'w')
    f.write(since)
    f.close()
    # FIX: ...and pause between polls (`time` was imported but unused; a tight
    # while-1 loop hammers the Twitter search rate limit).
    time.sleep(60)
__author__ = 'xiangwenwen'

import sys
import subprocess
import urllib.request
import os
import douban_client.api
import OAuth

sys.path.append('src')
import douban_controlCenter

# Interactive douban shell: dispatch each command through shellDic until 'exit'.
client = OAuth.addOAuth()
if client == 'error':
    print('error oauth')
else:
    while True:
        shell = douban_controlCenter.center()
        if shell == 'exit':
            exit()
        try:
            douban_controlCenter.shellDic[shell](client)
        except KeyError:
            # Unknown command: report and keep prompting.
            print(shell + ' is not a douban shell command see douban help')
            continue
        except KeyboardInterrupt:
            exit()
import clr clr.AddReference('System.Xml.Linq') import OAuth from System import * from System.Xml.Linq import * def xname(x): return XName.Get(x) print 'Getting lists...' lists = OAuth.requestTwitter("http://api.twitter.com/1/lists/all.xml") # can not use lists.IsSome or lists.IsNone - exception 'Unhandled Exception: System.InvalidProgramException: Common Language Runtime detected an invalid program.' was thrown if lists.Value <> None: listsDoc = XDocument.Parse(lists.Value.Item1) print "Information about Twitter lists:" toprint = [] for list in listsDoc.Descendants(xname("list")): info = (list.Element(xname("id")).Value, list.Element(xname("name")).Value, list.Element(xname("full_name")).Value, list.Element(xname("member_count")).Value, list.Element(xname("subscriber_count")).Value) toprint.append(info) if len(toprint) == 0: "You have no list" else:
from flask import Flask, redirect, url_for, session, request, jsonify
from flask_oauthlib.client import OAuth  # FIX: was commented out, but OAuth(app) below requires it
# import os

app = Flask(__name__)
# SECURITY(review): OAuth client credentials are hard-coded; move them to
# environment variables or instance config before publishing this file.
app.config[
    'GOOGLE_ID'] = "cloud.google.com/596422529465-652tfp1aerkj92s5hjdhsui5t4h54k86.apps.googleusercontent.com"
app.config['GOOGLE_SECRET'] = "cloud.google.com/TegOpmHdBrf3PgHXsBNxEMZH"
app.debug = True
app.secret_key = 'development'
oauth = OAuth(app)

# Google OAuth2 remote app requesting the 'email' scope.
google = oauth.remote_app(
    'google',
    consumer_key=app.config.get('GOOGLE_ID'),
    consumer_secret=app.config.get('GOOGLE_SECRET'),
    request_token_params={'scope': 'email'},
    base_url='https://www.googleapis.com/oauth2/v1/',
    request_token_url=None,
    access_token_method='POST',
    access_token_url='https://accounts.google.com/o/oauth2/token',
    authorize_url='https://accounts.google.com/o/oauth2/auth',
)


@app.route('/')
def index():
    """Return the logged-in user's Google profile as JSON."""
    if 'google_token' in session:
        me = google.get('userinfo')
        return jsonify({"data": me.data})
    # NOTE(review): falls through (returns None) when no token is in the
    # session — the original probably continued with a login redirect beyond
    # this excerpt; confirm.
import OAuth

# NOTE(review): `limits` is never imported in this snippet — this call raises
# NameError unless `limits` is injected elsewhere. Confirm the intended import.
limits.Stop()
OAuth.registerOnTwitter()
def report_contributor(item, folder_name):
    """Write <folder_name>/<owner>/<repo>.csv listing username,contributions rows.

    item -- a GitHub repository dict (needs "id", "name", "owner"/"login").
    Python 2 only.
    """
    subfolder_name = "{}/{}".format(folder_name, item["owner"]["login"])
    if not os.path.exists(subfolder_name):
        os.makedirs(subfolder_name)
    repo_id = item["id"]
    url = "https://api.github.com/repositories/{}/contributors?access_token={}".format(repo_id, OAuth.token())
    contributors_json = urllib.urlopen(url).read()
    # An empty body means no contributor data came back — write nothing.
    if len(contributors_json) > 0:
        contributors = json.loads(contributors_json)
        records = [[c["login"], c["contributions"]] for c in contributors]
        with open("{}/{}.csv".format(subfolder_name, item["name"]), "w") as f:
            print >> f, "username,contributions"
            for login, contributions in records:
                print >> f, "{},{}".format(login, contributions)
# Updater drives the MiSTer repository download/upgrade flow:
#   __init__                     -- stores the file-keyword filters and repository
#                                   tuple, and loads cached GitHub OAuth credentials.
#   set_updater_repository_list  -- overrides the repository tuple from the user's
#                                   config file when it has non-blank entries;
#                                   otherwise keeps the default list.
#   gen_install_tips             -- prints the usage/GitHub-token banner and the
#                                   list of repositories about to be upgraded.
#   setup_and_upgrade_download   -- wires the filters and OAuth headers into
#                                   upgrade_download(), optionally framed by tips.
#   eof                          -- prints the closing copy-to-SD-card instructions.
# NOTE: the two lines below are kept byte-identical (collapsed formatting
# preserved) because the banner string's original line breaks are unrecoverable.
class Updater(object): """docstring for Updater""" def __init__(self, downloading_match_file_keywords=None, update_mister_tuple=None): super(Updater, self).__init__() self.downloading_match_file_keywords = downloading_match_file_keywords self.update_mister_tuple = update_mister_tuple self.oauth = OAuth() self.oauth.load() print2ln('Prepare download files...') ''' read and set: updater config-file(if not exists or empty, then upgrade from default-repository-list) ''' def set_updater_repository_list(self): repository_list = read_updater_repository_list() # deal as default: if not repository_list: return gen_repository_list = [] for repository in repository_list: current_repository = repository.strip(' ') if not current_repository: continue gen_repository_list.append(current_repository) if not gen_repository_list: return self.update_mister_tuple = gen_repository_list ''' install-tips ''' def gen_install_tips(self): self_name = type(self).__name__ print_fill_tips('''\ #################################################################################### # # # MiSTer %s script by %s, %s # # # IMPORTANT: Use this script is limited by the request frequency of GitHub API v3. # # If the prompt is frequent, please wait for 1 hour and try again! # # Or you can create file: `%s`, and paste your GitHub-Token. # # Please visit and setting GitHub-Token: %s . # (Be careful keep your Token safety and do not leak out! This is very important!) # # (Token: Read-only permissions are sufficient!) # # # # Prerequisites: # # * python.version >= 2.7 # # * pip ~ pip3 # # # #################################################################################### ''' % (self_name, Author.name, Author.bio, OAuth.OAUTH_FILENAME, OAuth.APPLY_GITHUB_TOKEN_URL)) update_mister_ln = ''.join(('%s[' % (NEXT_LINE_STR)) + str(unit) + ']' for unit in self.update_mister_tuple) print2ln('Prepare to upgrade repository files!%s %s' % (NEXT_LINE_STR, update_mister_ln)) ''' setup and upgrade: download! 
''' def setup_and_upgrade_download(self, upgrade_tips=False): # upgrade-tips if upgrade_tips: self.gen_install_tips() # set match-tuple set_global_downloading_match_file_keywords( self.downloading_match_file_keywords) # oauth-headers, allow nil oauth_headers = self.oauth.gen_oauth_headers() # upgrade and download, delete oldest files! upgrade_download(self.update_mister_tuple, oauth_headers) # upgrade-tips if upgrade_tips: self.eof() ''' eof ''' def eof(self): print2ln( '''All done. Please copy all successfully downloaded files of the current directory to the root directory of the SD card. Ensuring that the same type of `.rbf` file retains only the latest one.''')
def cross_plot_contributions_on_other_repos(folder, owner, repo):
    """Cross-tabulate the top contributor's activity across their other repos.

    Reads reports/<folder>/<owner>/<repo>.csv, takes the top contributor from
    the first data row, fetches that user's recently-updated repositories and
    each one's contributor list, then appends one column per fetched repo to
    every row of the original CSV, writing <repo>.cross.csv.  Python 2 only.
    """
    with open("reports/{}/{}/{}.csv".format(folder, owner, repo)) as csvfile:
        csvfile.readline()  # skip the header row
        line = str.rstrip(csvfile.readline())
        topcontributor = str.split(line, ",")[0]
    url = "https://api.github.com/users/{}/repos?sort=updated&access_token={}".format(topcontributor, OAuth.token())
    repos = json.loads(urllib.urlopen(url).read())
    # FIX: dropped the unused `ids` list and the leftover IPython.embed()
    # debugging hook, which dropped every non-interactive run into a shell.
    result_list = []
    for r in repos:
        repoid = r["id"]
        url = "https://api.github.com/repositories/{}/contributors?access_token={}".format(repoid, OAuth.token())
        contribution = json.loads(urllib.urlopen(url).read())
        result_dic = {}
        for u in contribution:
            dic_key = "{}".format(u["login"])
            result_dic[dic_key] = u["contributions"]
        result_list.append(result_dic)
    with open("reports/{}/{}/{}.cross.csv".format(folder, owner, repo), "w") as o:
        with open("reports/{}/{}/{}.csv".format(folder, owner, repo)) as f:
            f.readline()  # skip the header row
            for line in f.readlines():
                newline = str.rstrip(line)
                username = str.split(newline, ",")[0]
                # One extra column per fetched repo: this user's contribution count.
                for dic in result_list:
                    if username in dic:
                        newline += ",{}".format(dic[username])
                    else:
                        newline += ",0"
                print >> o, newline
import enterCreds import OAuth e = enterCreds createParams = OAuth.ParamsAndHeaders(e.links['request'][1], e.links['request'][0], e.consumerKey, e.consumerSecret, "", "") headers = createParams.createHeader() str = '' for tuple in headers[0]: for item in tuple: str += item print str
def save_json(repos, idir=0, ifile=0): if not os.path.exists("repos"): os.makedirs("repos") if idir > 0: if not os.path.exists("repos/part{}".format(idir)): os.makedirs("repos/part{}".format(idir)) with open("repos/part{}/repos_{}.json".format(idir, ifile), "w") as f: print >> f, repos else: with open("repos/repos_{}.json".format(ifile), "w") as f: print >> f, repos link = "https://api.github.com/repositories?access_token={}".format(OAuth.token()) def run_in_range_since(folder_range, lastseen, file_range=range(0, 100)): for idir in folder_range: print "{} {}/600".format(datetime.datetime.now().time(), idir) for ifile in file_range: link2 = link + "&since={}".format(lastseen) repos_json = urllib.urlopen(link2).read() repos = json.loads(repos_json) if len(repos) == 0: return idir, idir * 100 + ifile - 1, lastseen save_json(repos_json, idir, idir * 100 + ifile) lastseen = repos[-1]["id"] return idir, idir * 100 + ifile, lastseen
def search(q, limit=10, sort="", order="desc"):
    """Search GitHub repositories; return up to `limit` full_name byte strings.

    Python 2 only (uses urllib.urlopen and str.encode on unicode names).
    """
    url = "https://api.github.com/search/repositories?access_token={}&q={}&sort={}&order={}".format(OAuth.token(), q, sort, order)
    results = json.loads(urllib.urlopen(url).read())
    # Kept: preserves the KeyError on malformed responses, as before.
    total_count = results['total_count']
    items = [item['full_name'].encode("utf-8") for item in results['items'][:limit]]
    return items
to a website and the latte one to the pic, and I will only erase the second one ''' try: startpoint = re.search(r'@+\w*', status_text).end() endpoint = re.search(r'htt(p|ps)://[\w./]*$', status_text).start() status_text = status_text[startpoint+1:endpoint] except: endpoint = re.search(r'htt(p|ps)://[\w./]*$', status_text).start() status_text = status_text[:endpoint] # Tweet out the truncated text judging if there're pictures try: url = t.statuses.user_timeline(screen_name=self.user_name)[self.tweet_index]['extended_entities']['media'][0]['media_url'] imagefile = urlopen(url) imagedata = imagefile.read() params = {"media[]": imagedata, "status": status_text} t.statuses.update_with_media(**params) except: t.statuses.update(status = status_text) def retweet(self): t.statuses.retweet(id=t.statuses.user_timeline(screen_name=self.user_name)[self.tweet_index]['id']) def log(self): print t.statuses.user_timeline(screen_name=self.user_name)[self.tweet_index]['text'] print t.statuses.user_timeline(screen_name=self.user_name)[self.tweet_index]['extended_entities']['media'][0]['media_url'] if __name__ == '__main__': t = OAuth.Create_Apps() object = TweetSender('chihuo_lele', 1) object.log()
__author__ = 'Aguarate - @aguarate' # -*- coding:UTF-8 import tweepy, OAuth, URLs, os auth = OAuth.login() api = tweepy.API(auth) import time import random ID_sv = os.path.expanduser('~/.botijo_lastid') if not os.path.exists(ID_sv): since = "533935298606952448" else: f = open(ID_sv, 'r') since = f.readline().strip() f.close() frases = ("miaaau", "toma, tu gato gordo", "MIAU!", "prrr prrrr") while 1: twts = api.search(q=["#gatogordo"], since_id=since) for status in twts: print(status.user.screen_name + ">> " + status.text + " ID: {}".format(status.id)) api.update_status( "@" + status.user.screen_name + " " + URLs.gatos[random.randrange(0, URLs.gatos.__len__(), 1)] + " " +