def get_dropbox_session(token_key=None, token_secret=None):
    """Build a DropboxSession from app config; attach a token pair when given."""
    session = dropbox_session.DropboxSession(
        get_config('dropbox_app_key'),
        get_config('dropbox_app_secret'),
        get_config('dropbox_app_type'),
    )
    # Only attach when both halves of the token pair are present.
    if token_key and token_secret:
        session.set_token(token_key, token_secret)
    return session
def create_session(username, password):
    """
    Authenticate the user, mint a unique session key, store it in the
    session engine, and return both the key and the user.
    """
    user = User.objects.get_user_by_password(username, password)
    engine = get_config('auth_session_engine')
    if not user:
        raise InvalidInput('Username or password incorrect')
    # Keep drawing random keys until one is not already in use.
    key = random_string(15)
    while engine.get(key):
        key = random_string(15)
    engine.set(key, user.username, get_config('auth_session_expire'))
    return {'session_key': key, 'user': user}
def create_user(self, username, password):
    """
    Validate and create a new User.

    Raises InvalidInput (with per-field error details) when the username
    fails the configured regex, the password is too short, or the
    username is already taken. Returns the created User on success.
    """
    if password == '':
        # skip hashing process if the password field is left blank
        # helpful for creating mock user objects without slowing things down.
        # NOTE(review): a blank password still fails the length check below,
        # so this shortcut only skips hashing, not validation -- confirm.
        pass_hash = ''
    else:
        pass_hash = bcrypt.hashpw(password, bcrypt.gensalt())

    regex = get_config('auth_regex', r'^[\d\w]{4,30}$')
    errors = {}
    if not re.match(regex, username):
        errors['username'] = {'message': 'Username not valid', 'value': username}
    if len(password) < 4:
        # FIX: message previously read "Password much be ..." (typo).
        errors['password'] = {'message': 'Password must be at least 4 characters'}
    if User.objects.filter(username=username).exists():
        errors['username'] = {'message': 'Username already exists', 'value': username}
    if errors:
        raise InvalidInput("User data not valid", **errors)
    return User.objects.create(username=username, pass_hash=pass_hash)
def migrate_onto_engine(engine_id, user=LOGGED_IN_USER):
    # Move the user's library contents onto the selected storage engine,
    # then bounce back to the settings page.
    session = get_config('db_session')
    # NOTE(review): query(StorageEngine, Library).first() returns a *tuple*
    # of (StorageEngine, Library), so `engine.migrate_onto()` below would be
    # invoked on the tuple, not the StorageEngine -- likely needs `[0]` or a
    # query over StorageEngine alone. Also there is no explicit join
    # condition between the two tables (cross join filtered per side).
    # Confirm against the models before changing.
    engine = session.query(StorageEngine, Library)\
        .filter(StorageEngine.id==engine_id)\
        .filter(Library.identity==user.username).first()
    engine.migrate_onto()
    return Redirection('/settings')
def submitter_color(self):
    """
    Map the submitter's monthly karma percentile to a CSS color name.

    Percentile data comes from hn-karma-tracker and is cached for 24 hours;
    returns 'black' when the service is unavailable or the user is in the
    top percentile bracket.
    """
    cache = get_config('cache_engine')
    cache_key = "user/%s" % self.submitter
    data = cache.get(cache_key)
    if not data:
        url = "http://hn-karma-tracker.herokuapp.com/user/%s.json" % self.submitter
        res = requests.get(url)
        if res.status_code != 200:
            return 'black'
        data = res.json()
        cache.set(cache_key, data, 24 * 3600)

    percentile = float(data['month_data']['percentile']) * 100
    # Ascending thresholds; the first bracket that contains the value wins.
    brackets = [
        (10, 'lime'), (30, 'green'), (50, 'orange'),
        (80, 'orangered'), (95, 'red'), (99, 'darkred'),
    ]
    for limit, color in brackets:
        if percentile < limit:
            return color
    return 'black'
def reset_metadata(self, metadata):
    """Delete every existing MetaData row for this item and replace them
    with rows built from the given mapping, then commit."""
    session = get_config('db_session')
    session.query(MetaData).filter_by(item=self).delete()
    for key in metadata:
        session.add(MetaData(key=key, value=metadata[key], item=self))
    session.commit()
def http(self, request, response):
    """On a logout POST, nuke the server-side session and clear the cookie."""
    is_logout = request.method == 'POST' and 'auth_session' in request.POST
    if is_logout:
        key = request.POST['auth_session']
        # Overwrite the session value with None and a 1-second TTL.
        get_config('auth_session').set(key, None, 1)
        response.delete_cookie('giotto_session')
    return response
def do_crawl(cls, drug, total_pages):
    """
    Crawl (or reload cached) listings for a drug, convert each price to
    USD per unit, reject outliers, and persist a result row with the
    worldwide average price.
    """
    ticker = requests.get("http://api.bitcoinaverage.com/ticker/USD")
    bitcoin_price = float(ticker.json()['last'])
    try:
        # FIX: context manager closes the cache file (handle was leaked).
        with open("%s_data/parsed.json" % drug) as f:
            parsed = f.read()
        listings = json.loads(parsed)
    except IOError:
        # No cache on disk yet -- crawl fresh, then write the cache file.
        listings = parse_listings(drug=drug, total_pages=total_pages)
        parsed = json.dumps(listings)
        with open("%s_data/parsed.json" % drug, 'w') as f:
            f.write(parsed)

    # Annotate each listing with its USD unit price (mutates dicts in
    # place; the old code also built an unused `new` list -- removed).
    for listing in listings:
        listing["unit_price"] = listing['price'] * bitcoin_price / listing['quantity']

    prices = np.array([listing['unit_price'] for listing in listings])
    filtered = reject_outliers(prices)
    outliers_count = len(prices) - len(filtered)
    # NOTE(review): raises ZeroDivisionError if every price is rejected.
    average_price = sum(filtered) / len(filtered)

    m = cls(
        json=parsed,
        bitcoin_to_usd=bitcoin_price,
        created=datetime.datetime.now(),
        drug=drug,
        outliers_count=outliers_count,
        worldwide_avg_per_ounce=average_price,
    )
    session = get_config("db_session")
    session.add(m)
    session.commit()
def by_city(cls, city_slug):
    """Return every album recorded in the given city, or raise DataNotFound."""
    session = get_config('db_session')
    matches = session.query(cls).filter_by(city_slug=city_slug).all()
    if not matches:
        raise DataNotFound
    return {
        'albums': matches,
        'length': len(matches),
        'city': matches[0].city,
    }
def get_metadata(self, key):
    """
    Return this item's metadata value for `key`, or '' when there is no
    such entry (or its stored value is falsy).
    """
    session = get_config('db_session')
    row = session.query(MetaData).filter_by(item=self, key=key).first()
    if row and row.value:
        return row.value
    return ''
def create(cls, data=ALL_DATA):
    """
    Create an Album (plus its Song rows) from a flat form-data dict.

    Top-level keys: title, bucket, folder, encoding, source,
    num_of_tracks, and optional date/venue/city. Per-track fields arrive
    as numbered keys (title_0, track_0, s3_0, duration_0, info_0,
    date_0, ...). Returns the new Album after committing.
    """
    d = data.get('date', None)
    date = dateutil.parser.parse(d) if d else None
    session = get_config('db_session')
    a = cls(
        title=data['title'],
        date=date,
        venue=data.get('venue', None),
        bucket=data['bucket'],
        folder=data['folder'],
        encoding=data['encoding'],
        source=data['source'],
        city=data.get('city', None),
    )
    session.add(a)
    # NOTE(review): iterates 0..num_of_tracks *inclusive* (num_of_tracks + 1
    # slots); presumably track fields are 0-indexed and any empty trailing
    # slot is skipped by the `if not title` guard below -- confirm.
    for i in xrange(int(data['num_of_tracks']) + 1):
        title = data['title_%s' % i]
        track = data['track_%s' % i]
        s3_name = data['s3_%s' % i]
        dur = data['duration_%s' % i]
        info = data['info_%s' % i]
        d = data.get('date_%s' % i, None)
        if not title:
            # A blank title means no track was entered in this slot.
            continue
        date = dateutil.parser.parse(d) if d else None
        s = Song(
            title=title,
            info=info,
            date=date,
            track=track,
            s3_name=s3_name,
            album=a,
            duration=dur
        )
        session.add(s)
    session.commit()
    return a
def by_venue(cls, venue_slug):
    """Return every album recorded at the given venue, or raise DataNotFound."""
    session = get_config('db_session')
    matches = session.query(cls).filter_by(venue_slug=venue_slug).all()
    if not matches:
        raise DataNotFound
    return {
        'albums': matches,
        'length': len(matches),
        'venue': matches[0].venue,
    }
def get_file(path=RAW_INVOCATION_ARGS):
    """
    Serve a file from the project directory, returning an open binary
    handle and its guessed mimetype.

    Raises DataNotFound when the file does not exist.
    """
    # SECURITY(review): `path` comes from raw invocation args and is joined
    # unchecked -- a `..` component could escape the project directory.
    # Consider normalizing and verifying the resulting prefix.
    fullpath = get_config('project_path') + os.path.join(base_path, path)
    # FIX: guess_type never raises IOError, so only open() needs the try.
    mime, encoding = mimetypes.guess_type(fullpath)
    try:
        return open(fullpath, 'rb'), mime or 'application/octet-stream'
    except IOError:
        raise DataNotFound("File does not exist")
def all_songs(cls):
    """Return every distinct song slug with a display title and play count."""
    session = get_config('db_session')
    columns = (func.max(cls.title), cls.slug, func.count(cls.title))
    songs = (session.query(*columns)
             .group_by(cls.slug)
             .order_by(cls.slug)
             .all())
    return {'songs': songs, 'length': len(songs)}
def addS3(secret_key, access_token, user=LOGGED_IN_USER):
    """Attach an S3 storage engine to the user's library."""
    domain = get_config('domain')
    bucket_name = get_bucket_name(user.username, domain, access_token, secret_key)
    connection_data = {
        'aws_key': access_token,
        'aws_secret': secret_key,
        'bucket_name': bucket_name,
    }
    library.add_storage('s3', connection_data)
def show_connections(user=LOGGED_IN_USER):
    """Split all connections into pending and established groups for display."""
    connections = Connection.objects.all()
    pending = [c for c in connections if c.pending]
    established = [c for c in connections if not c.pending]
    return {
        'site_domain': get_config('domain'),
        'username': user.username,
        'existing_connections': established,
        'pending_connections': pending,
    }
def get_dropbox_authorize_url(user, callback_scheme='http'):
    """
    Get the url for sending the user to authenticate with dropbox.

    Persists the request token so the OAuth1 callback can finish the
    handshake later.
    """
    sess = get_dropbox_session()
    request_token = sess.obtain_request_token()
    callback = "%s://%s/dropbox/oauth1callback" % (
        callback_scheme, get_config("domain"))
    DropboxRequestToken.create(user, request_token)
    return sess.build_authorize_url(request_token, oauth_callback=callback)
def profile(cls, slug):
    """All performances of one song (by title slug), ordered by album date."""
    session = get_config('db_session')
    matches = (session.query(cls)
               .join(Album)
               .filter(cls.slug==slug)
               .order_by(Album.date)
               .all())
    if not matches:
        raise DataNotFound
    return {
        'songs': matches,
        'title': matches[0].title,
        'length': len(matches),
    }
def get(cls, id, user=LOGGED_IN_USER):
    """Fetch one row by id, scoped to the user's library; raise if absent."""
    session = get_config('db_session')
    row = (session.query(cls)
           .filter_by(id=id)
           .filter_by(library_identity=user.username)
           .first())
    if not row:
        raise DataNotFound()
    return row
def get_google_flow(scheme="http"):
    """
    Wrapper for calling `flow_from_clientsecrets` from the google
    authentication API.
    """
    redirect = '%s://%s/google/oauth2callback' % (scheme, get_config('domain'))
    return flow_from_clientsecrets(
        'client_secrets.json',
        scope='https://www.googleapis.com/auth/drive',
        redirect_uri=redirect,
    )
def update_from_crawl(self, data):
    """
    Update this submission instance with the latest crawled comment
    count, points, and rank; track the best (numerically lowest) rank
    seen so far, then persist.
    """
    self.comments = data['comments']
    self.points = data['points']
    self.current_rank = data['current_rank']
    # FIX: test the unset/falsy peak first so we never order-compare an
    # int against None (the old order relied on Python 2's arbitrary
    # None ordering and would raise TypeError under Python 3). Results
    # are identical for every input.
    if not self.peak_rank or self.current_rank <= self.peak_rank:
        self.peak_rank = self.current_rank
    session = get_config('db_session')
    session.add(self)
    session.commit()
def has(self, size=None, hash=None):
    """ Does this size/hash pair exist in my library? """
    session = get_config('db_session')
    # NOTE(review): the `and` below is Python's boolean operator, not SQL.
    # Applied to SQLAlchemy clause expressions it short-circuits on the
    # truthiness of the left operand instead of emitting an AND clause, so
    # the filter sent to the database is not the intended
    #   (key=='size' AND value==size) AND (key=='hash' AND value==hash)
    # -- which, even if emitted, could never match a single row (one row's
    # `key` cannot equal both 'size' and 'hash'). The intent likely needs
    # two EXISTS subqueries (one per pair) or an OR of the two pairs with
    # a per-item count check, built with sqlalchemy.and_()/or_(). Confirm
    # the schema before changing.
    items = session.query(MetaData, Library)\
        .filter(Library.identity == self.identity)\
        .filter(
            (MetaData.key == 'size' and MetaData.value == size) and
            (MetaData.key == 'hash' and MetaData.value == hash)
        )
    return len(items.all()) > 0
def finish_publish(hash, metadata, engine_id=None, username=USER):
    """
    After the client's upload is complete, this api is hit to actually
    finish the publish process by recording the item in the user's library.
    """
    identity = "%s@%s" % (username, get_config('domain'))
    owner_library = Library.objects.get(identity=identity)
    owner_library.add_item(
        engine_id=engine_id,
        origin=identity,
        metadata=metadata,
    )
    return "OK"
def get_bucket_contents(bucket, folder):
    """
    List every key under `folder` in an S3 bucket, returning the filename
    (with the folder prefix stripped) and duration metadata for each.

    Raises DataNotFound for an unreadable bucket or an empty folder.
    """
    from boto.s3.connection import S3Connection
    from boto.exception import S3ResponseError
    conn = S3Connection(get_config('aws_access_key'),
                        get_config('aws_secret_access_key'))
    try:
        s3_bucket = conn.get_bucket(bucket)
    except S3ResponseError:
        raise DataNotFound("Can't open S3 bucket")

    prefix_len = len(folder)
    contents = []
    for k in s3_bucket.list(folder):
        key = s3_bucket.get_key(k.key)  # refetch so we can get metadata
        contents.append({
            # drop "<folder>/" from the front of each result
            "filename": key.name[prefix_len + 1:],
            "duration": duration_to_hms(key.get_metadata('x-content-duration')),
        })
    if not contents:
        raise DataNotFound("Empty Bucket or folder")
    return contents
def http(self, request):
    """
    Resolve the logged-in user from the session cookie (falling back to an
    `auth_session` POST field) and attach it to the request as `.user`
    (None when anonymous).
    """
    session_key = request.cookies.get('giotto_session', None)
    if not session_key and request.POST:
        session_key = request.POST.get('auth_session', None)

    user = None
    if session_key:
        username = get_config('auth_session_engine').get(session_key)
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            user = None
    setattr(request, 'user', user)
    return request
def cmd(self, request):
    """
    Resolve the acting user for a command-line invocation: first from the
    GIOTTO_SESSION environment value, otherwise by prompting for
    credentials interactively.
    """
    # NOTE(review): `enviornment` appears to be a (misspelled) attribute on
    # the request object -- preserved as-is since callers define it.
    session_key = request.enviornment.get('GIOTTO_SESSION', None)
    user = get_config('auth_session').get(session_key) if session_key else None
    if not user:
        print("Username:")
        username = raw_input()  # Python 2 stdin prompt
        password = getpass.getpass()
        user = User.objects.get_user_by_password(username, password)
    setattr(request, 'user', user)
    return request
def _upload_to_engine(identity, filename, size, hash):
    """
    Call the server, get the engine info, then proceed to do the upload.
    Afterwords, return the url of the newly uploaded file.

    Returns an (engine_id, url) tuple from the first storage engine that
    accepts the upload; raises when none are configured or all fail.
    """
    data = {"size": size, "hash": hash}
    url = "https://%s/api/startPublish.json" % identity
    username = identity.split('@')[0]
    try:
        # Cert verification is skipped only in debug mode.
        response = requests.post(url, data=data, auth=(username, ''), verify=(not get_config('debug')))
    except requests.exceptions.ConnectionError as exc:
        raise Exception("Could not connect to Library Server: %s, %s" % (url, exc))
    code = response.status_code
    if code != 200:
        # NOTE(review): `response.error` only existed in very old versions
        # of requests -- confirm this attribute against the pinned version.
        msg = response.error
        raise Exception("Library Server Error: (%s) %s" % (code, msg))
    ext = filename.split('.')[-1]
    # Stored filename is content-addressed: "<size>.<hash>.<ext>".
    endfilename = "%s.%s.%s" % (size, hash, ext)
    count = 0
    for count, engine in enumerate(response.json(), 1):
        # engine data is transmitted as a base64 encoded pickle.
        # SECURITY(review): unpickling server-provided data is only safe if
        # the library server is fully trusted.
        connect_data = pickle.loads(base64.b64decode(engine['data']))
        name = engine['name']
        id = engine['id']
        try:
            if name == 's3':
                return id, upload_s3(filename, endfilename, connect_data)
            if name == 'googledrive':
                return id, upload_google_drive(filename, endfilename, connect_data)
            if name == 'dropbox':
                return id, upload_dropbox(filename, endfilename, connect_data)
        except Exception as exc:
            # Best-effort: log and fall through to the next engine.
            print "upload to %s failed: %s" % (name, exc)
    if not count:
        raise Exception("No Storage Engines Configured")
    raise Exception("Upload failed.")
def create_user(self, username, password):
    """
    Validate and create a new User.

    Raises InvalidInput (with per-field error details) when validation
    fails; returns the created User on success.
    """
    if password == '':
        # skip hashing process if the password field is left blank
        # helpful for creating mock user objects without slowing things down.
        pass_hash = ''
    else:
        pass_hash = bcrypt.hashpw(password, bcrypt.gensalt())
    regex = get_config('auth_regex', r'^[\d\w]{4,30}$')
    errors = {}
    if not re.match(regex, username):
        errors['username'] = {'message': 'Username not valid', 'value': username}
    if len(password) < 4:
        # FIX: message previously read "Password much be ..." (typo).
        errors['password'] = {'message': 'Password must be at least 4 characters'}
    if User.objects.filter(username=username).exists():
        errors['username'] = {'message': 'Username already exists', 'value': username}
    if errors:
        raise InvalidInput("User data not valid", **errors)
    return User.objects.create(username=username, pass_hash=pass_hash)
def all_update(cls, submissions):
    """
    Passed in is a list of dicts representing the data crawled from the
    hacker news front page. Out is the number of new submissions found.
    This function updates the database tables and should be run on a
    regular schedule.
    """
    new = 0
    session = get_config('db_session')
    # Clear every stored rank first; only stories still on the front page
    # get a current_rank reassigned in the loop below.
    session.query(Submission).update({'current_rank': None})
    for sub in submissions:
        try:
            submission = session.query(Submission).get(sub['hn_id'])
            submission.update_from_crawl(sub)
        except AttributeError:
            # .get() returned None -> first time this story has been seen.
            submission = Submission(date_created=datetime.datetime.now(), **sub)
            session.add(submission)
            print "New submission found... getting %s's karma percentile" % submission.submitter
            submission.submitter_color # to warm up the cache
            new += 1
    session.commit()
    return new
def render_error_page(code, exc, mimetype='text/html', traceback=''):
    """
    Render an error response: JSON for json mimetypes, otherwise the
    configured error template, with a bare text fallback when no template
    is configured.
    """
    from giotto.views import get_jinja_template
    exc_name = exc.__class__.__name__
    if 'json' in mimetype:
        return json.dumps({
            'code': code,
            'exception': exc_name,
            'message': str(exc),
        })
    template_name = get_config('error_template')
    if not template_name:
        return "%s %s\n%s" % (code, str(exc), traceback)
    return get_jinja_template(template_name).render(
        code=code,
        exception=exc_name,
        message=str(exc),
        traceback=traceback,
    )
def __init__(self, request, manifest, model_mock=False, errors=None):
    # Resolve an incoming request against the manifest: parse the
    # invocation into a program, positional args, and response mimetype.
    self.request = request
    self.model_mock = model_mock
    # Fall back to an in-memory dummy cache when none is configured.
    self.cache = get_config('cache_engine', DummyKeyValue())
    self.errors = errors
    self.manifest = manifest
    self.middleware_interrupt_exc = None
    self.middleware_control = None
    self.display_data = 'Not calculated yet'
    # the program that corresponds to this invocation
    invocation = self.get_invocation()
    name = self.get_controller_name()
    parsed = self.manifest.parse_invocation(invocation, controller_tag=name)
    self.raw_args = parsed['raw_args']
    self.program = parsed['program']
    self.program.name_on_manifest = parsed['program_name']
    self.path_args = parsed['args']
    # A "superformat" (explicit format suffix in the invocation) wins over
    # any controller-level mimetype override or default.
    if parsed['superformat']:
        self.mimetype = parsed['superformat_mime'] or parsed['superformat']
    else:
        self.mimetype = self.mimetype_override() or self.default_mimetype
def add_album(index):
    """
    Load data/<index>.json and insert the album plus its songs into the
    database. Returns False when the file does not exist, otherwise a
    confirmation string.
    """
    session = get_config('db_session')
    try:
        # FIX: context manager closes the file (the handle was leaked).
        with open("data/%s.json" % index, 'r') as f:
            raw = f.read()
    except IOError:
        return False
    obj = json.loads(raw)
    # Songs are nested in the payload but stored as separate rows.
    songs = obj.pop('songs')
    obj['date_added'] = dateutil.parser.parse(obj['date_added'])
    d = obj['date']
    obj['date'] = dateutil.parser.parse(d) if d else None
    a = Album(**obj)
    session.add(a)
    for song_data in songs:
        d = song_data.get('date', None)
        song_data['date'] = dateutil.parser.parse(d) if d else None
        session.add(Song(**song_data))
    session.commit()
    return "Added album #%s" % index
def get_jinja_template(template_name):
    """Load a Jinja template from the project's views/ directory."""
    views_dir = os.path.join(get_config('project_path'), 'views')
    env = Environment(loader=FileSystemLoader(views_dir))
    return env.get_template(template_name)
import datetime
import requests
import json

from giotto import get_config
from crawl import parse_listings
from sqlalchemy import Column, String, DateTime, Float, Integer

import numpy as np

# Declarative base is supplied through giotto config.
Base = get_config("Base")


def stats():
    # Latest crawled average ounce price per tracked drug, for the stats page.
    stats = []
    for drug in ['mdma', 'weed']:
        latest = CrawlResult.get_latest(drug)
        stats.append({
            'drug': drug,
            'price': latest.worldwide_avg_per_ounce,
            'date': latest.created
        })
    return stats


def show_listings(drug):
    # Render the most recent crawl's listings for one drug.
    # NOTE(review): this function is truncated in the source as given --
    # the returned dict's contents are missing. Preserved as-is.
    latest = CrawlResult.get_latest(drug=drug)
    listings = json.loads(latest.json)
    if drug == "weed":
        drug = "Weed"
    if drug == 'mdma':
        drug = "MDMA"
    return {
def get_file():
    """Open the module-level `file_path` under the project directory and
    return an (open binary handle, mimetype) pair."""
    mime, _encoding = mimetypes.guess_type(file_path)
    full = os.path.join(get_config('project_path'), file_path)
    return open(full, 'rb'), mime or 'application/octet-stream'
def get_latest(cls, drug):
    """Most recent crawl-result row for this drug (IndexError when none exist)."""
    session = get_config("db_session")
    newest_first = session.query(cls).filter_by(drug=drug).order_by(cls.created.desc())
    return newest_first[0]