#!/usr/bin/env python
'''
This server manages a queue, which is shared by the workers and the
autoscaler. It also generates tokens. The queue is on port 6200.
'''
import remotequeue
from bottle import route, response, run, request
import random

# set up queue manager
AUTH_KEY = 'changeinprod'
MANAGED_QUEUE = remotequeue.make(AUTH_KEY, public=True)

# constants
LETTERS = 'abcdefghijklmnopqrstuvwxyz'
URL_CACHE = {}


def url_test(url):
    ''' checks whether the string passed looks like a Facebook profile image URL '''
    # needs significant testing!
    if 'http' not in url:
        return False
    return 'facebook' in url.lower() or 'fb' in url.lower()


def generate_token():
    ''' returns a string of 24 random lowercase letters '''
    return ''.join(random.choice(LETTERS) for _ in range(24))
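# --------------------------------------------------------------------------
# Illustrative sketch only: the original route definitions are not shown
# above, so the endpoint below is an assumption about how generate_token()
# might be exposed to workers over HTTP with bottle. The route path and the
# HTTP port are hypothetical; only the managed queue is known to sit on 6200.

@route('/token')
def token_endpoint():
    # hand back a fresh 24-letter token as plain text
    response.content_type = 'text/plain'
    return generate_token()

if __name__ == '__main__':
    run(host='0.0.0.0', port=8080)  # placeholder HTTP port, not the queue's 6200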
# make sure to drop this file in the char-rnn directory
# creates a pool of words, and adds to the pool when it starts getting too small
import traceback
import remotequeue
import time
import rnn
import pronounce
import random

Q = remotequeue.make('secret', False)
amazing_models = open('amazing_models').read().splitlines()


def sample_model(model, chars=5000):
    # model is a 'path; temperature' string; samples that model and returns
    # the output lines, minus the first two and last two
    try:
        model_path = model.split(';')[0].strip()
        temp = model.split(';')[1].strip()
        words = rnn.run_temperature(model_path, temp, chars).splitlines()[2:-2]
        print model, 'generated', len(words)
        return words
    except:
        print traceback.format_exc()
        return []


def create_words(num):
    # keep sampling random models until the pool holds at least num * 1.5 words
    print 'creating', num, 'words'
    pool = set()
    while len(pool) < num * 1.5:
        model = random.choice(amazing_models)
        print 'sampling from model', model
        for word in sample_model(model):