def write_data_csv(playgrounds=None):
    """
    Outputs a CSV-ified version of our playgrounds DB.

    Defaults to all active playgrounds when none are supplied.
    Writes to www/npr-accessible-playgrounds.csv.
    """
    if not playgrounds:
        playgrounds = get_active_playgrounds()

    # Column order: model fields first, then one boolean column per
    # feature from the copy spreadsheet.
    # NOTE: assumes peewee keeps field values in the instance's '_data'
    # dict and that `playgrounds` supports indexing — true for both the
    # active-playgrounds query and a plain list.
    fields = list(playgrounds[0].__dict__['_data'].keys())
    fields.extend([f['key'] for f in copytext.COPY.feature_list])

    with open('www/npr-accessible-playgrounds.csv', 'wb') as csvfile:
        csvwriter = csv.DictWriter(csvfile, fields)
        csvwriter.writeheader()

        # Bug fix: iterate the resolved `playgrounds` rather than
        # re-querying the full active set — previously the parameter
        # was accepted but silently ignored.
        for playground in playgrounds:
            playground_dict = playground.to_dict()

            # 'features' is a related collection, not a scalar column;
            # it is flattened into per-feature boolean columns below.
            playground_dict.pop('features')

            # Default every feature column to False, then flip on the
            # ones this playground actually has.
            for feature in copytext.COPY.feature_list:
                playground_dict[feature['key']] = False

            if playground.features:
                for f in playground.features:
                    playground_dict[f.slug] = True

            csvwriter.writerow(playground_dict)
def write_data_json(playgrounds=None):
    """
    Output a JSON-ified version of our playgrounds DB.

    Defaults to all active playgrounds when none are supplied.
    Writes to www/npr-accessible-playgrounds.json.
    """
    if not playgrounds:
        playgrounds = get_active_playgrounds()

    # Tallies for the payload's "meta" section.
    state_counts = defaultdict(int)
    feature_counts = defaultdict(int)
    serialized = []

    for playground in playgrounds:
        state_counts[playground.state] += 1

        if playground.features:
            for feature in playground.features:
                # Feature slugs are dash-separated; report them as
                # human-readable, space-separated labels.
                feature_counts[feature.slug.replace('-', ' ')] += 1

        serialized.append(playground.to_dict())

    payload = {
        'meta': {
            'count': playgrounds.count(),
            'states': state_counts,
            'features': feature_counts,
        },
        'playgrounds': serialized,
    }

    with open('www/npr-accessible-playgrounds.json', 'wb') as jsonfile:
        jsonfile.write(json.dumps(payload))
def render_playgrounds(playgrounds=None):
    """ Render the playgrounds pages. """
    from flask import g, url_for

    # Refresh the copy spreadsheet and rebuild static assets before
    # rendering. NOTE(review): shelling out to curl with an interpolated
    # URL — COPY_URL comes from app_config, presumably trusted; confirm.
    os.system('curl -o data/copy.xls "%s"' % app_config.COPY_URL)
    less()
    jst()

    if not playgrounds:
        playgrounds = get_active_playgrounds()

    slugs = [p.slug for p in playgrounds]

    app_config_js()

    # Accumulated across iterations so includes compiled for one page
    # are reused (not recompiled) by later pages.
    compiled_includes = []
    updated_paths = []

    for slug in slugs:
        # Silly fix because url_for require a context
        with app.app.test_request_context():
            path = url_for('_playground', playground_slug=slug)

        with app.app.test_request_context(path=path):
            print 'Rendering %s' % path

            # Flask's `g` carries include-compilation state into the view.
            g.compile_includes = True
            g.compiled_includes = compiled_includes

            # Look the view function up on the module so the route's
            # rendering logic is reused directly.
            view = app.__dict__['_playground']
            content = view(slug)

        # Carry forward whatever includes the view compiled this pass.
        compiled_includes = g.compiled_includes

        # Rendered pages land under a hidden staging directory, mirroring
        # the URL path.
        path = '.playgrounds_html%s' % path

        # Ensure path exists
        head = os.path.split(path)[0]

        try:
            os.makedirs(head)
        except OSError:
            # Directory already exists — safe to ignore.
            pass

        with open(path, 'w') as f:
            f.write(content.encode('utf-8'))

        updated_paths.append(path)

    return updated_paths
def sitemap():
    """ Renders a sitemap. """
    # NOTE(review): an identical `sitemap` is defined again later in this
    # file; at import time that later definition shadows this one. One of
    # the two copies should be removed.
    context = make_context()
    context['pages'] = []

    # Every entry carries today's date as its lastmod value.
    now = date.today().isoformat()

    context['pages'].append(('/', now))

    for playground in get_active_playgrounds():
        context['pages'].append((url_for('_playground', playground_slug=playground.slug), now))

    sitemap = render_template('sitemap.xml', **context)

    # Flask response tuple: (body, status, headers).
    return (sitemap, 200, { 'content-type': 'application/xml' })
def update_search_index(playgrounds=None):
    """
    Upload playgrounds to the CloudSearch index as a single SDF batch.

    Defaults to all active playgrounds when none are supplied. Aborts
    without uploading if the serialized batch exceeds CloudSearch's
    5MB document-batch limit.
    """
    # Fix: the body always supported falling back to the active set, but
    # the parameter previously had no default, so the no-argument call
    # was impossible. `=None` matches the sibling functions' signatures
    # and is backward compatible.
    if not playgrounds:
        playgrounds = get_active_playgrounds()

    print('Generating SDF batch...')
    sdf = [playground.sdf() for playground in playgrounds]
    payload = json.dumps(sdf)

    # CloudSearch rejects document batches larger than 5MB.
    if len(payload) > 5000 * 1024:
        print('Exceeded 5MB limit for SDF uploads!')
        return

    print('Uploading to CloudSearch...')
    response = requests.post('http://%s/2011-02-01/documents/batch' % app_config.CLOUD_SEARCH_DOC_DOMAIN, data=payload, headers={'Content-Type': 'application/json'})

    print(response.status_code)
    print(response.text)
def sitemap():
    """ Renders a sitemap. """
    # All entries share today's date as their lastmod stamp.
    today = date.today().isoformat()

    # Build the page list first: the root page, then one entry per
    # active playground.
    page_entries = [('/', today)]

    for p in get_active_playgrounds():
        page_entries.append(
            (url_for('_playground', playground_slug=p.slug), today))

    context = make_context()
    context['pages'] = page_entries

    rendered = render_template('sitemap.xml', **context)

    # Flask response tuple: (body, status, headers).
    return (rendered, 200, {'content-type': 'application/xml'})