def testex(request, filename="wordgap/data/test.txt", pos="v", wordnet="wordnet"): # alles in unicode strings # [[wordsbefore, wordsafter, token, [dis]], ...] if wordnet=='wordnet': fast = True else: fast = False filename = "wordgap/data/" + filename + ".txt" if path.exists(filename) and path.isfile(filename) and access(filename, R_OK): text = tools.load_text(filename) else: text = tools.load_text("wordgap/data/test.txt") if (pos=='v' or pos=='n' or pos=='a'): ex = wordex.create_ex(text, pos=pos, fast=fast) elif pos=='p': ex = prepex.create_prepex(text) else: print "invalid POS Tag" return Http404 # reset session data request.session.flush() request.session["ex"]=ex request.session["wrong"] = 0 request.session["right"] = 0 return render_to_response("templates/template_start.html")
def read_features(filename_fet, nchannels, fetdim, freq, do_process=True):
    """Read a .fet file and return the normalize features array,
    as well as the spiketimes.

    Tries the integer, space-separated layout first and falls back to
    the float, tab-separated layout on a parse failure.
    """
    try:
        raw = load_text(filename_fet, np.int64, skiprows=1, delimiter=' ')
    except ValueError:
        raw = load_text(filename_fet, np.float32, skiprows=1, delimiter='\t')
    if not do_process:
        return raw
    return process_features(raw, fetdim, nchannels, freq,
                            nfet=first_row(filename_fet))
def weather_data():
    """Scrape the configured weather widget and return a dict with
    "morning", "day" and "night" entries, each holding an icon title
    and a temperature interval."""
    base_path = datamanager.read_path()
    cfg = datamanager.read_json(base_path + "weather/", "request_data")

    page = tools.load_html(cfg["url"], cfg["headers"])
    soup = bs4.BeautifulSoup(page.text, "html.parser")

    widget = cfg["widget_selector"]

    # Raw widget pieces: icon titles, time labels, temperatures.
    icon_title_list = tools.load_icon_titles(
        widget + " " + cfg["icon_selector"], soup,
        cfg["icon_titles"]["tag_name"], cfg["icon_titles"]["attr_title"])
    time_list = tools.load_text(widget + " " + cfg["time_selector"], soup,
                                cfg["time"]["tag_name"])
    warm_list = tools.load_text(widget + " " + cfg["warm_selector"], soup,
                                cfg["warm"]["tag_name"])

    # Split the flat lists into per-daypart values.
    time_indexes = tools.get_list_time(time_list)
    icon_titles = tools.get_list_icon_titles(icon_title_list, time_indexes)
    warm_intervals = tools.get_list_warm(warm_list, time_indexes)

    return {
        part: {
            "icon_title": icon_titles[part + "_icon_title"],
            "warm": warm_intervals[part + "_warm_interval"],
        }
        for part in ("morning", "day", "night")
    }
def testprepex(request):
    """Create a preposition gap exercise from the bundled test text and
    initialize a clean session for it; 404 when no exercise results."""
    demo_text = tools.load_text("wordgap/data/test.txt")
    ex = prepex.create_prepex(demo_text)
    if ex is None:
        raise Http404
    # Drop any previous exercise state before storing the new one.
    request.session.flush()
    request.session["ex"] = ex
    request.session["right"] = 0
    request.session["wrong"] = 0
    return render_to_response("templates/template_start.html")
def read_probe(filename_probe, fileindex):
    """fileindex is the shank index.

    Returns the processed probe geometry, or None when the file is
    missing or cannot be parsed in either supported flavor.
    """
    if not filename_probe:
        return
    if os.path.exists(filename_probe):
        # Try the text-flavored probe file.
        try:
            probe = load_text(filename_probe, np.float32)
        # except Exception (not bare except): keep the best-effort
        # fallback but let KeyboardInterrupt/SystemExit propagate.
        except Exception:
            # Or try the Python-flavored probe file (SpikeDetekt, with an
            # extra field 'geometry').
            try:
                ns = {}
                execfile(filename_probe, ns)
                probe = ns['geometry'][fileindex]
                # Channels sorted by key so rows are in channel order.
                probe = np.array([probe[i] for i in sorted(probe.keys())],
                                 dtype=np.float32)
            except Exception:
                return None
        return process_probe(probe)
def read_waveforms(filename_spk, nsamples, nchannels):
    """Load spike waveforms from a .spk file, retrying in text format
    when the binary payload does not tile into nsamples x nchannels."""
    data = np.array(load_binary(filename_spk), dtype=np.float32)
    total = data.size
    if total % nsamples != 0 or total % nchannels != 0:
        # Size mismatch: the file is presumably text-formatted instead.
        data = load_text(filename_spk, np.float32)
    return process_waveforms(data, nsamples, nchannels)
def read_masks(filename_mask, fetdim):
    """Read a .mask file (header row skipped) and return the processed
    per-feature masks."""
    raw_masks = load_text(filename_mask, np.float32, skiprows=1)
    return process_masks(raw_masks, fetdim)
def read_group_info(filename_groupinfo):
    """Read the tab-delimited group-info file and return it processed.

    For each group (absolute indexing): color index, and name.
    """
    raw = load_text(filename_groupinfo, str, delimiter='\t')
    return process_group_info(raw)
def read_cluster_info(filename_acluinfo):
    """Read the cluster-info file and return it processed.

    For each cluster (absolute indexing): cluster index, color index,
    and group index.
    """
    raw = load_text(filename_acluinfo, np.int32)
    return process_cluster_info(raw)
def read_res(filename_res, freq=None):
    """Read spike times from a .res file; freq (optional) is forwarded
    to the post-processing step."""
    spiketimes = load_text(filename_res, np.int32)
    return process_res(spiketimes, freq)
def read_clusters(filename_clu):
    """Read a .clu file and return the processed cluster assignments."""
    raw = load_text(filename_clu, np.int32)
    return process_clusters(raw)
def testjson(request):
    """Serve the preposition exercise for the bundled test text as JSON."""
    payload = simplejson.dumps(
        prepex.create_prepex(tools.load_text("wordgap/data/test.txt")))
    return HttpResponse(payload, mimetype='application/json')