def period_sensitivity_becker( ode_func, params, j, percent, y_init, t0, dt, tf):
    """Finite-difference sensitivity of the Becker-model period to one parameter.

    Perturbs ``params[j]`` by the fractional amount ``percent``, re-simulates,
    and returns ``(perturbed_period - baseline_period) / percent``.
    """
    perturbed = copy.copy(params)
    perturbed[j] = (1 + percent) * params[j]

    t_base, sol_base = ode15s(ode_func, y_init, t0, dt, tf, params)
    t_pert, sol_pert = ode15s(ode_func, y_init, t0, dt, tf, perturbed)

    # The period is measured on the sum of state columns 4..6.
    total_base = np.add(np.add(sol_base[:, 4], sol_base[:, 5]), sol_base[:, 6])
    total_pert = np.add(np.add(sol_pert[:, 4], sol_pert[:, 5]), sol_pert[:, 6])

    per_base = get_period(t_base[:], total_base)
    per_pert = get_period(t_pert[:], total_pert)
    return (per_pert - per_base) / percent
def period_sensitivity( ode_func, params, j, percent, y_init, t0, dt, tf, mid=True):
    """Finite-difference sensitivity of the oscillation period to ``params[j]``.

    Perturbs ``params[j]`` by the fractional amount ``percent``, re-simulates,
    and returns ``(perturbed_period - baseline_period) / percent``. The period
    is measured on state column 1 of the solution.

    When ``mid`` is True, the first half of each simulation is discarded so
    the system can settle before the period is measured.
    """
    params_star = copy.copy(params)
    params_star[j] = (1 + percent) * params[j]
    t, sol = ode15s(ode_func, y_init, t0, dt, tf, params)
    t_star, sol_star = ode15s(ode_func, y_init, t0, dt, tf, params_star)
    # ignore the first half of the simulation if mid is True (let the system settle)
    if mid == True:
        m = int(len(t) / 2)
        per = get_period(t[m:], sol[m:, 1])
        per_star = get_period(t_star[m:], sol_star[m:, 1])
    else:
        # BUG FIX: the original referenced undefined names `y` / `y_star`
        # here, raising NameError whenever mid=False. Use the full traces of
        # the same state column as the `mid` branch instead.
        per = get_period(t, sol[:, 1])
        per_star = get_period(t_star, sol_star[:, 1])
    return (per_star - per) / percent
def get_period(kic):
    """Estimate the period for Kepler target ``kic`` from its per-quarter CSVs.

    Reads every CSV after the first three, collects Lomb-Scargle frequencies,
    stacks the light curves, and cross-checks two period estimates with a
    phase-dispersion-minimization (PDM) step.

    Returns a dict with keys ``period``, ``theta`` and ``periods`` (the early
    "too few files" return additionally carries ``fap``; callers should not
    rely on ``fap`` being present in the success case).
    """
    frequencies = []
    df_list = []
    filenames = utils.get_filenames(utils.BASE_PATH + str(kic), "csv")
    if len(filenames) <= 1:
        # Not enough data to say anything.
        return {"period": 0.0, "fap": 0.0, "theta": 0.0, "periods": []}
    for idx, filename in enumerate(filenames):
        if (idx > 2):  # skip the first three files
            data = utils.pd.read_csv(utils.BASE_PATH + str(kic) + "/" + filename)
            try:
                # NOTE(review): frequency uses PDCSAP_FLUX while the stacked
                # period fit below uses FPDC — confirm this is intentional.
                freq = utils.get_freq_LS(data.TIME.to_numpy(),
                                         data.PDCSAP_FLUX.to_numpy(),
                                         data.EFPDC.to_numpy())
                frequencies.append(freq)
            except Exception as e:
                # Best-effort: log the failing quarter and keep going.
                print(e)
                print(idx)
                print(kic)
            df_list.append(data)
    # pd.concat replaces the deprecated (removed in pandas 2.0) DataFrame.append loop.
    df = utils.pd.concat(df_list) if df_list else utils.pd.DataFrame()
    t = df.TIME.to_numpy()
    y = df.FPDC.to_numpy()
    dy = df.EFPDC.to_numpy()
    period1 = utils.get_period(t, y, dy, frequencies)
    period2 = utils.get_period(t, y, dy)
    periods = [period1, period2]
    nbins = 3
    if period2 < 0.09 or period2 > 100:
        # period2 is outside the plausible range; trust the frequency-seeded fit.
        period = period1
        theta = None
    else:
        try:
            period, theta = utils.get_period_pdm(t, y, dy, periods, nbins)
        except Exception:
            # was a bare `except:` — never swallow SystemExit/KeyboardInterrupt
            period = utils.median(periods)
            theta = None
    # Drop the big frames before returning to release memory sooner.
    df = None
    data = None
    df_list = []
    return {"period": period, "theta": theta, "periods": periods}
def top(group):
    """Render the top-k page for a keyword group (k from the ?k= query arg)."""
    max_amount = request.args.get("k", 10, type=int)
    period, start, end, cache_time = get_period(request, "day")
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format),
        "group": group,
    }
    data = cache(process_top, group, max_amount, params,
                 cache_time=cache_time, path=get_req_path(request))
    if isinstance(data, Response):
        return data
    # Never advertise more entries than we actually have.
    max_amount = min(max_amount, len(data))
    template_data = {
        "data": data,
        "group": group,
        "disp_group": display_group(group),
        "max_amount": str(max_amount),
        "period": period,
        "start": display_datetime(start),
        "end": display_datetime(end),
        "title": make_title("Top {} {}".format(max_amount,
                                               display_group(group))),
    }
    return render_template("top.html", **template_data)
def view_token_co_occurrences(keyword):
    """Show tokens that co-occur with `keyword` over the selected week."""
    period, start, end, cache_time = get_period(request, "week")
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format),
    }
    keyword_data = cache(process_tokens, keyword, params,
                         cache_time=cache_time, path=get_req_path(request))
    if isinstance(keyword_data, Response):
        return keyword_data
    occurrences = []
    for entry in keyword_data["occurrences"]:
        # Skip the keyword itself; normalize the POS tag of everything else.
        if entry["text"] == keyword:
            continue
        entry["pos"] = parse_pos(entry["pos"].split("(")[0])
        occurrences.append(entry)
    template_data = {
        "keyword": keyword,
        "period": period,
        "start": display_datetime(start),
        "end": display_datetime(end),
        "occurrences": zip(range(1, len(occurrences) + 1), occurrences),
    }
    return render_template("occurrences.html", title=make_title(keyword),
                           **template_data)
def view_tweets_about_keyword(keyword):
    """List tweets and retweets that mention `keyword` in the selected week."""
    period, start, end, cache_time = get_period(request, "week")
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format),
    }
    keyword_data = cache(process_details, keyword, params,
                         cache_time=cache_time, path=get_req_path(request))
    if isinstance(keyword_data, Response):
        return keyword_data
    tweets = keyword_data["tweets"]
    template_data = {
        "keyword": keyword,
        "num_tweets": keyword_data["num_tweets"],
        "num_unique_tweets": len(tweets),
        "tweets": tweets,
        "retweets": keyword_data["retweets"],
        "period": period,
        "start": display_datetime(start),
        "end": display_datetime(end),
    }
    return render_template("tweets.html", title=make_title(keyword),
                           **template_data)
def read_csv(kic):
    """Load the per-quarter CSVs for Kepler target ``kic`` and fit a period.

    Skips the first three files, collects per-quarter period estimates, then
    fits a period on the stacked light curve seeded with those estimates.

    Returns a dict ``{"df_list": [...], "period": float}``; period is 0.0
    when five or fewer CSVs exist.
    """
    periods = []
    df_list = []
    filenames = utils.get_filenames(utils.BASE_PATH + str(kic), "csv")
    if len(filenames) <= 5:
        return {"df_list": df_list, "period": 0.0}
    for idx, filename in enumerate(filenames):
        if idx > 2:  # skip the first three files
            data = utils.pd.read_csv(utils.BASE_PATH + str(kic) + "/" + filename)
            try:
                # Hoisted: the original called data.dropna() twice per file.
                clean = data.dropna()
                res = utils.get_signal_parameters(clean.TIME,
                                                  clean.PDC_NORM_FILT)
                periods.append(res["period"])
            except Exception as e:
                # Best-effort: log the failing quarter and keep going.
                print(e)
                print(idx)
                print(kic)
            df_list.append(data)
    # pd.concat replaces the deprecated (removed in pandas 2.0) DataFrame.append loop.
    df = utils.pd.concat(df_list) if df_list else utils.pd.DataFrame()
    period = utils.get_period(df.TIME, df.PDC_NORM_FILT, df.EFPDC, periods)
    return {"df_list": df_list, "period": period}
def view_news(keyword):
    """Show news items about `keyword` for the selected period."""
    period, start, end, cache_time = get_period(request, "week")
    news_data = cache(process_news, keyword, start, end,
                      cache_time=cache_time, path=get_req_path(request))
    if isinstance(news_data, Response):
        return news_data
    # Dutch display name for the selected period.
    period_names = {"day": "dag", "week": "week", "month": "maand"}
    period_name = period_names.get(period, "dag")
    news = []
    for item in news_data:
        item["pubdate"] = display_datetime(item["pubdate"])
        del item["nid"]  # internal id, not rendered
        news.append(item)
    template_data = {
        "keyword": keyword,
        "start": display_datetime(start),
        "end": display_datetime(end),
        "period": period,
        "period_name": period_name,
        "news": news,
    }
    return render_template("news.html", title=make_title(keyword),
                           **template_data)
def view_group(group):
    """Render the keyword listing for one group with counts and percentages."""
    period, start, end, cache_time = get_period(request, "day")
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format),
        "group": group
    }
    keywords = cache(tweety.get_keywords, cache_time=cache_time,
                     path=get_req_path(request), **params)
    if isinstance(keywords, Response):
        return keywords
    # Generator instead of a throwaway list inside sum().
    total = sum(entry["count"] for entry in keywords)
    for keyword in keywords:
        # Guard against ZeroDivisionError when every count is zero.
        share = keyword["count"] / total * 100 if total else 0.0
        keyword["percentage"] = "{:.2f}".format(share)
        keyword["count"] = display_number(keyword["count"])
    nums = range(1, len(keywords) + 1)
    template_data = {
        "nums_keywords": zip(nums, keywords),
        "group": group,
        "disp_group": display_group(group),
        "nums": nums,
        "total": display_number(total),
        "period": period,
        "start": display_datetime(start),
        "end": display_datetime(end)
    }
    return render_template("group.html",
                           title=make_title(template_data["disp_group"]),
                           **template_data)
def get(self):
    """Render the advanced machine-learning page for the requested file.

    Pulls BLE/WiFi device lists and the capture window from `utils` for the
    ``filename`` request parameter.
    """
    filename = self.request.get("filename")
    int_time, finish_time = utils.get_period(filename)
    mlistble, mlistwifi = utils.get_list(filename)
    self.render_template('machineLearningAdvanced.html', {
        'filename': filename,
        'mlistwifi': mlistwifi,
        'mlistble': mlistble,
        'int_time': int_time,
        'finish_time': finish_time,
    })
def top_widget(group):
    """A small widget showing the top 5 in the group."""
    # Request the top 10 so the cached top-10 page data is re-used.
    max_amount = 10
    _, start, end, cache_time = get_period(request, "day")
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format),
        "group": group,
    }
    top_five = cache(process_top, group, max_amount, params,
                     cache_time=cache_time)[:5]
    labels = [entry["label"] for entry in top_five]
    return render_template("widget.html", data=labels)
def explore_param_space_becker( ode_func, params, j, y_init, t0, dt, tf):
    """Sweep ``params[j]`` from -50% to +50% of its nominal value.

    Re-simulates the Becker model at each perturbed value and records the
    period of the settled second half of the trace.

    Returns (parameter values, corresponding periods).
    """
    work = copy.copy(params)
    values = []
    pers = []
    for percent in np.arange(-.5, .5, .001):
        work[j] = (1 + percent) * params[j]
        values.append(work[j])
        t_star, sol_star = ode15s(ode_func, y_init, t0, dt, tf, work)
        # Discard the first half so transients do not bias the period.
        mid = int(len(t_star) / 2)
        total = np.add(np.add(sol_star[mid:, 4], sol_star[mid:, 5]),
                       sol_star[mid:, 6])
        pers.append(get_period(t_star[mid:], total))
    return values, pers
def vdpCircadianError(params):
    """Squared relative deviation of the van der Pol period from 24 hours."""
    # Initial condition and time bounds.
    t0 = 0
    dt = .1
    tend = 480
    y0 = [2, 0]
    # Get the "true solution".
    tm, ym = ode15s(vdp, y0, t0, dt, tend, params)
    # Measure the period on the settled second half of the trace.
    mid = round(len(tm) / 2)
    # NOTE(review): arguments here are (values, times); other call sites in
    # this file pass (times, values) to get_period — confirm which order the
    # helper expects.
    per = get_period(ym[mid:, 1], tm[mid:])
    return math.pow((per - 24) / 24, 2)
def goldbeter_fly_cost_function(params):
    """Cost for fitting the Goldbeter fly model.

    Combines a squared relative period error against 23.6 h with an
    exponential amplitude penalty (amplitudes >= 0.1 contribute ~0.001 or
    less). Returns math.inf when the integration fails or when period /
    amplitude extraction warns (most likely no oscillation).
    """
    # Initial conditions: [M, P0, P1, P2, PN] all start at 1.
    yinit = [1, 1, 1, 1, 1]
    t0 = 0
    tf = 800
    dt = .1
    # Integrate with warnings promoted to errors so failures are caught.
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            t, sol = ode15s(goldbeter_fly, yinit, t0, dt, tf, params,
                            rtol=1e-8)
        except (ValueError, UserWarning):
            return math.inf
    # Analyze only the settled second half of the simulation.
    mid = int(len(t) / 2)
    desired_per = 23.6
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            per = get_period(t[mid:], sol[mid:, 1])
            amps = get_amps(sol[mid:])
        except RuntimeWarning:
            # something went wrong, most likely no oscillation was created
            return math.inf
    # Exponential decay chosen so an amplitude of 0.1 costs about 0.001.
    rate = math.log(.001) / .1
    amp_cost = np.sum(np.exp(np.multiply(amps, rate)))
    per_cost = math.pow((per - desired_per) / desired_per, 2)
    return per_cost + amp_cost
def gonzeGoodwinFullCircadianError2(params):
    """Cost for the Gonze-Goodwin model against a 24 h circadian target.

    Squared relative period error plus an exponential amplitude penalty
    (amplitudes above 0.1 contribute very little). Returns math.inf when the
    integration fails or period/amplitude extraction warns.
    """
    # Initial conditions: [M0, P0, I0] all start at 1.
    yinit = [1, 1, 1]
    t0 = 0
    tf = 800
    dt = .1
    # Simulate the model with warnings promoted to errors.
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            t, sol = ode15s(gonze_goodwin, yinit, t0, dt, tf, params,
                            rtol=1e-8)
        except (ValueError, UserWarning):
            return math.inf
    # Analyze only the settled second half of the simulation.
    mid = int(len(t) / 2)
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            per = get_period(t[mid:], sol[mid:, 1])
            amps = get_amps(sol[mid:])
        except RuntimeWarning:
            # something went wrong, most likely no oscillation was created
            return math.inf
    per_err = math.pow((per - 24) / 24, 2)
    # an amplitude larger than 0.1 is going to have low cost
    rate = math.log(0.001) / 0.1
    amp_err = np.sum(np.exp(np.multiply(rate, amps)))
    return per_err + amp_err
def home():
    """Landing page: top keywords for both groups plus the last sync time."""
    sync_time = redis.get("sync_time")
    if sync_time:
        sync_time = sync_time.decode("utf-8")
    max_amount = request.args.get("k", 10, type=int)
    period, start, end, cache_time = get_period(request, "day")
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format),
        "group": "bloemen",
    }
    bloemen = cache(process_top, "bloemen", max_amount, params,
                    cache_time=cache_time, path=get_req_path(request))
    # Re-use the same params dict for the second group.
    params["group"] = "groente_en_fruit"
    groente_en_fruit = cache(process_top, "groente_en_fruit", max_amount,
                             params, cache_time=cache_time,
                             path=get_req_path(request))
    if isinstance(bloemen, Response):
        return bloemen
    if isinstance(groente_en_fruit, Response):
        return groente_en_fruit
    template_data = {
        "bloemen": bloemen,
        "groente_en_fruit": groente_en_fruit,
        "sync_time": sync_time,
        "start": display_datetime(start),
        "end": display_datetime(end),
        "period": period,
    }
    return render_template("home.html",
                           title=make_title("BigTU research project"),
                           **template_data)
def main():
    """Append yesterday's measurement to the report covering its period.

    Creates the report if it does not exist yet; raises when yesterday's
    measurement is missing.
    """
    # Work with yesterday's date throughout.
    logger.info('Calculating yesterday\'s day')
    yesterday = get_yesterday_date()

    # Look up yesterday's measurement; abort loudly if it is missing.
    logger.info(f'Getting measurement for {get_formatted_date(yesterday)}')
    measurement = Measurement.objects(
        created=get_formatted_date(yesterday)).first()
    if measurement is None:
        raise Exception(
            f'Measurement for date={get_formatted_date(yesterday)} was not found'
        )

    # Determine which reporting period yesterday belongs to.
    logger.info(f'Calculating period for {get_formatted_date(yesterday)}')
    period = get_period(yesterday)

    # Fetch the period's report, creating an empty one when absent.
    logger.info(f'Getting report for {period}')
    report = Report.objects(period=period).first()
    if report is None:
        logger.info(f'Report not found, creating a new report for {period}')
        report = Report(
            period=period,
            values=[],
            user=User.objects(email='*****@*****.**').first())
        report.save()

    logger.info(
        f'Adding a new measurement for {measurement.created} in period {period}'
    )
    report_item = MeasurementValue(
        sys=measurement.sys,
        dia=measurement.dia,
        pul=measurement.pul,
        date=measurement.created,
        ok=measurement.is_ok(),
    )
    report.values.append(report_item)
    report.save()
    logger.info('Done')
def view_stories(group):
    """Render the storify timeline for a group: active then closed stories."""
    period, start, end, cache_time = get_period(request, "week")
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format),
    }
    story_data = cache(process_stories, group, params,
                       cache_time=cache_time, path=get_req_path(request))
    if isinstance(story_data, Response):
        return story_data
    active_stories, closed_stories = story_data

    storify_data = []
    timeline_data = []
    # Timeline bounds in milliseconds since the epoch.
    timeline_start = timegm(start.timetuple()) * 1000
    timeline_end = timegm(end.timetuple()) * 1000
    display_tweets = 11
    display_active_stories = 10
    display_closed_stories = 5

    def _collect(stories, limit):
        # Append filtered stories until `limit` total stories are collected.
        for story in stories:
            if len(storify_data) >= limit:
                break
            story = filter_story(story, display_tweets)
            timeline_data.append({
                "label": len(storify_data),
                "times": story["cluster_details"],
            })
            del story["cluster_details"]
            storify_data.append(story)

    _collect(active_stories, display_active_stories)
    _collect(closed_stories, display_active_stories + display_closed_stories)

    template_data = {
        "group": display_group(group),
        "storify_data": json.dumps(storify_data),
        "timeline_data": json.dumps(timeline_data),
        "timeline_start_ts": timeline_start,
        "timeline_end_ts": timeline_end,
        "display_tweets": display_tweets,
        "num_stories": min(display_active_stories + display_closed_stories,
                           len(storify_data)),
        "start": display_datetime(start),
        "end": display_datetime(end),
        "period": period,
    }
    return render_template("storify.html", title=make_title(group),
                           **template_data)
# NOTE(review): script fragment — `M`, `P0`, `P1`, `params` and `sols` are
# defined earlier in the file (not visible here).
P2 = .4
PN = .4
# Initial state vector [M, P0, P1, P2, PN] for the Goldbeter fly model.
yinit = [M, P0, P1, P2, PN]
t0 = 0
tf = 800
dt = .1
t, sol = ode15s(goldbeter_fly, yinit, t0, dt, tf, params)
sols.append(sol)
# Trim the last j samples before analysis.
j = 2
# NOTE(review): arguments here are (values, times); other call sites in this
# file pass (times, values) to get_period — confirm which order the helper
# expects.
print(get_period(np.sum(sol[:-j, 1:4], axis=1), t[:-j]))
print(get_amps(sol[:-j], t[:-j]))
# Plotting scaffolding kept for manual re-enabling during debugging:
# plt.plot(t[:-j], sol[:-j, 0], 'b', label='M')
# plt.plot(t[:-j], sol[:-j, 1], 'g', label='P0')
# plt.plot(t[:-j], sol[:-j, 2], 'm', label='P1')
# plt.plot(t[:-j], sol[:-j, 3], 'r', label='P2')
# plt.plot(t[:-j], sol[:-j, 4], 'k', label='PN')
# plt.plot(t[:-j], np.sum(sol[:-j, 1:4], axis=1), 'c', label='PT')
# plt.legend(loc='best')
# plt.xlabel('time / h')
# plt.ylabel('PER forms or M')
# plt.ylim(ymin=0, ymax=5.5)
# plt.title('Oscillations in PER over Time')
# plt.grid()
# plt.show()
def view_keyword(keyword):
    """Render the detail page for a single keyword.

    Gathers tweets, URLs, tag cloud, photos, interaction graph, sentiment,
    a Google-Trends period string and news items, then feeds them to the
    ``keyword.html`` template.
    """
    deluxe = is_deluxe(
        current_user
    )  # users in the "deluxe" group can specify their own time period
    period, start, end, cache_time = get_period(request, "week")
    # Custom periods are restricted to deluxe users and to at most one month.
    if period == "custom":
        if not deluxe:
            flash(
                "Deze functionaliteit is alleen beschikbaar voor goedgekeurde gebruikers.",
                "error")
            return redirect(url_for("horti.home"))
        if (end - start).days > 31:
            flash("Periode langer dan een maand is niet toegestaan", "error")
            return redirect(url_for("horti.home"))
        if start > end:
            flash("De einddatum moet na de begindatum zijn.", "error")
            return redirect(url_for("horti.home"))
    params = {
        "start": start.strftime(time_format),
        "end": end.strftime(time_format)
    }
    keyword_data = cache(process_details,
                         keyword,
                         params,
                         cache_time=cache_time,
                         path=get_req_path(request))
    if isinstance(keyword_data, Response):
        return keyword_data
    # Keep only the first 16 URLs and add a shortened display form.
    urls = keyword_data["URLs"][:16]
    for url in urls:
        url["display_url"] = shorten(url["link"], 80)
    del keyword_data["URLs"]
    keyword_data["tagCloud"] = keyword_data["tagCloud"][:200]
    photos = enumerate(keyword_data["photos"]
                       )  # number of photo's is limited in processing.py
    del keyword_data["photos"]
    display_tweets = 11
    max_tweets = 200
    keyword_data["tweets"] = keyword_data["tweets"][:max_tweets]
    keyword_data["retweets"] = keyword_data["retweets"][:display_tweets]
    keyword_data["interaction_tweets"] = keyword_data[
        "interaction_tweets"][:max_tweets]
    # Pull fields that are rendered separately out of the JSON payload.
    num_tweets = keyword_data["num_tweets"]
    del keyword_data["num_tweets"]
    graph = keyword_data["graph"]
    del keyword_data["graph"]
    polarity = keyword_data["polarity"]
    del keyword_data["polarity"]
    polarity_face = display_polarity(polarity)
    # Map the selected period onto a Google-Trends range string.
    gtrends_period = {
        "day": "now 1-d",
        "week": "now 7-d",
        "month": "today 1-m"
    }.get(period, "now 1-d")
    # Dutch display name for the period.
    period_name = {
        "day": "dag",
        "week": "week",
        "month": "maand"
    }.get(period, "dag")
    news = []
    for item in keyword_data["news"]:
        item["pubdate"] = display_datetime(item["pubdate"])
        del item["nid"]  # internal news id, not rendered
        news.append(item)
    del keyword_data["news"]
    template_data = {
        "keyword": keyword,
        "keyword_data": json.dumps(keyword_data),
        "deluxe": deluxe,
        "num_tweets": display_number(num_tweets),
        "urls": urls,
        "graph": json.dumps(graph),
        "photos": photos,
        "display_tweets": display_tweets,
        "start": display_datetime(start),
        "end": display_datetime(end),
        "period": period,
        "period_name": period_name,
        "polarity": polarity,
        "polarity_face": polarity_face,
        "gtrends_period": gtrends_period,
        "news": news
    }
    return render_template("keyword.html",
                           title=make_title(keyword),
                           **template_data)