def graph_csv(output_directory, csv_file, plot_title, output_filename, y_label=None, precision=None,
              graph_height="600", graph_width="1500", graph_type="line", graph_color="black"):
    """Single metric graphing function using matplotlib"""
    if not os.path.getsize(csv_file):
        return False, None
    y_label = y_label or plot_title
    days, impressions = numpy.loadtxt(csv_file, unpack=True, delimiter=",",
                                      converters={0: convert_to_mdate})
    fig = plt.figure()
    fig.set_size_inches(float(graph_width) / 80, float(graph_height) / 80)
    if graph_type == "line":
        line_style = "-"
        marker = " "
    else:
        marker = "."
        line_style = None
    plt.plot_date(x=days, y=impressions, linestyle=line_style, marker=marker, color=graph_color)
    plt.title(plot_title)
    plt.ylabel(y_label)
    plt.grid(True)
    # Get current axis and its xtick labels
    labels = plt.gca().get_xticklabels()
    for label in labels:
        label.set_rotation(30)
    plot_file_name = os.path.join(output_directory, output_filename + ".png")
    fig.savefig(plot_file_name)
    plt.close()
    return True, None
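# Note: convert_to_mdate (used by graph_csv above and by graph_csv_n and
# graph_csv_new below) is not defined anywhere in this collection. A minimal
# sketch, assuming the CSV's first column holds "YYYY-MM-DD HH:MM:SS" strings
# (both the helper body and the timestamp format are assumptions):
import matplotlib.dates as mdates
from datetime import datetime

def convert_to_mdate(value):
    # numpy.loadtxt hands converters bytes under Python 3, str under Python 2
    if isinstance(value, bytes):
        value = value.decode("utf-8")
    return mdates.date2num(datetime.strptime(value, "%Y-%m-%d %H:%M:%S"))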
def graph_data(stock):
    URL = "http://chartapi.finance.yahoo.com/instrument/1.0/%s/chartdata;type=quote;range=3y/csv" % (stock)
    source_code = urllib.request.urlopen(URL).read().decode()
    stock_data = []
    split_source = source_code.split("\n")
    for line in split_source:
        # hard-coded check to skip header lines
        if ":" not in line:
            stock_data.append(line)
    # bytespdate2num format codes:
    # %Y = full year, e.g. 2017
    # %y = two-digit year, e.g. 17
    # %m = month number
    # %d = day number
    # %H = hours
    # %M = minutes
    # %S = seconds
    date, close_price, high_price, low_price, open_price, volume = np.loadtxt(
        stock_data, delimiter=",", unpack=True,
        converters={0: bytespdate2num('%Y%m%d')})
    plt.plot_date(date, close_price, '-', label="Price")
    plt.xlabel("Date")
    plt.ylabel("Price ($)")
    plt.title('Yahoo Finance')
    plt.legend()
    plt.show()  # show the drawing
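# bytespdate2num, used above and in stockData below, is a converter factory
# that turns the bytes fields numpy.loadtxt yields into matplotlib date
# numbers. A minimal sketch that avoids mdates.strpdate2num (removed from
# recent matplotlib releases); the encoding default is an assumption:
import matplotlib.dates as mdates
from datetime import datetime

def bytespdate2num(fmt, encoding='utf-8'):
    def converter(b):
        return mdates.date2num(datetime.strptime(b.decode(encoding), fmt))
    return converter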
def graph(csv_file, filename, bytes2str):
    '''Create a line graph from a two column csv file.'''
    unit = configs['unit']
    date, value = np.loadtxt(csv_file, delimiter=',', unpack=True,
                             converters={0: bytes2str})
    fig = plt.figure(figsize=(10, 3.5))
    fig.add_subplot(111, axisbg='white', frameon=False)
    rcParams.update({'font.size': 9})
    plt.plot_date(x=date, y=value, ls='solid', linewidth=2, color='#FB921D', fmt=':')
    title = "Sump Pit Water Level {}".format(time.strftime('%Y-%m-%d %H:%M'))
    title_set = plt.title(title)
    title_set.set_y(1.09)
    plt.subplots_adjust(top=0.86)
    if unit == 'imperial':
        plt.ylabel('inches')
    if unit == 'metric':
        plt.ylabel('centimeters')
    plt.xlabel('Time of Day')
    plt.xticks(rotation=30)
    plt.grid(True, color='#ECE5DE', linestyle='solid')
    plt.tick_params(axis='x', bottom='off', top='off')
    plt.tick_params(axis='y', left='off', right='off')
    plt.savefig(filename, dpi=72)
def plot_on_timeline(col, verbose=True):
    """Plots points on a timeline

    Parameters
    ----------
    col : np.array
    verbose : boolean
        iff True, display the graph

    Returns
    -------
    matplotlib.figure.Figure
        Figure containing plot

    """
    col = utils.check_col(col)
    # http://stackoverflow.com/questions/1574088/plotting-time-in-python-with-matplotlib
    if is_nd(col):
        col = col.astype(datetime)
    dates = matplotlib.dates.date2num(col)
    fig = plt.figure()
    plt.plot_date(dates, [0] * len(dates))
    if verbose:
        plt.show()
    return fig
def graph_date_v_charge(data):
    """
    Graphs charge data over time.

    @param data: Parsed data from log file; output of L{parseLog}
    """
    print "Graphing Date v. Charge"
    # Build color data based on charge percentage
    colors = []
    for charge in data['charges']:
        colors.append((1 - charge / 100.0, charge / 100.0, 0.3))
    f1 = pyplot.figure(1)
    for i in range(len(data['dates'])):
        pyplot.plot_date(data['dates'][i], data['charges'][i], 'o', color=colors[i],
                         markersize=3, markeredgewidth=0.1)
    pyplot.ylim(0, 100)
    pyplot.xlabel('Date')
    pyplot.ylabel('Charge [%]')
    pyplot.title('Laptop Battery Charge')
    pyplot.grid(True)
    pyplot.figure(1).autofmt_xdate()
    pyplot.savefig(outfile + ".png", dpi=500)
    pyplot.close(f1)
def _graph_equity_curve(self, equity_curve_dataframe):
    """
    Charts the results.

    :param equity_curve_dataframe:
    """
    date_axis = equity_curve_dataframe.index
    equity_curve = equity_curve_dataframe['equity_curve']
    returns = equity_curve_dataframe['returns']
    drawdown = equity_curve_dataframe['drawdown']
    plt.figure(1)
    fig = plt.subplot(311)
    plt.title('S&P500 Forecasting using QDA with lag 2')
    plt.ylabel('Portfolio value %')
    plt.plot_date(date_axis, equity_curve, '-')
    plt.grid(True)
    plt.subplot(312)
    plt.ylabel('Period returns %')
    plt.bar(date_axis, returns)
    plt.grid(True)
    plt.subplot(313)
    plt.ylabel('Drawdown %')
    plt.plot_date(date_axis, drawdown, '-')
    plt.grid(True)
    plt.show()
def get_data_from_internet(stack, date):
    baseurl = "http://chartapi.finance.yahoo.com/instrument/1.0/" + stack + "/chartdata;type=quote;range=" + date + "/csv"
    source_code = urllib.request.urlopen(baseurl).read().decode()
    # print(source_code)
    stock_data = []
    split_source = source_code.split("\n")
    for line in split_source:
        split_line = line.split(',')
        if len(split_line) == 6:
            if 'values' not in line and 'labels' not in line:
                stock_data.append(line)
    print(stock_data)
    if 'd' in date:
        date, close, high, low, openp, volume = np.loadtxt(
            stock_data, delimiter=',', unpack=True,
            converters={0: bytesdata2num('%y%m%d %H%M', 'd')})
    else:
        date, close, high, low, openp, volume = np.loadtxt(
            stock_data, delimiter=',', unpack=True,
            converters={0: bytesdata2num('%Y%m%d', 'y')})
    plt.plot_date(date, close, '-', label='Stock price loaded from the web')
    plt.xlabel("Time")
    plt.ylabel("Stock price")
    plt.title("Practice loading data from the web!")
    rect = plt.figure(1).patch
    rect.set_facecolor('c')
    plt.legend()  # required when plot() is given a label
    plt.show()
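# bytesdata2num above is undefined in this collection; judging by the call
# sites, its second argument only distinguishes intraday ('d') from daily
# ('y') ranges while the strptime format drives the parsing. A hedged sketch
# (the flag handling is an assumption):
import matplotlib.dates as mdates
from datetime import datetime

def bytesdata2num(fmt, range_flag):
    # range_flag ('d' or 'y') is accepted for call-site compatibility;
    # parsing here relies entirely on fmt
    def converter(b):
        return mdates.date2num(datetime.strptime(b.decode('utf-8'), fmt))
    return converter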
def handle(self, *args, **options):
    # Get user join dates
    User = get_user_model()
    datetimes = User.objects.values_list('date_joined', flat=True) \
                            .order_by('date_joined')
    dates = [d.date() for d in datetimes]  # a list, so it can be indexed below

    # Get some auxiliary values
    min_date = date2num(dates[0])
    max_date = date2num(dates[-1])
    days = max_date - min_date + 1

    # Initialize X and Y axes
    x = np.arange(min_date, max_date + 1)
    y = np.zeros(int(days))

    # Iterate over dates, increase registration array
    for date in dates:
        index = int(date2num(date) - min_date)
        y[index] += 1
    y_sum = np.cumsum(y)

    # Plot
    plt.plot_date(x, y_sum, xdate=True, ydate=False, ls='-', ms=0, color='#16171E')
    plt.fill_between(x, 0, y_sum, facecolor='#D0F3FF')
    plt.title('Studentenportal: Registered users')
    plt.rc('font', size=8)

    if options['save']:
        plt.savefig(options['save'])
    else:
        plt.show()
def draw_viz(t1, t2):
    data_store = sds()
    delta = datetime.timedelta(days=1)
    t1_data = []
    t2_data = []
    dates = []
    d_cursor = data_store.get_company_data(t1)
    for d in d_cursor:
        t1_data.append(d["Adj Clos"])
        dates.append(d["date"])
    d_cursor = data_store.get_company_data(t2)
    for d in d_cursor:
        t2_data.append(d["Adj Clos"])
    print len(t1_data), len(t2_data)
    p, sprd, beta = fc.get_adf(t1, t2, spread=True)
    dates = mpl.dates.date2num(dates)
    p1 = plt.plot_date(dates, sprd, "b-.", label="Sprd")
    p2 = plt.plot_date(dates, t1_data, "g-.", label=t1)
    p3 = plt.plot_date(dates, t2_data, "r-.", label=t2)
    plt.grid(True)
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.0)

    def add_vert_line(event):
        # axvspan with equal edges draws a vertical line at the click position
        plt.axvspan(event.xdata, event.xdata, ls="--")

    print "Beta: %f" % beta
    plt.show()
def plotBrokerQueue(dataTask, filename):
    """Generates the broker queue length graphic."""
    print("Plotting broker queue length for {0}.".format(filename))
    plt.figure()

    # Queue length
    plt.subplot(211)
    for fichier, vals in dataTask.items():
        if type(vals) == list:
            # Data is from broker
            timestamps = list(map(datetime.fromtimestamp,
                                  map(int, list(zip(*vals))[0])))
            plt.plot_date(timestamps, list(zip(*vals))[2], linewidth=1.0,
                          marker='o', markersize=2, label=fichier)
    plt.title('Broker queue length')
    plt.ylabel('Tasks')

    # Requests received
    plt.subplot(212)
    for fichier, vals in dataTask.items():
        if type(vals) == list:
            # Data is from broker
            timestamps = list(map(datetime.fromtimestamp,
                                  map(int, list(zip(*vals))[0])))
            plt.plot_date(timestamps, list(zip(*vals))[3], linewidth=1.0,
                          marker='o', markersize=2, label=fichier)
    plt.title('Broker pending requests')
    plt.xlabel('time (s)')
    plt.ylabel('Requests')
    plt.savefig(filename)
def plot_timeline_epoch(usr1, usr2, interaction1=None, interaction2=None):
    print "########## Plotting for ", usr1, usr2, "###################"
    if interaction1 is not None:
        tweets_per_day1 = extract_daily_interaction(interaction1)
        plt.plot_date(x=tweets_per_day1.keys(), y=tweets_per_day1.values(), fmt=u'b*')
        print usr1, len(tweets_per_day1.keys()), sorted(tweets_per_day1.keys())
    if interaction2 is not None:
        #print usr2, len(interaction2)
        tweets_per_day2 = extract_daily_interaction(interaction2)
        plt.plot_date(x=tweets_per_day2.keys(), y=tweets_per_day2.values(), fmt=u'xr')
    if interaction1 is not None and interaction2 is not None:
        print usr1, usr2
        plt.title("Mentions 2 users: from " + usr1 + " (blue); from " + usr2 + " (red).")
    elif interaction1 is not None:
        plt.title("Mentions from " + usr1 + " to " + usr2 + ".")
    elif interaction2 is not None:
        plt.title("Mentions from " + usr2 + " to " + usr1 + ".")
    else:
        print "No interaction between 2 users to be plotted."
        return
    plt.xticks(rotation=70)
    plt.ylabel("# tweets per day")
    plt.grid(True)
    plt_name = WORKING_FOLDER + "2_usr_interaction/interaction_" + usr1 + "_and_" + usr2 + ".png"
    plt.savefig(plt_name, bbox_inches='tight', dpi=440)
    print "########## Plotting DONE for ", usr1, usr2, "###############"
    plt.clf()
def _analyze(recordIdList):
    n_artists = get_n_artists()
    n_days = get_n_days(isX=False, isTrain=False)
    resultDict = dict()
    for recordId in recordIdList:
        resultDict[recordId] = getPredict(recordId)
    pdf = PdfPages('../report/record.pdf')
    for i in range(n_artists):
        fig = plt.figure()
        ax = plt.axes()
        ax.xaxis.set_major_formatter(DateFormatter('%m%d'))
        for recordId in recordIdList:
            result = resultDict[recordId]
            dsList = result[:, 1]
            firstDay = datetime.strptime(dsList[0], '%Y%m%d')
            artist_id = result[i * n_days, 0]
            xData = np.arange(n_days) + date2num(firstDay)
            yData = result[i * n_days:(i + 1) * n_days, 2]
            plt.plot_date(xData, yData, fmt='-', label=recordId)
        plt.legend(loc='best', shadow=True)
        plt.xlabel('day')
        plt.ylabel('plays')
        plt.title(artist_id)
        pdf.savefig(fig)
        plt.close()
    pdf.close()
def stockData(stock):
    stock_price_url = 'http://chartapi.finance.yahoo.com/instrument/1.0/' + stock + '/chartdata;type=quote;range=10y/csv'
    source_req = urllib2.Request(stock_price_url)
    source_response = urllib2.urlopen(source_req)
    source_code = source_response.read().decode()
    stock_data = []
    split_source = source_code.split('\n')
    for line in split_source:
        split_line = line.split(',')
        if len(split_line) == 6:
            if 'values' not in line and 'labels' not in line:
                stock_data.append(line)
    date, closep, highp, lowp, openp, volume = np.loadtxt(
        stock_data, delimiter=',', unpack=True,
        converters={0: bytespdate2num('%Y%m%d')})
    plt.plot_date(date, closep, '-', label='price')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Interesting Graph\nCheck it out')
    plt.show()
def plot_trend_graph_all_tests(self, save_path='', file_name='_trend_graph.png'):
    time_format1 = '%d-%m-%Y-%H:%M'
    time_format2 = '%Y-%m-%d-%H:%M'
    for test in self.tests:
        test_data = test.results_df[test.results_df.columns[2]].tolist()
        test_time_stamps = test.results_df[test.results_df.columns[3]].tolist()
        start_date = test_time_stamps[0]
        test_time_stamps.append(self.end_date + '-23:59')
        test_data.append(test_data[-1])
        float_test_time_stamps = []
        for ts in test_time_stamps:
            try:
                float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format1)))
            except ValueError:
                # fall back to the second timestamp format
                float_test_time_stamps.append(matdates.date2num(datetime.strptime(ts, time_format2)))
        plt.plot_date(x=float_test_time_stamps, y=test_data, label=test.name, fmt='.-', xdate=True)
    plt.legend(fontsize='small', loc='best')
    plt.ylabel('MPPS/Core (Norm)')
    plt.title('Setup: ' + self.name)
    plt.tick_params(axis='x',
                    which='both',
                    bottom='off',
                    top='off',
                    labelbottom='off')
    plt.xlabel('Time Period: ' + start_date[:-6] + ' - ' + self.end_date)
    if save_path:
        plt.savefig(os.path.join(save_path, self.name + file_name))
        if not self.setup_trend_stats.empty:
            (self.setup_trend_stats.round(2)).to_csv(
                os.path.join(save_path, self.name + '_trend_stats.csv'))
    plt.close('all')
def plotAllData(self, allData, particleSize, room):
    dates = allData['Time']
    print 'Plotting and saving averages'
    for shift in ['Day Shift', 'Night Shift', 'Empty Lab']:
        pyplot.figure(num=None, figsize=(16, 9), dpi=100, facecolor='w', edgecolor='k')
        pyplot.plot_date(dates, allData[shift + ' Avg'], 'b-')
        pyplot.xlabel('Date (MT)')
        pyplot.ylabel('Average Count')
        pyplot.title(room + ' ' + shift + ' Averages for ' + particleSize + ' um Counts')
        pyplot.savefig(room + particleSize + 'Avg' + shift[:-6] + '.png')
        pyplot.yscale('log')  # set_yscale is an Axes method; the pyplot-level call is yscale
        pyplot.title(room + ' ' + shift + ' Averages for ' + particleSize + ' um Counts Log Scale')
        # NOTE: reuses the same filename, so the log-scale figure overwrites the linear one
        pyplot.savefig(room + particleSize + 'Avg' + shift[:-6] + '.png')
    print 'Plotting and saving baselines'
    for shift in ['Day Shift', 'Night Shift', 'Empty Lab']:
        pyplot.figure(num=None, figsize=(16, 9), dpi=100, facecolor='w', edgecolor='k')
        pyplot.plot_date(dates, allData[shift + ' Base'], 'b-')
        pyplot.xlabel('Date (MT)')
        pyplot.ylabel('Baseline')
        pyplot.title(room + ' ' + shift + ' Baselines for ' + particleSize + ' um Counts')
        pyplot.savefig(room + particleSize + 'Base' + shift[:-6] + '.png')
        pyplot.yscale('log')
        pyplot.title(room + ' ' + shift + ' Baselines for ' + particleSize + ' um Counts Log Scale')
        pyplot.savefig(room + particleSize + 'Base' + shift[:-6] + '.png')
def fit_model(name, model, X_train, y_train, X_test, pred):
    """
    Fits a classification model (for our purpose this is LR, LDA, QDA)
    using the training data, then makes a prediction on the test data
    and scores it against the actual direction.
    """
    # Fit the model on the training data, then predict on the test data
    model.fit(X_train, y_train)
    pred[name] = model.predict(X_test)
    print "Printing Graph : "
    x = [mdates.date2num(d) for d in pred.index]
    plt.plot_date(x=x, y=pred['Actual'], fmt="ro")
    plt.grid(True)
    plt.show()
    print "Prediction Matrix"
    print pred
    plt.show()
    # Calculate the hit rate based on the actual direction
    pred["%s_Correct" % name] = (1.0 + pred[name] * pred["Actual"]) / 2.0
    hit_rate = np.mean(pred["%s_Correct" % name])
    print "%s: %.3f" % (name, hit_rate)
def recalc_final_result(base_dir="/home/oferb/docs/train_project", experiment_id="webcam2", use_resized=False):
    import re
    import shutil
    if use_resized:
        frame_base = "frames_resized"
    else:
        frame_base = "frames"
    frames_dir = "%s/data/%s/%s" % (base_dir, experiment_id, frame_base)
    resized_files_dir = os.path.join(base_dir, "output", experiment_id, "frames_resized_done")
    resized_files_dir_times = os.path.join(base_dir, "output", experiment_id, "frames_resized_done_times")
    files = os.listdir(resized_files_dir)
    ids = []
    for filename in files:
        ids.append(re.findall(r"\d+", filename))
    img_times = []
    for an_id, filename in zip(ids, files):
        img_filename = utils.get_image_filename(frames_dir, int(an_id[0]), use_resized)
        img_times.append(dt.datetime.fromtimestamp(os.path.getctime(img_filename)))
        shutil.copy2(
            os.path.join(resized_files_dir, filename),
            os.path.join(resized_files_dir_times, "%s_%s" % (img_times[-1], filename)),
        )
    values = np.ones([len(img_times), 1])
    plt.plot_date(img_times, values)
def show_plot(times, values, np_test, name):
    x_val, y_val = np.loadtxt(
        np_test,
        delimiter=',',
        unpack=True,
        converters={0: mdates.strpdate2num('%Y-%m-%d %H:%M:%S.%f')})
    # x_val = times
    # y_val = values
    # plt.hold(False)
    plt.title(name)
    plt.xlabel('Time')
    plt.ylabel('Values')
    plt.plot_date(x=x_val,
                  y=y_val,
                  marker='o',
                  markerfacecolor='red',
                  fmt='b-',
                  label='value',
                  linewidth=2)
    # plt.plot(x_val, y_val)
    # plt.plot(x_val, y_val, 'or')
    plt.savefig(os.path.join(MEDIA_FOLDER, 'plots', '%s.png' % name))
    plt.clf()
    plt.cla()
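# mdates.strpdate2num only exists in older matplotlib releases (it was
# deprecated and later removed in the 3.x series). A drop-in replacement
# sketch for newer versions:
import matplotlib.dates as mdates
from datetime import datetime

def strpdate2num(fmt):
    def converter(s):
        if isinstance(s, bytes):
            s = s.decode('utf-8')
        return mdates.date2num(datetime.strptime(s, fmt))
    return converter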
def show(self, beta):
    print 'Popularity'
    dates = self.bymonth.first()['dates'].values
    dates = numpy.concatenate([dates, self.bymonth.last()['dates'][-1:].values])
    plt.plot_date(dates, pandas.Series(beta).cumsum())
    plt.show()
    plt.clf()
def main(met_fname, gday_outfname, var):
    # Load met data
    s = remove_comments_from_header(met_fname)
    df_met = pd.read_csv(s, parse_dates=[[0, 1]], skiprows=4, index_col=0,
                         sep=",", keep_date_col=True, date_parser=date_converter)

    # Need to build numpy array, so drop year, doy cols
    met_data = df_met.ix[:, 2:].values
    met_data_train = df_met.ix[0:4000, 2:].values

    # Load GDAY outputs
    df = pd.read_csv(gday_outfname, skiprows=3, sep=",", skipinitialspace=True)
    df['date'] = make_data_index(df)
    df = df.set_index('date')
    target = df[var][0:4000].values

    # BUILD MODELS
    # hold back 40% of the dataset for testing
    #X_train, X_test, Y_train, Y_test = \
    #    cross_validation.train_test_split(met_data, target, \
    #                                      test_size=0.4, random_state=0)

    param_KNR = {"n_neighbors": [20], "weights": ['distance']}

    #regmod = DecisionTreeRegressor()
    #regmod = RandomForestRegressor()
    #regmod = SVR()
    regmod = KNeighborsRegressor()

    pipeit3 = lambda model: make_pipeline(StandardScaler(), PCA(), model)
    pipeit2 = lambda model: make_pipeline(StandardScaler(), model)
    regmod_p = pipeit2(regmod)
    modlab = regmod_p.steps[-1][0]
    par_grid = {'{0}__{1}'.format(modlab, parkey): pardat
                for (parkey, pardat) in param_KNR.iteritems()}

    #emulator = GridSearchCV(regmod, param_grid=param_DTR, cv=5)
    emulator = GridSearchCV(regmod_p, param_grid=par_grid, cv=5)

    #emulator.fit(X_train, Y_train)
    emulator.fit(met_data_train, target)
    predict = emulator.predict(met_data)

    df = pd.DataFrame({'DT': df.index, 'emu': predict, 'gday': df[var]})

    plt.plot_date(df.index[4000:4383], df['emu'][4000:4383], 'o', label='Emulator')
    plt.plot_date(df.index[4000:4383], df['gday'][4000:4383], 'o', label='GDAY')
    plt.ylabel('GPP (g C m$^{-2}$ s$^{-1}$)')
    plt.legend()
    plt.show()
def generate_plot(dictionary, title, labelX, labelY, filename, ids, flag):
    import numpy as np
    figure = plt.figure(figsize=(6 * 3.13, 4 * 3.13))
    plt.title(title)
    hspace = 1.0
    nrow = 1
    plt.subplots_adjust(hspace=hspace)
    figure.autofmt_xdate()
    for hashtag in dictionary:
        x = dictionary[hashtag]['x']
        x = dates.datestr2num(x)
        y = dictionary[hashtag]['y']
        plt.subplot(3, 3, nrow)
        nrow += 1
        plt.ylabel(labelY)
        plt.xlabel(labelX)
        plt.xticks(rotation=30)
        plt.plot_date(x, y, '-', color='green', linewidth=2.0, label=hashtag.decode('utf8'))
        plt.legend(loc='best', prop={'size': 10})
    plt.show()
    figure.savefig(filename + ids, dpi=(1200))
    plt.close()
def main():
    # Request data from NIWAData
    response = get(URL, auth=HTTPBasicAuth(USERNAME, PASSWORD))

    # Successful requests will return HTTP status code 200
    if response.status_code != 200:
        raise Exception('Failed to request NIWAData: %s' % response.reason)

    # Parse the JSON response
    data = response.json()

    # You can retrieve the attributes about the dataset,
    analysis_time = data['data']['analysisTime']
    measure_name = data['data']['measureName']
    name = data['data']['name']
    unit_symbol = data['data']['unitSymbol']

    # and also the values
    values = data['data']['values']
    pprint(data)

    # Plot the values, where x[0] has the datetime, and x[1] the current float value.
    # Note that we are sorting the values by datetime, as they may
    # not always come sorted
    dates = np.array([datetime.strptime(x[0], '%Y-%m-%dT%H:%M:%S%z')
                      for x in sorted(values.items())])
    values = np.array([x[1] for x in sorted(values.items())])
    plt.plot_date(x=dates, y=values, fmt="r-")
    plt.title(name)
    plt.ylabel("Value in %s" % unit_symbol)
    plt.grid(True)
    plt.show()
def try_prod24h_before(columns=['Tout', 'vWind', 'vWindavg24', 'prod24h_before'],
                       add_const=False, y=y):
    plt.close('all')
    X = all_data[columns]
    res = mlin_regression(y, X, add_const=add_const)
    timesteps = ens.gen_hourly_timesteps(dt.datetime(2015, 12, 17, 1),
                                         dt.datetime(2016, 1, 15, 0))

    plt.subplot(2, 1, 1)
    plt.plot_date(timesteps, y, 'b', label='Actual production')
    plt.plot_date(timesteps, res.fittedvalues, 'r', label='Weather model')
    prstd, iv_l, iv_u = wls_prediction_std(res)
    plt.plot_date(timesteps, iv_u, 'r--', label='95% conf. int.')
    plt.plot_date(timesteps, iv_l, 'r--')
    plt.ylabel('MW')
    plt.legend(loc=2)
    plt.subplot(2, 1, 2)
    plt.plot_date(timesteps, res.resid, '-', label='Residual')
    plt.ylabel('MW')
    plt.legend()

    print "MAE = " + str(mae(res.resid))
    print "MAPE = " + str(mape(res.resid, y))
    print "RMSE = " + str(rmse(res.resid))
    print res.summary()
    return res
def graph_csv_n(output_directory, csv_file, plot_title, output_filename, columns, y_label=None,
                precision=None, graph_height="600", graph_width="1500", graph_type="line",
                graph_color="black"):
    if not os.path.getsize(csv_file):
        return False, None
    y_label = y_label or plot_title
    fig = plt.figure()
    fig.set_size_inches(float(graph_width) / 80, float(graph_height) / 80)
    if graph_type == "line":
        line_style = "-"
        marker = None
    else:
        marker = "."
        line_style = None
    np_data = numpy.loadtxt(csv_file, delimiter=",", converters={0: convert_to_mdate})
    np_data = np_data.transpose()
    xdata = np_data[0]
    ydata = [[]] * len(np_data)
    for i in range(1, len(np_data)):
        print i
        ydata[i - 1] = numpy.asarray(np_data[i], dtype=numpy.float)
        plt.plot_date(x=xdata, y=ydata[i - 1], linestyle=line_style, marker=marker,
                      color=graph_color)
    plt.title(plot_title)
    plt.ylabel(y_label)
    plt.grid(True)
    # Get current axis and its xtick labels
    labels = plt.gca().get_xticklabels()
    for label in labels:
        label.set_rotation(30)
    plot_file_name = os.path.join(output_directory, output_filename + ".png")
    fig.savefig(plot_file_name)
    plt.close()
    return True, None
def plot(self, widget):
    plt.xlabel('Date/Time')
    plt.ylabel('Temperature [%sF]' % self.degree_symbol)
    plt.title('Recorded temperature history')
    plt.grid(True)
    plt.plot_date(self.plotX, self.plotY, fmt='bo', tz=None, xdate=True)
    plt.show()
def memplot(cmd, wait_time=0.05):
    usage = []
    if isinstance(cmd, list):
        cmd = ' '.join(cmd)
    p = subprocess.Popen(cmd, shell=True)
    util_process = psutil.Process(p.pid)
    try:
        while util_process.status() != psutil.STATUS_ZOMBIE:
            stats = util_process.memory_info()
            children = util_process.children(recursive=True)
            c_rss = sum(child.memory_info().rss for child in children)
            usage.append((datetime.now(), stats.rss + c_rss))
            sleep(wait_time)
    except KeyboardInterrupt:
        # on ctrl-c kill the subprocess
        p.kill()
    t, rss = zip(*usage)
    rss = [mem / float(2 ** 20) for mem in rss]  # convert to MB (a list, so plotting works on Python 3)
    plt.plot_date(x=t, y=rss, fmt="b-")
    plt.title(cmd)
    plt.ylabel("Memory Usage (MB)")
    plt.xlabel("Time")
    plt.grid(True)
    plt.show()
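# Example usage of memplot, assuming a long-running command to profile
# (the script name is illustrative): samples the process tree's RSS every
# 0.1 s until the child exits or Ctrl-C, then plots memory in MB over time.
#
#   memplot("python train.py", wait_time=0.1)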
def plot_gestures_and_flux_score(plot_title, gestures, flux, flux_diffs):
    """Plots a gesture score with flux values as well - this one suffers the window bug"""
    idx = gestures.index
    # ax = plt.figure(figsize=(35,10),frameon=False,tight_layout=True).add_subplot(111)
    ax = plt.figure(figsize=(14, 6), frameon=False, tight_layout=True).add_subplot(211)
    ax.xaxis.set_major_locator(dates.SecondLocator(bysecond=[0]))
    ax.xaxis.set_major_formatter(dates.DateFormatter("%H:%M"))
    ax.yaxis.grid()
    plt.ylim(-0.5, 8.5)
    plt.yticks(np.arange(9), ['n', 'ft', 'st', 'fs', 'fsa', 'vss', 'bs', 'ss', 'c'])
    plt.ylabel("gesture")
    for n in gestures.columns:
        plt.plot_date(idx.to_pydatetime(), gestures[n], '-', label=n)
    # Plot Flux Data
    ax2 = plt.subplot(212, sharex=ax)
    idx = flux.index
    plt.plot_date(idx.to_pydatetime(), flux, '-', label=flux.name)
    plt.ylabel("flux")
    # Possible New Ideas Stage
    # new_ideas = flux_diffs.ix[flux_diffs > transitions.NEW_IDEA_THRESHOLD]
    # new_ideas = new_ideas.index
    # new_idea_colour = 'r'
    # for n in range(len(new_ideas)):
    #     x_val = new_ideas[n].to_pydatetime()
    #     ax.axvline(x=x_val, color=new_idea_colour, alpha=0.7, linestyle='--')
    #     ax2.axvline(x=x_val, color=new_idea_colour, alpha=0.7, linestyle='--')
    # Output Stage
    plt.savefig(plot_title.replace(":", "_") + '.pdf', dpi=300, format="pdf")
    plt.close()
def graph_csv_new(output_directory, csv_files, plot_title, output_filename, columns, y_label=None,
                  precision=None, graph_height="600", graph_width="1500", graph_type="line",
                  graph_color="black"):
    y_label = y_label or plot_title
    fig = plt.figure()
    fig.set_size_inches(float(graph_width) / 80, float(graph_height) / 80)
    if graph_type == "line":
        line_style = "-"
        marker = None
    else:
        marker = "."
        line_style = None
    colors = ['red', 'green', 'blue', 'yellow']
    i = 0
    for csv_file in csv_files:
        days, impressions = numpy.loadtxt(csv_file, unpack=True, delimiter=",",
                                          converters={0: convert_to_mdate})
        # cycle through the palette so more than four files don't raise IndexError
        plt.plot_date(x=days, y=impressions, linestyle=line_style, marker=marker,
                      color=colors[i % len(colors)])
        i += 1
    plt.title(plot_title)
    plt.ylabel(y_label)
    plt.grid(True)
    # Get current axis and its xtick labels
    labels = plt.gca().get_xticklabels()
    for label in labels:
        label.set_rotation(30)
    plot_file_name = os.path.join(output_directory, output_filename + ".png")
    fig.savefig(plot_file_name)
    plt.close()
    return True, None
def plot_time_series(timestamp, values, title, **kwargs):
    plt.rcParams['figure.figsize'] = 14, 7
    plt.plot_date(timestamp, values, fmt='g-', tz='utc', **kwargs)
    plt.title(title)
    plt.xlabel('Year')
    plt.ylabel('Dollars per Barrel')
    plt.rcParams.update({'font.size': 16})
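# Example call for plot_time_series, assuming datetime timestamps
# (all names and values below are illustrative):
#
#   import datetime as dt
#   days = [dt.datetime(2020, 1, 1) + dt.timedelta(days=i) for i in range(30)]
#   prices = [50 + 0.2 * i for i in range(30)]
#   plot_time_series(days, prices, 'WTI Spot Price')
#   plt.show()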
def explore():
    """
    Prediction for beginning of July was bad. Consistently predicted
    weekend ridership too high. Probably because of July 4th holiday stuff!

    Dec. 7-8 count goes really low.
    """
    nbsm = NonlinearBikeShareModel(train)
    bounds = nbsm.bounds()
    beta0 = nbsm.beta0()
    print 'bounds', len(bounds)
    print 'beta0', len(beta0)
    res = optimize.minimize(nbsm, beta0, method='L-BFGS-B', jac=True, bounds=bounds,
                            options={'disp': True, 'maxiter': 10000})
    #res = optimize.basinhopping(nbsm, beta0,
    #                            minimizer_kwargs={'method': 'L-BFGS-B', 'jac': True,
    #                                              'bounds': bounds,
    #                                              'options': {'disp': True, 'maxiter': 500}},
    #                            disp=True, niter=1)
    print res
    nbsm.show(res.x)
    plt.plot_date(train['dates'], train['count'])
    plt.plot_date(train['dates'], nbsm.count(res.x))
    plt.show()
def process_light(text, id, has_queried_date=False):
    if not has_queried_date:
        set_user_state(id, 'light')
        return {
            'type': 'text',
            'text': 'Please enter a date (e.g., 2020-01-08) or how many minutes back to query (e.g., "5 minutes ago"):'
        }
    else:
        try:
            if '-' in text:
                year, month, day = text.split('-')
                year = int(year)
                month = int(month)
                day = int(day)
                start_time = int(
                    datetime(year, month, day, 0, 0, 0, tzinfo=timezone.utc).timestamp())
                end_time = int(
                    datetime(year, month, day, 23, 59, 59, tzinfo=timezone.utc).timestamp())
            else:
                minutes_before = int(re.sub('[^0-9]', '', text))
                start_time = int(time.time() - 60 * minutes_before)
                end_time = int(time.time())
        except ValueError as e:
            # wrong format
            return {
                'type': 'text',
                'text': 'Invalid format! Please enter a date (e.g., 2020-01-08) or how many minutes back to query (e.g., "5 minutes ago"):'
            }

        # return a graph that contains lighting information
        # start_time = 1610064000  # GMT 2021-01-08 00:00:00
        # end_time = 1610150399   # GMT 2021-01-08 23:59:59

        # sort by time
        values = query_light(start_time, end_time)

        # check if there are items in the query
        if len(values) > 0:
            values.sort(key=lambda val: val[0])

            # convert all times to matplotlib format
            times, light_vals = zip(*values)
            times = [
                matplotlib.dates.date2num(datetime.fromtimestamp(t))
                for t in times
            ]

            # plot the graph
            fileobj = io.BytesIO()
            plt.plot_date(times, light_vals, 'b-', tz='Asia/Taipei')
            plt.gcf().autofmt_xdate()
            plt.ylabel('Light Intensity')
            plt.ylim(0, 1000)
            plt.savefig(fileobj, format='png')
            plt.clf()
            fileobj.seek(0)

            # filename pattern: two digit hash of id/id/timestamp
            id_hash = hashlib.sha1(id.encode('utf-8')).hexdigest()[:2]
            timestamp = time.strftime("%Y-%m-%d_%H-%M-%S", time.gmtime())
            object_name = id_hash + '/' + id + '/' + timestamp + '.png'
            url = upload_fileobj(fileobj, 'smarteyes-linebotserver', object_name)
            response = {
                'type': 'image',
                'originalContentUrl': url,
                'previewImageUrl': url
            }
        else:
            response = {'type': 'text', 'text': 'No data for this time period!'}
        set_user_state(id, '')
        return response
def get_ms_fcst(file_out5, file_out28, today_date):
    import requests as req
    import urllib.request as urlreq
    import time
    from bs4 import BeautifulSoup
    import pandas as pd
    import numpy as np
    import os
    from datetime import datetime
    import matplotlib.pyplot as plt

    # NWS Observations and Forecasts for the Lower Mississippi/Ohio site
    nws = req.get('https://www.weather.gov/lmrfc/obsfcst_mississippi')

    # Parse html data from website
    nws_html = BeautifulSoup(nws.text, "html.parser")  # NOTE: data is pre-formatted <pre>

    ###################### Get 5-day forecast data ##########################
    dum = nws_html.findAll('a')[118]
    pred_5day = dum['href']
    html_5day = BeautifulSoup(req.get(pred_5day).text, "html.parser")
    text_5day = html_5day.find('pre').contents[0]

    # Separate the rows specified in the data by \n
    split_5day = text_5day.string.split('\n')  # rows are separated by \n

    # Separate items in row (space)
    list_5day = [[]] * np.size(split_5day)
    for i in range(np.size(split_5day)):
        list_5day[i] = split_5day[i].split()  # separate each entry within the row

    # Combine city names into one cell if multiple words (e.g., Baton Rouge, New Orleans)
    for i in range(np.size(list_5day)):
        try:
            list_5day[i][0]
            if list_5day[i][0] == '':
                continue
            elif list_5day[i][1] == '':
                continue
            elif list_5day[i][1].isnumeric():
                list_5day[i][1] = list_5day[i][1]
            elif list_5day[i][1].isalpha():
                list_5day[i][0] = list_5day[i][0] + ' ' + list_5day[i][1]
                del list_5day[i][1]
        except:
            continue

    # Combine city names into one cell if more than two words (e.g., Red River Landing)
    for i in range(np.size(list_5day)):
        try:
            list_5day[i][0]
            if list_5day[i][0] == '':
                continue
            elif list_5day[i][1] == '':
                continue
            elif list_5day[i][1] == 'TDA':
                continue
            elif list_5day[i][1].isnumeric():
                list_5day[i][1] = list_5day[i][1]
            elif list_5day[i][1].isalpha():
                list_5day[i][0] = list_5day[i][0] + ' ' + list_5day[i][1]
                del list_5day[i][1]
        except:
            continue

    # Put list into dataframe
    fcst_5day = pd.DataFrame(list_5day)

    # Pause so website doesn't think we're a hacker (probably more relevant for loops)
    time.sleep(1)

    ##################### Get 28-day forecast data ############################
    dum = nws_html.findAll('a')[119]
    pred_28day = dum['href']
    html_28day = BeautifulSoup(req.get(pred_28day).text, "html.parser")
    text_28day = html_28day.findAll('pre')[0]

    # Separate the rows specified in the data by \n
    split_28day = text_28day.string.split('\n')  # rows are separated by \n
    list_28day = [[]] * np.size(split_28day)
    for i in range(np.size(split_28day)):
        list_28day[i] = split_28day[i].split()  # separate each entry within the row

    # Put list into data frame
    fcst_28day = pd.DataFrame(list_28day)

    ############################# Save CSV output ########################
    # Save 5-day forecast file
    import ntpath
    path_out5 = ntpath.dirname(file_out5)
    try:
        os.mkdir(path_out5)  # create path for today (if not previously created)
        pd.DataFrame.to_csv(fcst_5day,
                            path_out5 + "\\" + "test_FORECAST_" + today_date + ".csv")
    except:
        pd.DataFrame.to_csv(fcst_5day,
                            path_out5 + "\\" + "test_FORECAST_" + today_date + ".csv")

    # Save 28-day forecast file
    path_out28 = ntpath.dirname(file_out28)
    try:
        os.mkdir(path_out28)  # create path for today (if not previously created)
        pd.DataFrame.to_csv(fcst_28day,
                            path_out28 + "\\" + "test_24hr change NWS_" + today_date + ".csv",
                            date_format='%mm-%dd-%YY')
    except:
        pd.DataFrame.to_csv(fcst_28day,
                            path_out28 + "\\" + "test_24hr change NWS_" + today_date + ".csv",
                            date_format='%mm-%dd-%YY')

    ############################### Plot Stages for New Orleans ##########################
    fcst28 = np.array(fcst_28day)
    k, l = np.where(fcst28 == "NORL1")  # used k and l as indices since i,j had been used
    dates_fcst21_NO = [
        datetime.strptime(x, "%m-%d-%y").date()
        for x in fcst28[int(k) + 1:int(k) + 22, 0]
    ]

    ######## Plot river stage forecast for 21 days
    plt.figure(figsize=[8, 4])
    fcst_28_NO = np.float64(fcst28[int(k) + 1:int(k) + 21 + 1, int(l)])
    plt.plot_date(dates_fcst21_NO, fcst_28_NO, fmt='--', color='purple',
                  xdate=True, ydate=False)

    # Add conditionals for the y-limits to adjust for scaling (depends on river max height)
    if np.max(fcst_28_NO) >= 14:
        plt.ylim((10, 20))
        plt.hlines(11, dates_fcst21_NO[0], dates_fcst21_NO[-1], colors='r')  # Stage 1 Flood Marker
        plt.hlines(15, dates_fcst21_NO[0], dates_fcst21_NO[-1], colors='g')  # Stage 2 Flood Marker
        plt.legend(['NWS Stage, FT', 'Phase 1', 'Phase 2'], loc="lower left")
    elif np.max(fcst_28_NO) >= 10 and np.max(fcst_28_NO) < 14:
        plt.ylim((6, 16))
        plt.hlines(11, dates_fcst21_NO[0], dates_fcst21_NO[-1], colors='r')  # Stage 1 Flood Marker
        plt.hlines(15, dates_fcst21_NO[0], dates_fcst21_NO[-1], colors='g')  # Stage 2 Flood Marker
        plt.legend(['NWS Stage, FT', 'Phase 1', 'Phase 2'], loc="lower left")
    elif np.max(fcst_28_NO) < 10:
        plt.ylim((2, 12))
        plt.hlines(11, dates_fcst21_NO[0], dates_fcst21_NO[-1], colors='r')  # Stage 1 Flood Marker
        plt.legend(['NWS Stage, FT', 'Phase 1'], loc="lower left")

    plt.xlim((dates_fcst21_NO[0], dates_fcst21_NO[-1]))
    plt.grid(True)
    plt.xticks(dates_fcst21_NO)
    plt.xticks(dates_fcst21_NO[0:21:5], dates_fcst21_NO[0:21:5])
    plt.title("Mississippi River at New Orleans")
    plt.ylabel("River Stage (FT)")  # stage is plotted on the y-axis; dates on x
    plt.tight_layout()
    plt.savefig(path_out28 + "\\" + "test_24hr_change_NWS_NewOrleans_" + today_date + ".png")
    del k, l

    ############################### Plot Stages for Red River Landing ##########################
    fcst28 = np.array(fcst_28day)
    k, l = np.where(fcst28 == "RRLL1")  # used k and l as indices since i,j had been used
    dates_fcst21_RR = [
        datetime.strptime(x, "%m-%d-%y").date()
        for x in fcst28[int(k) + 1:int(k) + 22, 0]
    ]

    ######## Plot river stage forecast for 21 days
    plt.figure(figsize=[8, 4])
    fcst_28_RR = np.float64(fcst28[int(k) + 1:int(k) + 21 + 1, int(l)])
    plt.plot_date(dates_fcst21_RR, fcst_28_RR, fmt='--', color='purple',
                  xdate=True, ydate=False)

    # Add conditionals for the y-limits to adjust for scaling
    if np.max(fcst_28_RR) >= 55:
        plt.ylim((45, 65))
        plt.hlines(48, dates_fcst21_RR[0], dates_fcst21_RR[-1], colors='r')  # Stage 1 Flood Marker
        plt.hlines(56, dates_fcst21_RR[0], dates_fcst21_RR[-1], colors='g')  # Stage 2 Flood Marker
        plt.legend(['NWS Stage, FT', 'Phase 1', 'Phase 2'], loc="lower left")
    elif np.max(fcst_28_RR) >= 45 and np.max(fcst_28_RR) < 55:
        plt.ylim((40, 60))
        plt.hlines(48, dates_fcst21_RR[0], dates_fcst21_RR[-1], colors='r')  # Stage 1 Flood Marker
        plt.legend(['NWS Stage, FT', 'Phase 1'], loc="lower left")
    elif np.max(fcst_28_RR) >= 35 and np.max(fcst_28_RR) < 45:
        plt.ylim((30, 50))
        plt.hlines(48, dates_fcst21_RR[0], dates_fcst21_RR[-1], colors='r')  # Stage 1 Flood Marker
        plt.legend(['NWS Stage, FT', 'Phase 1'], loc="lower left")
    elif np.max(fcst_28_RR) >= 25 and np.max(fcst_28_RR) < 35:
        plt.ylim((20, 40))
        plt.legend(['NWS Stage, FT'], loc="lower left")
    elif np.max(fcst_28_RR) >= 15 and np.max(fcst_28_RR) < 25:
        plt.ylim((10, 30))
        plt.legend(['NWS Stage, FT'], loc="lower left")
    elif np.max(fcst_28_RR) < 15:
        plt.ylim((0, 20))
        plt.legend(['NWS Stage, FT'], loc="lower left")

    plt.xlim((dates_fcst21_RR[0], dates_fcst21_RR[-1]))
    plt.grid(True)
    plt.xticks(dates_fcst21_RR)
    plt.xticks(dates_fcst21_RR[0:21:5], dates_fcst21_RR[0:21:5])
    plt.title("Mississippi River at Red River Landing")
    plt.ylabel("River Stage (FT)")  # stage is plotted on the y-axis; dates on x
    plt.tight_layout()
    plt.savefig(path_out28 + "\\" + "test_24hr_change_NWS_RedRiverLanding_" + today_date + ".png")
# hdd_d70           float64
# temp_f_dmin       float64
# temp_f_dmax       float64
# windsp_mph_davg   float64
# recs                int64
df1 = pd.read_sql_table("v_e1248_daily", engine)

tempf_vals = df1['temp_f_davg']
dater = df1['d_utc']

# Make the figure wider to see things better
plt.figure(figsize=(15, 10))

plt.plot_date(dater, tempf_vals, linestyle="solid", color="#5a7d9a", linewidth=1, marker='')

# Get the current figure (gcf) and auto format the x to try to fit it better in the plot
plt.gcf().autofmt_xdate()

# reformat the date
date_format = mpl_dates.DateFormatter('%b %y')
plt.gca().xaxis.set_major_formatter(date_format)

plt.vlines('2017-07-01', ymin=1, ymax=80, colors='#29d193', label='Construction Begins', linewidth=2)
elif (filtype == 'lp') | (filtype == 'hp'):
    title = 'RSAM: ' + site + ', date: ' + date1 + '-' + date2 + ' UT, filter: ' + \
            filtype + ' ' + strf + ' Hz' + ', plotted at: ' + \
            now.strftime("%Y-%m-%d %H:%M") + ', BTL = ' + basetrig
elif filtype == 'bp':
    title = 'RSAM: ' + site + ', date: ' + date1 + '-' + date2 + ' UT, filter: ' + filtype + \
            ' ' + strf1 + ' - ' + strf2 + ' Hz' + \
            ', plotted at: ' + now.strftime("%Y-%m-%d %H:%M") + ', BTL = ' + basetrig

fig = plt.figure(figsize=(15, 5))
maxy = 1.1 * tr.data.max()
plt.ylim(ymin=0, ymax=maxy)

# base trigger level on plot, if in scale
if basetrig != 'null':
    bt = float(basetrig)
    half = bt / 2
    plt.axhline(y=bt, linestyle='--', color='red')
    # colour areas based on relation to BTL
    plt.axhspan(0, half, alpha=0.1, color='green')    # low rectangle
    plt.axhspan(half, bt, alpha=0.1, color='orange')  # moderate rectangle
    plt.axhspan(bt, 100000, alpha=0.1, color='red')   # high rectangle

plt.title(title)
plt.ylabel('Ground Velocity (nm/s)')
plt.plot_date(t, tr.data, linestyle='-', marker='None', color='black')
plt.xlim(t[0], t[-1])
plt.savefig(plot_file, dpi=200)
# plt.show()
else:
    marker_1 = "^"
    label_1 = 'icm'
if i == 'ideam-icm':
    type_1 = '--'
    color_1 = 'red'
    if kk == 'd02':
        marker_1 = 'D'
    else:
        marker_1 = "^"
    label_1 = 'icm'
plt.plot_date(comparacion.date, comparacion.T2, color=color_1, linestyle='-',
              marker=marker_1, label=label_1 + ' ' + kk)
plt.plot_date(comparacion.date, comparacion.tmp_2m, '-', color='k',
              label='Automatic station')
plt.legend()
plt.xlabel('Date - Hour')
plt.ylabel('Temperature °C')
plt.xticks(rotation=90)
plt.savefig(
    '/media/edwin/6F71AD994355D30E/Edwin/Maestría Meteorologia/Tesis/Tesis_Edwin_20190226/comparacion_grafica/'
def main():
    #experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu']  # All 12
    experiment_ids_p = ['djznw', 'djzny', 'djznq', 'dklzq', 'dkmbq', 'dkjxq']  # Most of Params
    experiment_ids_e = ['dklwu', 'dklyu', 'djzns', 'dkbhu', 'djznu', 'dkhgu']  # Most of Explicit
    #experiment_ids = ['djzny', 'djznq', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq']
    #plt.ion()

    NUM_COLOURS = 15
    cmap = cm.get_cmap(cm.Set1, NUM_COLOURS)
    #cgen = (cmap(1.*i/NUM_COLORS) for i in range(NUM_COLORS))

    # per-experiment plot style: (colour index into cmap, linewidth, linestyle)
    styles = {
        'djznw': (1, 0.2, '--'),
        'djzny': (3, 0.5, '--'),
        'djznq': (5, 0.8, '--'),
        'dkjxq': (5, 0.8, ':'),
        'dklzq': (7, 1.0, '--'),
        'dklwu': (7, 1.0, '-'),
        'dkmbq': (9, 1.3, '--'),
        'dklyu': (9, 1.3, '-'),
        'djzns': (11, 1.6, '-'),
        'dkbhu': (13, 1.9, '-'),
        'dkhgu': (13, 1.9, ':'),
        'djznu': (15, 2.0, '-'),
    }

    for ls in ['sea']:
        fig = plt.figure(figsize=(16, 8))
        ax = fig.add_subplot(111)
        legendEntries = []
        legendtext = []

        plot_trmm = np.load('%s%s_%s' % (trmm_dir, ls, trmm_file))
        dates_trmm = []
        p = []
        for dp in plot_trmm['hour']:
            print dp
            if ((int(dp) < 23) & (int(dp) >= 6)):
                dates_trmm.append(datetime.datetime(2011, 8, 21, int(dp), 0))
                p.append(plot_trmm['mean'][plot_trmm['hour'] == dp])
            if ((int(dp) >= 0) & (int(dp) <= 6)):
                dates_trmm.append(datetime.datetime(2011, 8, 22, int(dp), 0))
                p.append(plot_trmm['mean'][plot_trmm['hour'] == dp])
        #print dates_trmm

        a = np.argsort(dates_trmm, axis=0)
        d_trmm = np.array(dates_trmm)[a]
        pl = (np.array(p)[a])
        #pl=np.sort(pl,axis=1)

        l, = plt.plot_date(d_trmm + utc_to_local, pl, label='TRMM', linewidth=2,
                           linestyle='-', marker='', markersize=2, fmt='', color='#262626')
        legendEntries.append(l)
        legendtext.append('TRMM')

        #l0=plt.legend(legendEntries, legendtext, title='', frameon=False, loc=9, bbox_to_anchor=(0.21, 0, 1, 1))
        l0 = plt.legend(legendEntries, legendtext, title='', frameon=False, loc=9,
                        bbox_to_anchor=(0, 0, 1, 1))

        # Change the legend label colors to almost black
        for t in l0.texts:
            t.set_color('#262626')

        legendEntries = []
        legendtext = []

        for c, experiment_id in enumerate(experiment_ids_p):
            print experiment_id
            expmin1 = experiment_id[:-1]
            ci, linewidth, linestylez = styles[experiment_id]
            colour = cmap(1. * ci / NUM_COLOURS)
            try:
                plotnp = np.load(
                    '%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy'
                    % (top_dir, expmin1, experiment_id, pp_file, ls,
                       lat_min, lat_max, lon_min, lon_max))
                l, = plt.plot_date(d, plotnp[0] * 3600,
                                   label='%s' % (model_name_convert_legend.main(experiment_id)),
                                   linewidth=linewidth * 1.5, linestyle=linestylez,
                                   marker='', markersize=2, fmt='', color=colour)
                legendEntries.append(l)
                legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
            except Exception, e:
                print e
                pass

        #l1=plt.legend(legendEntries, legendtext, title='Parametrised', loc=9, frameon=False, bbox_to_anchor=(0, 0, 1, 1))
        l1 = plt.legend(legendEntries, legendtext, title='Parametrised', loc=9,
                        frameon=False, bbox_to_anchor=(-0.255, 0, 1, 1))

        # Change the legend label colors to almost black
        for t in l1.texts:
            t.set_color('#262626')

        legendEntries = []
        legendtext = []

        c1 = 0
        for c, experiment_id in enumerate(experiment_ids_e):
            print experiment_id
            expmin1 = experiment_id[:-1]
            ci, linewidth, linestylez = styles[experiment_id]
            colour = cmap(1. * ci / NUM_COLOURS)
            try:
                plotnp = np.load(
                    '%s/%s/%s/%s_%s_rainfall_diurnal_np_domain_constrain_lat_%s-%s_lon-%s-%s.npy'
                    % (top_dir, expmin1, experiment_id, pp_file, ls,
                       lat_min, lat_max, lon_min, lon_max))
                l, = plt.plot_date(d, plotnp[0] * 3600,
                                   label='%s' % (model_name_convert_legend.main(experiment_id)),
                                   linewidth=linewidth * 1.5, linestyle=linestylez,
                                   marker='', markersize=2, fmt='', color=colour)
                legendEntries.append(l)
                legendtext.append('%s' % (model_name_convert_legend.main(experiment_id)))
            except Exception, e:
                print e
                pass
figure = plot.figure(figsize=(12, 8), dpi=100)
ax1 = subplot(111)
plot.suptitle(
    u'Index-linked loan (Verðtryggð lán): Principal = %s ISK in %s @ %.1f%% base rate %s'
    % (comma_format(Principal), sys.argv[3], Interest * 100, projectedTitle),
    fontsize=13)
plot.ylim(0)

# Was any data projected forward
#print len(x_dates), len(P)
if projectedDate == -1:
    projectedDate = duration
    # Plot over duration of loan
    plot.plot_date(x_dates[0:projectedDate], P[0:projectedDate],
                   label="Capital Outstanding", color='black', marker='', ls='-', lw=3.0)
    ax2 = ax1.twinx()
    plot.plot_date(x_dates[0:projectedDate], paid[0:projectedDate],
                   label="Monthly payment", marker='', ls='-', lw=3.0, color='red')
else:
    # Plot projected as a dashed line
    plot.plot_date(x_dates[0:projectedDate], P[0:projectedDate],
                   label="Capital Outstanding",
def finalize(self, all_data):
    ci = 0
    color_dict = {}
    for channel in self.channels:
        if "figsize" in self.kwargs:
            plt.figure(figsize=self.kwargs["figsize"])
        else:
            plt.figure()
        for s in list(all_data.keys()):
            data = all_data[s][channel]

            # Determine how to label this curve:
            label = "_nolegend_"
            if "Run_Number" not in s.tags or s.tags["Run_Number"] == 0:
                if "label_by_tag" in self.kwargs:
                    tag = self.kwargs["label_by_tag"]
                    tag_value = s.tags[tag]
                    label = "{} = {}".format(tag, tag_value)
                elif "label_by_expt" in self.kwargs:
                    if "label_dict" in self.kwargs:
                        label = self.kwargs["label_dict"][s.experiment_id]
                    else:
                        label = s.experiment_id

            # Determine how to color this curve:
            if "color_by_tag" in self.kwargs:
                vmin = self.kwargs["color_vmin"]
                vmax = self.kwargs["color_vmax"]
                tag_value = s.tags[self.kwargs["color_by_tag"]]
                # map the normalized tag value to a viridis color
                c = cm.viridis((tag_value - vmin) / (vmax - vmin))
            elif "color_by_expt" in self.kwargs:
                if s.experiment_id in color_dict:
                    c = color_dict[s.experiment_id]
                else:
                    c = "C{}".format(ci)
                    ci += 1
                    color_dict[s.experiment_id] = c

            # Plot by date if a ref date has been given
            if "ref_date" in self.kwargs:
                ref_date = self.kwargs["ref_date"]
                mdate_arr = convert_day_number_array_to_mdate(
                    np.arange(np.size(data)), ref_date)
                # Now plotting:
                if "c" in locals():
                    plt.plot_date(mdate_arr, data, label=label, c=c, ls='-', marker=',')
                else:
                    plt.plot_date(mdate_arr, data, label=label, ls='-', marker=',')
            else:
                if "c" in locals():
                    plt.plot(data, label=label, c=c)
                else:
                    plt.plot(data, label=label)
                plt.xlabel("Simulation Time")

        if "plot_xlim" in self.kwargs:
            plt.xlim(self.kwargs["plot_xlim"])
        if "plot_ylim" in self.kwargs:
            plt.ylim(self.kwargs["plot_ylim"])
        plt.ylabel(channel)
        plt.legend()
        plt.tight_layout()
        plt.savefig(os.path.join(self.working_dir, channel))
        if "plot_show" in self.kwargs and self.kwargs["plot_show"]:
            plt.show()
def conv_neural_network(inputs):
    oil_train, stock_train, oil_test, stock_test, oil_price, stock_price = inputs
    cost = tf.reduce_mean(tf.square(prediction - y))
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
    #oil_train, stock_train, oil_test, stock_test = inputs
    oil_train, stock_train, oil_test, stock_test = refine_input_with_lag(
        oil_train, stock_train, oil_test, stock_test)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Running neural net
        for epoch in range(hm_epoch):
            epoch_loss = 0
            for index in range(int(len(oil_train.values) / input_size[0])):
                x_in = np.zeros((input_size[1], input_size[0], 1, 1))
                for index_in, value in enumerate(
                        oil_train.values[index * input_size[0]:index * input_size[0] + input_size[0]]):
                    x_in[int(value), index_in, 0, 0] = 1
                y_in = stock_train.values[index * input_size[0]:index * input_size[0] + input_size[0]]
                _, c = sess.run([optimizer, cost], feed_dict={x: x_in, y: y_in})
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epoch, 'loss:', epoch_loss)

        correct = tf.reduce_mean(tf.square(tf.subtract(prediction, y)))
        total = 0
        cor = 0
        for index in range(int(len(oil_test.values) / input_size[0])):
            x_in = np.zeros((input_size[1], input_size[0], 1, 1))
            for index_in, value in enumerate(
                    oil_test.values[index * input_size[0]:index * input_size[0] + input_size[0]]):
                x_in[int(value), index_in, 0, 0] = 1
            y_in = stock_test.values[index * input_size[0]:index * input_size[0] + input_size[0]]
            total += input_size[0]
            if abs(correct.eval(feed_dict={x: x_in, y: y_in})) < 5:
                cor += input_size[0]
        saver = tf.train.Saver()
        print('Accuracy:', cor / total)
        save_path = saver.save(sess, "data/model/recurrent/recurrent.ckpt")
        print("Model saved in file: %s" % save_path)

        predictions = []
        for index in range(int(len(oil_price.values) / input_size[0])):
            x_in = np.zeros((input_size[1], input_size[0], 1, 1))
            for index_in, value in enumerate(
                    oil_price.values[index * input_size[0]:index * input_size[0] + input_size[0]]):
                x_in[int(value), index_in, 0, 0] = 1
            predictions += sess.run(prediction, feed_dict={x: x_in})[0].tolist()

        date_labels = oil_price.index
        date_labels = matplotlib.dates.date2num(date_labels.to_pydatetime())[:-14]
        plt.plot_date(date_labels, predictions, 'b-', label="RNN Predictions")
        plt.plot_date(date_labels, stock_price.values[:-14], 'r-', label='Stock Prices')
        plt.legend()
        plt.ylabel('Price')
        plt.xlabel('Year')
        plt.show()
def plot(self):
    plt.plot_date(self.list_date[self.nday:], self.list_ma[self.nday:], "-", lw=1)
    plt.plot_date(self.list_date[self.nday:], self.list_line1[self.nday:], "--")
    plt.plot_date(self.list_date[self.nday:], self.list_line2[self.nday:], "--")
    plt.plot_date(self.list_date[self.nday:], self.list_line3[self.nday:], "--")
    plt.plot_date(self.list_date[self.nday:], self.list_line4[self.nday:], "--")
    plt.plot_date(self.list_date, self.list_close, "-")
    plt.plot_date(self.list_trade_date, self.list_trade, "ro")
    plt.plot_date(self.list_trade_out_date, self.list_trade_out, "go")
    plt.grid()
    plt.show()
plt.xlabel('Months')
plt.ylabel('Stock price ' + titleVar)
mpld3.fig_to_html(fig1)
mpld3.save_html(fig1, "plot.html")

fig2 = plt.figure(2)
start = datetime.datetime.now().date() + datetime.timedelta(-31)
stock_val = pd.read_csv('stock.csv')
new_stock_val = stock_val[['Date', 'Close']]
new_stock_val = stock_val[pd.to_datetime(stock_val['Date']) > pd.to_datetime(start)]  ## Select values only up to the previous month. This was hard :(
new_stock_val.to_csv('stock2.csv')
x = new_stock_val['Date']
y = new_stock_val['Close']
plt.grid()
plt.plot_date(x, y, fmt="r-", marker='o', color='b')  ## show blue dots
fnt1 = plt.title("Last 1 month stock value of " + titleVar)
fnt1.set_size(18)
plt.gcf().autofmt_xdate()  ## Rotate text
fig2.set_size_inches(16, 7.2)
fig2.savefig('img/plots/plot.png', dpi=100, bbox_inches="tight")  ## Saving output as png image. Remove padding.

fig3, ax = plt.subplots()
plt.grid()
start = datetime.datetime.now().date() + datetime.timedelta(-92)  ## 3 months
stock_val = pd.read_csv('stock.csv')
new_stock_val = stock_val[['Date', 'Adj Close']]
new_stock_val = stock_val[pd.to_datetime(stock_val['Date']) > pd.to_datetime(start)]  ## Select values only up to the previous month. This was hard :(
def make_network_plot(intf_pairs, stems, tbaseline, xbaseline, plotname):
    print("printing network plot")
    if len(intf_pairs) == 0:
        print("Error! Cannot make network plot because there are no interferograms. ")
        sys.exit(1)
    xstart = []
    xend = []
    tstart = []
    tend = []

    # If there's a format like "S1A20160817_ALL_F2:S1A20160829_ALL_F2"
    if "S1" in intf_pairs[0]:
        for item in intf_pairs:
            scene1 = item[0:18]  # has some format like S1A20160817_ALL_F2
            scene2 = item[19:]   # has some format like S1A20160817_ALL_F2
            for x in range(len(stems)):
                if stems[x] == scene1:
                    xstart.append(xbaseline[x])
                    tstart.append(dt.datetime.strptime(str(int(tbaseline[x]) + 1), '%Y%j'))
                if stems[x] == scene2:
                    xend.append(xbaseline[x])
                    tend.append(dt.datetime.strptime(str(int(tbaseline[x]) + 1), '%Y%j'))

    # # If there's a format like "2017089:2018101"....
    # if len(intf_pairs[0])==15:
    #     dtarray=[]; im1_dt=[]; im2_dt=[];
    #     for i in range(len(times)):
    #         dtarray.append(dt.datetime.strptime(str(times[i])[0:7],'%Y%j'));
    #     # Make the list of datetimes for the images.
    #     for i in range(len(intf_pairs)):
    #         scene1=intf_pairs[i][0:7];
    #         scene2=intf_pairs[i][8:15];
    #         im1_dt.append(dt.datetime.strptime(scene1,'%Y%j'));
    #         im2_dt.append(dt.datetime.strptime(scene2,'%Y%j'));
    #     # Find the appropriate image pairs and baseline pairs
    #     for i in range(len(intf_pairs)):
    #         for x in range(len(dtarray)):
    #             if dtarray[x] == im1_dt[i]:
    #                 xstart.append(baselines[x]);
    #                 tstart.append(dtarray[x]);
    #             if dtarray[x] == im2_dt[i]:
    #                 xend.append(baselines[x]);
    #                 tend.append(dtarray[x]);

    plt.figure()
    plt.plot_date(tstart, xstart, '.b')
    plt.plot_date(tend, xend, '.b')
    for i in range(len(tstart)):
        plt.plot_date([tstart[i], tend[i]], [xstart[i], xend[i]], 'b')
    yrs_formatter = mdates.DateFormatter('%m-%y')
    plt.xlabel("Date")
    plt.gca().xaxis.set_major_formatter(yrs_formatter)
    plt.ylabel("Baseline (m)")
    plt.title("Network Geometry - Inclusive")
    plt.savefig(plotname)
    plt.close()
    print("finished printing network plot")
    return
request_port = 5670
response_port = 5671
edas_server = "localhost"
try:
    portal = EDASPortal(edas_server, request_port, response_port)
    response_manager = portal.createResponseManager()
    datainputs = """[domain=[{"name":"d0","lat":{"start":70,"end":90,"system":"values"},"lon":{"start":5,"end":45,"system":"values"},"level":{"start":0,"end":0,"system":"indices"}}],variable=[{"uri":"file:///Users/tpmaxwel/.edas/cache/collections/NCML/MERRA_DAILY.ncml","name":"t:v1","domain":"d0"}],operation=[{"name":"CDSpark.average","input":"v1","domain":"d0","axes":"xy"}]]"""
    rId1 = portal.sendMessage("execute", ["WPS", datainputs, '{ "response":"file" }'])
    fileResponses = response_manager.getResponseVariables(rId1)
    print "Plotting " + str(len(fileResponses)) + " responses"
    fileVar = fileResponses[0](squeeze=1)
    timeAxis = fileVar.getTime()
    data = fileVar.data
    list_of_datetimes = [
        datetime.datetime(x.year, x.month, x.day, x.hour, x.minute, int(x.second))
        for x in timeAxis.asComponentTime()
    ]
    dates = matplotlib.dates.date2num(list_of_datetimes)
    plt.plot_date(dates, data)
    plt.gcf().autofmt_xdate()
    plt.show()
finally:
    portal.shutdown()
'../d07_text_station_5min_2013_03_06.txt')
bystation = {g[0]: g[1] for g in rawdata.groupby('station')}
freeway_metadata = loadfreewaydata.load_freeway_metadata('../d07_stations_2012_09_06.txt')
fwy = (405, 'S')
start_station = 718287
end_station = 718300
path = freeway_metadata[fwy].loc[start_station:end_station].index
# filter out stations with no speed observations
path = [station for station in path if bystation[station]['avgspeed'].count() > 0]
print "path: " + str(path)
five_mins = hour = np.timedelta64(1000000 * 60 * 5)  # 5 minutes, expressed in microseconds
start_times = [np.datetime64('2013-03-06 08:00:00')]
for i in range(36):
    start_times.append(start_times[-1] + five_mins)
#start_times = [np.datetime64('2013-03-06 08:{:0>2}:00'.format(i * 5)) for i in range(6)]
print start_times
travel_times = map(
    lambda x: traveltime.travel_time(x, path, bystation, freeway_metadata[fwy],
                                     time_granularity=60 * 5)[1],
    start_times)
plt.plot_date(map(lambda x: x.astype(datetime.datetime), start_times), travel_times, '-')
)  # finds index of the Range array that is that pivot value
lastDate = dateRange[dateloc]  # Gets date corresponding to that index
pivots.append(currentMax)  # Adds pivot to pivot array
dates.append(lastDate)  # Adds pivot date to date array
print()

timeD = dt.timedelta(days=30)  # Sets length of dotted line on chart

for index in range(len(pivots)):  # Iterates through pivot array
    # print(str(pivots[index])+": "+str(dates[index]))  # Prints Pivot, Date couple
    plt.plot_date(
        [dates[index] - (timeD * .075), dates[index] + timeD],  # Plots horizontal line at pivot value
        [6 + pivots[index], pivots[index]],
        linestyle="--",
        linewidth=1,
        marker=',')
    plt.annotate(str(pivots[index]),
                 (mdates.date2num(dates[index]), pivots[index]),
                 xytext=(-10, 7),
                 textcoords='offset points',
                 fontsize=7,
                 arrowprops=dict(arrowstyle='-|>'))

plt.xlabel('Date')  # set x axis label
plt.ylabel('Price')  # set y axis label
plt.title(stock + " - Daily")  # set title
plt.ylim(prices["Low"].min(), prices["High"].max() * 1.05)  # add margins
# plt.yscale("log")
else:  # if at least 5 years
    # time locator on the plot; 'year' for year, 'month' for month,
    # e.g. ('month', 3) plots one tick every 3 months
    time_locator = ('year', (plot_end_date.year - plot_start_date.year) / 5)

#========================================================
# Select data to be plotted
#========================================================
s_rbm_to_plot = my_functions.select_time_range(s_rbm, plot_start_date, plot_end_date)
s_usgs_to_plot = my_functions.select_time_range(s_usgs, plot_start_date, plot_end_date)

#========================================================
# plot
#========================================================
#============== plot daily data ===============#
fig = plt.figure()
ax = plt.axes()
plt.plot_date(s_usgs_to_plot.index, s_usgs_to_plot, 'b-', label='USGS gauge')
plt.plot_date(s_rbm_to_plot.index, s_rbm_to_plot, 'r--', label='Lohmann route')
plt.ylabel('Flow (cfs)', fontsize=16)
plt.title('%s, %s' % (usgs_site_name, usgs_site_code), fontsize=16)
plt.legend()
my_functions.plot_date_format(ax, time_range=(plot_start_date, plot_end_date),
                              locator=time_locator, time_format='%Y/%m')
fig = plt.savefig('%s.flow.daily.png' % output_plot_basename, format='png')

#============== plot monthly data ===============#
# calculate
s_usgs_mon = my_functions.calc_monthly_data(s_usgs_to_plot)
s_rbm_mon = my_functions.calc_monthly_data(s_rbm_to_plot)
# plot
fig = plt.figure()
ax = plt.axes()
def run(self):
    audio_dtype = np.float32
    secs_per_hour = 60 * 60   # number of seconds in an hour
    hours_of_buffer = 12      # number of hours to buffer in memory: audio_buffer and time_stamps, below
    buffer_period = secs_per_hour * hours_of_buffer     # total seconds covered by the buffer
    buffer_len = int(buffer_period * self.sample_rate)  # array length of audio_buffer and time_stamps
    default = np.float32(0.0) # default value if there is a decode failure
    pos = 0                   # position within the buffer
    counter = 0               # loop counter used to decide when to queue results or do other repeated tasks
    crying_blocks = []        # crying blocks for the past week
    average = 0.0             # average volume
    current_volume = 0.0      # current volume

    ##########################################
    # TODO: store buffers in sqlite database
    ##########################################

    # TODO: add full os path with join
    sql_db_file = 'ls.sqlite'
    CreateSqlDatabase(sql_db_file)

    # read crying_blocks table
    for cb in ReadSqlTable(sql_db_file, 'crying_blocks'):
        if cb is not None:
            crying_blocks.append({'start': cb.start,
                                  'start_str': cb.start_str,
                                  'stop': cb.stop,
                                  'duration': cb.duration})
    if len(crying_blocks) > 1:
        # sort crying blocks so that the most recent status is at the top
        crying_blocks = sorted(crying_blocks,
                               key=lambda crying_block: crying_block['start'],
                               reverse=True)

    # create data buffers for the audio and time stamps:
    # (1) set all audio buffer elements to zero
    # (2) set all time stamp elements to now
    time_stamps = np.empty(buffer_len, dtype=datetime.datetime)
    audio_buffer = np.zeros(buffer_len, dtype=audio_dtype)
    time_stamps[:] = datetime.datetime.fromtimestamp(time.time())

    print "Processing MP3 queue buffer\n"

    # this is essentially a while-true loop
    for mp3data, timestamp in StreamBufferReader(self.queue_read):
        t1 = time.time()  # start time used to calculate loop processing time below
        signaldata = None
        timestamp_str = timestamp.strftime("%Y-%m-%d %H:%M:%S.%f")
        try:
            signaldata = audioread.decode(StringIO(mp3data))
        except (audioread.NoBackendError, audioread.ffdec.ReadTimeoutError):
            print timestamp_str, "\tDecode error. Setting default value."
        # convert the integer buffer to an array of np.float32 elements
        # this portion is an excerpt from librosa.core.load()
        # https://github.com/librosa/librosa/blob/master/librosa/core/audio.py
        if signaldata is not None:
            signal = []
            for frame in signaldata:
                frame = librosa.util.buf_to_float(frame)
                signal.append(frame)
            if signal:
                signal = np.concatenate(signal)
            # final cleanup for dtype and contiguity
            signal = np.ascontiguousarray(signal, dtype=audio_dtype)
            peak = np.abs(signal).max()
        # there was a decode failure, so set the peak value to the default
        else:
            peak = default

        # load the latest audio parameter and its time stamp into the buffers
        time_stamps[pos] = timestamp
        audio_buffer[pos] = peak
        pos = (pos + 1) % buffer_len

        # roll the arrays so that the latest readings are at the end
        rolled_time_stamps = np.roll(time_stamps, shift=buffer_len - pos)
        rolled_audio_buffer = np.roll(audio_buffer, shift=buffer_len - pos)

        # apply some smoothing
        sigma = 4 * (self.sample_rate)
        rolled_audio_buffer = ndimage.gaussian_filter1d(
            rolled_audio_buffer, sigma=sigma, mode="reflect")

        # TODO: perform this in parallel
        """
        if counter % (self.sample_rate*30) == (self.sample_rate*30)-1:
            for i in range(hours_of_buffer):
                # get one hour of data for the plot
                hour_chunks = int(secs_per_hour * self.sample_rate)
                start = hour_chunks*i
                end = hour_chunks*(i+1)
                hour_chunk = rolled_audio_buffer[start:end]
                hour_timestamps = rolled_time_stamps[start:end]

                # convert the time stamps to matplotlib format
                time_stamps_plt = matplotlib.dates.date2num(hour_timestamps)

                # generate plot of that hour
                plt.figure()
                plt.plot_date(time_stamps_plt, hour_chunk, xdate=True,
                              label='peak volume', linestyle='solid', marker=None)
                plt.gcf().autofmt_xdate()
                plt.legend(loc='best')
                plt.savefig('www/static/hour_window_' + str(hours_of_buffer - i))
                plt.close()
        """

        # every second, update the image for the last hour window
        if counter % (self.sample_rate * 1) == (self.sample_rate * 1) - 1:
            # get the last hour of data for the plot
            hour_chunks = int(secs_per_hour * self.sample_rate)
            hour_chunk = rolled_audio_buffer[-hour_chunks:]
            hour_timestamps = rolled_time_stamps[-hour_chunks:]

            # calculate the average for the last hour
            average = np.mean(hour_chunk) * 100.0

            # convert the time stamps to matplotlib format
            time_stamps_plt = matplotlib.dates.date2num(hour_timestamps)

            # generate plot of the past hour
            plt.figure()
            plt.plot_date(time_stamps_plt, hour_chunk, xdate=True,
                          label='peak volume', linestyle='solid', marker=None)
            plt.gcf().autofmt_xdate()
            plt.legend(loc='best')
            # TODO: add full os path with join
            plt.savefig('www/static/hour_window_1')
            plt.close()

        # get the last hour of data for the plot and re-sample to 1 value per second
        #hour_chunks = int(secs_per_hour * self.sample_rate)
        # xs = np.arange(hour_chunks)
        # f = interpolate.interp1d(xs, rolled_audio_buffer[-hour_chunks:])
        # audio_plot = f(np.linspace(start=0, stop=xs[-1], num=3600))

        # ignore positions with no readings
        mask = rolled_audio_buffer > 0
        rolled_time_stamps = rolled_time_stamps[mask]
        rolled_audio_buffer = rolled_audio_buffer[mask]

        # partition the audio history into blocks of two types:
        #   1. noise, where the volume is greater than noise_threshold
        #   2. silence, where the volume is less than noise_threshold
        noise = rolled_audio_buffer > self.noise_threshold
        silent = rolled_audio_buffer < self.noise_threshold

        # join "noise blocks" that are closer together than min_quiet_time
        new_crying_blocks = []
        if np.any(noise):
            silent_labels, _ = ndimage.label(silent)
            silent_ranges = ndimage.find_objects(silent_labels)
            for silent_block in silent_ranges:
                start = silent_block[0].start
                stop = silent_block[0].stop

                # don't join silence blocks at the beginning or end
                if start == 0:
                    continue

                interval_length = time.mktime(rolled_time_stamps[stop - 1].timetuple()) - \
                    time.mktime(rolled_time_stamps[start].timetuple())
                if interval_length < self.min_quiet_time:
                    noise[start:stop] = True

            # find noise block start times and durations
            crying_labels, num_crying_blocks = ndimage.label(noise)
            crying_ranges = ndimage.find_objects(crying_labels)
            for cry in crying_ranges:
                start = time.mktime(rolled_time_stamps[cry[0].start].timetuple())
                stop = time.mktime(rolled_time_stamps[cry[0].stop - 1].timetuple())
                duration = float(stop - start)

                # ignore isolated noises (i.e. with a duration less than min_noise_time)
                if duration < self.min_noise_time:
                    continue

                # save some info about the noise block
                new_crying_blocks.append({
                    'start': start,
                    'start_str': datetime.datetime.fromtimestamp(start).strftime(
                        "%Y-%m-%d %I:%M:%S %p").lstrip('0'),
                    'stop': stop,
                    'duration': format_time_difference(start, stop)
                })

        # update crying blocks
        if len(crying_blocks) == 0 and len(new_crying_blocks) == 1:
            crying_blocks.append(new_crying_blocks[-1])
            InsertRow(sql_db_file,
                      CryingBlocks(start=new_crying_blocks[-1]['start'],
                                   stop=new_crying_blocks[-1]['stop'],
                                   start_str=new_crying_blocks[-1]['start_str'],
                                   duration=new_crying_blocks[-1]['duration']))
        else:
            if len(new_crying_blocks) > 0:
                start_diff = new_crying_blocks[-1]['start'] - crying_blocks[0]['start']
                if start_diff < self.min_noise_time:
                    crying_blocks[0] = new_crying_blocks[-1]
                else:
                    duration = new_crying_blocks[-1]['stop'] - new_crying_blocks[-1]['start']
                    if duration > self.min_noise_time:
                        crying_blocks.append(new_crying_blocks[-1])
                        InsertRow(sql_db_file,
                                  CryingBlocks(start=new_crying_blocks[-1]['start'],
                                               stop=new_crying_blocks[-1]['stop'],
                                               start_str=new_crying_blocks[-1]['start_str'],
                                               duration=new_crying_blocks[-1]['duration']))

        # only keep the last 7 days worth of crying blocks
        # FIXME: this is broken, it stores duplicates
        now = time.time()
        new_crying_blocks = []
        for crying_block in crying_blocks:
            if crying_block['start'] > (now - float(3600 * 24 * 7)):
                new_crying_blocks.append(crying_block)
        crying_blocks = new_crying_blocks

        # sort crying blocks so that the most recent status is at the top
        crying_blocks = sorted(crying_blocks,
                               key=lambda crying_block: crying_block['start'],
                               reverse=True)

        # determine how long the current state has lasted
        time_current = time.time()
        time_crying = ""
        time_quiet = ""
        str_crying = "Baby noise for "
        str_quiet = "Baby quiet for "

        # update status strings
        if len(new_crying_blocks) == 0:
            if len(crying_blocks) > 0:
                time_quiet = str_quiet + format_time_difference(
                    crying_blocks[0]['stop'], time_current)
            else:
                time_quiet = str_quiet + format_time_difference(
                    time.mktime(rolled_time_stamps[0].timetuple()), time_current)
        else:
            if time_current - crying_blocks[0]['stop'] < self.min_quiet_time:
                time_crying = str_crying + format_time_difference(
                    crying_blocks[0]['start'], time_current)
            else:
                time_quiet = str_quiet + format_time_difference(
                    crying_blocks[0]['stop'], time_current)

        # every 30 seconds, write the current state to the log file
        if counter % (self.sample_rate * 30) == (self.sample_rate * 30) - 1:
            f = open('littlesleeper.log', 'w')
            f.write(time_crying + '\n')
            f.write(time_quiet + '\n')
            if len(crying_blocks):
                f.write('Crying Blocks:\n')
                for crying_block in crying_blocks:
                    f.write(crying_block['start_str'] + '\t' +
                            crying_block['duration'] + '\n')
            f.close()

        # every second, load the results into a queue that is read by BroadcastResults
        if not self.discard:
            if counter % (self.sample_rate * 1) == (self.sample_rate * 1) - 1:
                current_str = "Current Noise Level: {curr}".format(
                    curr=round(peak * 100.0, 2))
                average_str = "Average Noise Level: {avg}".format(
                    avg=round(average, 2))
                results = {'crying_blocks': crying_blocks,
                           'time_crying': time_crying,
                           'time_quiet': time_quiet,
                           'current_volume': current_str,
                           'average': average_str}
                self.results_buffer.put(results)
                self.results_buffer.task_done()

        # increment counter
        counter = (counter + 1) % (self.sample_rate * 30)

        # calculate processing time; it should not exceed 1/SAMPLE_RATE (secs)
        t2 = time.time()
        processing_time = (t2 - t1) * 1000.0
        print "Processing time: {pt} (ms)".format(pt=round(processing_time, 2))

        if self.StopThread:
            break

    # perform any cleanup here:
    # update crying blocks in the database
    print "Updating SQL database . . ."
    print "Rebuilding crying_blocks table"
    DeleteAllRows(sql_db_file, 'crying_blocks')
    print "Inserting values"
    for cb in crying_blocks:
        print "\t", cb["start"], "\t", cb["stop"], "\t", cb["start_str"], "\t", cb["duration"]
        InsertRow(sql_db_file,
                  CryingBlocks(start=cb['start'],
                               stop=cb['stop'],
                               start_str=cb['start_str'],
                               duration=cb['duration']))
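# The noise/silence partitioning above leans on scipy.ndimage treating a boolean
# array like an image: label() numbers each contiguous run of True values and
# find_objects() returns one slice per run. A minimal, self-contained sketch of
# the same technique (the threshold and data here are made up for illustration):
import numpy as np
from scipy import ndimage

volume = np.array([0.1, 0.9, 0.8, 0.1, 0.1, 0.7, 0.9, 0.9, 0.1])
noise = volume > 0.5                       # boolean mask of "noisy" samples
labels, num_blocks = ndimage.label(noise)  # labels 1..num_blocks, one per run of True
blocks = [(s[0].start, s[0].stop) for s in ndimage.find_objects(labels)]
# blocks == [(1, 3), (5, 8)]: two noise blocks, covering indices 1-2 and 5-7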
Requires matplotlib, see http://matplotlib.org
or search your package manager (Debian: apt-get install python-matplotlib)
"""

import sys
import hdata, value
from extra_data import Bombers as extra
import matplotlib.pyplot as plt

if __name__ == '__main__':
    showtotal = '--nototal' not in sys.argv
    legend = '--nolegend' not in sys.argv
    projected = '--project' in sys.argv
    data = value.extract_value(sys.stdin)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    dates = [datum['date'].ordinal() for datum in data]
    if projected:
        if showtotal:
            gt = plt.plot_date(dates, [(e['total'] + e['cshr'] * 25) / 1e3 for e in data],
                               fmt='ko-', tz=None, xdate=True, ydate=False,
                               label='total', zorder=-2)
        gp = plt.plot_date(dates, [(e['cshr'] * 25) / 1e3 for e in data],
                           fmt='gd-', tz=None, xdate=True, ydate=False,
                           label='projected', zorder=2)
    else:
        if showtotal:
            gt = plt.plot_date(dates, [e['total'] / 1e3 for e in data],
                               fmt='ko-', tz=None, xdate=True, ydate=False,
                               label='total', zorder=-2)
        gc = plt.plot_date(dates, [e['cash'] / 1e3 for e in data],
                           fmt='g+-', tz=None, xdate=True, ydate=False,
                           label='cash', zorder=2)
    for bi, b in enumerate(hdata.Bombers.data):
        bvalue = [datum['bvalues'][bi] / 1e3 for datum in data
                  if hdata.inservice(datum['date'], b)]
        if not any(bvalue):
            continue
        bdate = [datum['date'].ordinal() for datum in data
                 if hdata.inservice(datum['date'], b)]
        gb = plt.plot_date(bdate, bvalue, fmt='o-', mew=0,
                           color=extra[b['name']]['colour'], tz=None,
                           xdate=True, ydate=False, label=b['name'], zorder=0)
    if legend:
        plt.legend(ncol=2, loc='upper left')
    plt.show()
def plot_best_model():
    plt.close('all')
    columns = ['Tout', 'Toutavg24', 'vWind', 'vWindavg24']
    # , 'hours', 'hours2', 'hours3', 'hours4', 'hours5', 'hours6', 'hours7', 'hours8']
    X = all_data[columns]
    res = mlin_regression(y, X)
    timesteps = ens.gen_hourly_timesteps(dt.datetime(2015, 12, 17, 1),
                                         dt.datetime(2016, 1, 15, 0))

    plt.subplot(2, 1, 1)
    plt.plot_date(timesteps, y, 'b', label='Actual production')
    plt.plot_date(timesteps, res.fittedvalues, 'r', label='Weather model')
    prstd, iv_l, iv_u = wls_prediction_std(res)
    plt.plot_date(timesteps, iv_u, 'r--', label='95% conf. int.')
    plt.plot_date(timesteps, iv_l, 'r--')
    mean_day_resid = [res.resid[i::24].mean() for i in range(24)]
    mean_resid_series = np.tile(mean_day_resid, 29)
    plt.plot_date(timesteps, res.fittedvalues + mean_resid_series, 'g',
                  label='Weather model + avg daily profile')
    plt.ylabel('MW')
    plt.legend(loc=2)

    plt.subplot(2, 1, 2)
    plt.plot_date(timesteps, res.resid, '-', label='Residual')
    plt.plot_date(timesteps, mean_resid_series)
    plt.ylabel('MW')
    plt.legend()

    mape = np.mean(np.abs((res.fittedvalues + mean_resid_series - y) / y))
    mape2 = np.mean(np.abs(res.resid / y))
    mae = np.mean(np.abs(res.fittedvalues + mean_resid_series - y))
    print mape, mape2, mae
    print res.summary()

    return res
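# mlin_regression is a local helper that is not shown here. Judging from the use
# of res.fittedvalues, res.resid, res.summary() and wls_prediction_std (from
# statsmodels.sandbox.regression.predstd), it is presumably a thin wrapper around
# ordinary least squares in statsmodels; a plausible sketch (the name and exact
# behavior are assumptions):
import statsmodels.api as sm

def mlin_regression(y, X):
    """Fit y against the columns of X plus an intercept; return the results object."""
    X = sm.add_constant(X)     # add an intercept column
    return sm.OLS(y, X).fit()  # results expose .fittedvalues, .resid, .summary()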
def main():
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--year', type=float)
    parser.add_argument('--day', type=float)
    parser.add_argument('--date', type=str)
    parser.add_argument('--date-end', type=str)
    parser.add_argument('--log', action='store_true', help='log scale')
    parser.add_argument('--diff', action='store_true', help='also plot differences')
    parser.add_argument('--diff-only', action='store_true', help='plot differences only')
    options, codes = parse_args(parser)
    if not options:
        return

    # handle the date arguments
    days = 365
    if options['year']:
        days = options['year'] * 365
    elif options['day']:
        days = options['day']
    elif options['date']:
        ts = parse_date(options['date']).timestamp() * 1000
        days = None
    if days:
        ts = (datetime.datetime.now() - datetime.timedelta(days=days)).timestamp() * 1000
    if options['date_end']:
        ts_end = parse_date(options['date_end']).timestamp() * 1000
    else:
        ts_end = None

    # fetch the fund data
    pool = multiprocessing.dummy.Pool(40)
    funds = pool.map(partial(fund_detail, verbose=False), codes)
    funds = [i for i in funds if i]

    # ***** print the results ***** #
    print_fund_brief(funds)

    # ***** plot ***** #
    matplotlib.rcParams['font.sans-serif'] = ['Source Han Sans SC']
    matplotlib.rcParams['font.family'] = 'sans-serif'
    matplotlib.rcParams['axes.unicode_minus'] = False

    # set the chart title and date format
    title = 'Return comparison'
    if options['log']:
        title += ' - log scale'
    fig = plt.figure(title, figsize=[6.4 * 1.5, 4.8 * 1.2])
    ax = plt.gca()
    ax.xaxis.set_major_formatter(DateFormatter('%y-%m-%d'))

    lines = []
    series = []
    for fund in funds:
        # adjnavs = fund['adjnavs'][-500:]
        adjnavs = [i for i in fund['adjnavs'] if i[0] >= ts]
        if ts_end:
            adjnavs = [i for i in adjnavs if i[0] <= ts_end]
        if options['log']:
            rors = [math.log2(i[1] / adjnavs[0][1]) for i in adjnavs]
        else:
            rors = [(i[1] / adjnavs[0][1] - 1) * 100 for i in adjnavs]
        x = [i[0] for i in adjnavs]
        series.append([x, rors, fund])
        if not options['diff_only']:
            x = np.array([date2num(datetime.datetime.fromtimestamp(i[0] / 1000))
                          for i in adjnavs])
            y = np.array(rors)
            label = fund['code'] + ' ' + fund['name']
            line, = plt.plot_date(x, y, fmt=',-', label=label, linewidth=1)
            lines.append(line)

    if options['diff'] or options['diff_only']:
        x0, y0, fund0 = series[0]
        m0 = {x0[i]: y0[i] for i in range(len(x0))}
        for x, y, fund in series[1:]:
            m = {x[i]: y[i] for i in range(len(x))}
            x = [i for i in m0 if i in m]
            y = [m[i] - m0[i] for i in x]
            x = [date2num(datetime.datetime.fromtimestamp(i / 1000)) for i in x]
            label = f'diff: {fund["name"]} - {fund0["name"]}'
            line, = plt.plot_date(x, y, fmt=',-', label=label, linewidth=1.5)
            lines.append(line)

    plt.grid()

    # tooltip
    fig = plt.gcf()
    ax = plt.gca()
    annot = ax.annotate("", xy=(0, 0), xytext=(-96, 30),
                        textcoords="offset points",
                        bbox=dict(boxstyle="round", fc="w"),
                        arrowprops=dict(arrowstyle="->"))
    annot.set_visible(False)

    def update_annot(line, annot, ind):
        x, y = line.get_data()
        _x = x[ind["ind"][0]]
        _y = y[ind["ind"][0]]
        annot.xy = (_x, _y)
        label = line.get_label()
        day = datetime.datetime.fromtimestamp(
            num2date(_x).timestamp()).strftime('%Y-%m-%d')
        text = '%s\n%s %.2f' % (label, day, _y)
        annot.set_text(text)

    def hover(event):
        """Update and show a tooltip while hovering an object; hide it otherwise."""
        if event.inaxes == ax:
            an_artist_is_hovered = False
            for line in lines:
                contains, ind = line.contains(event)
                if contains:
                    an_artist_is_hovered = True
                    update_annot(line, annot, ind)
                    annot.set_visible(True)
                    fig.canvas.draw_idle()
            if not an_artist_is_hovered:
                # hide the annotation only if no artist in the graph is hovered
                annot.set_visible(False)
                fig.canvas.draw_idle()
    # call 'hover' whenever there is a mouse motion
    fig.canvas.mpl_connect("motion_notify_event", hover)

    if options['log']:
        legend_title = 'Return (log scale)'
    else:
        legend_title = 'Return (%)'
    plt.legend(title=legend_title, fontsize='small')
    # plt.legend(mode='expand')
    # plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
    plt.show()
def fitCurve(input_data, title, block_days, fill_dates, start_days):
    logistic_p0 = [500000, 0.3, 28]  # initial parameter guess
    t = np.array([i + 1 for i in range(len(input_data))])
    target_y = input_data[0].values
    logistic_params = leastsq(err_f, logistic_p0, args=(t, target_y))
    logistic_p = logistic_params[0]
    predict_y = logistic_inc_function(logistic_p, t)

    # error
    pred_e = target_y - predict_y

    start_time = datetime.datetime(2020, 1, 22) + datetime.timedelta(days=start_days)
    last_date = start_time + datetime.timedelta(days=(len(target_y) - fill_dates - 1))
    last_date_str = last_date.strftime('%Y-%m-%d')

    end_time = datetime.datetime(2020, 6, 30)
    inc = datetime.timedelta(days=1)
    dates = mat.dates.drange(start_time, end_time, inc)
    t2 = np.array([i + 1 for i in range(len(dates))])
    predict_data_long = logistic_inc_function(logistic_p, t2)

    # plot cumulative cases
    plt.plot_date(dates, predict_data_long, label='Predictions')
    if fill_dates > 0:
        plt.plot_date(dates[:len(input_data) - fill_dates],
                      target_y[:-fill_dates], label="Actual Cases")
        plt.plot_date(dates[len(input_data) - fill_dates:len(input_data)],
                      target_y[len(input_data) - fill_dates:], label="Pseudo Cases")
    else:
        plt.plot_date(dates[:len(input_data)], target_y[:], label="Actual Cases")
    plt.xlabel('Date')
    plt.ylabel('Number of Cases')
    plt.title(title + "_cumulative_cases_" + last_date_str)
    plt.legend(loc='best')
    plt.rcParams['figure.figsize'] = (12, 8)
    plt.savefig(title + "_cumulative_cases_" + last_date_str + ".png", dpi=200)
    plt.show()

    # plot daily increments
    plt.plot_date(dates[1:], getDailyInc(predict_data_long), label='Predictions')
    if fill_dates > 0:
        plt.plot_date(dates[1:len(input_data) - fill_dates],
                      getDailyInc(target_y)[:-fill_dates], label="Actual Cases")
        plt.plot_date(dates[len(input_data) - fill_dates:len(input_data)],
                      getDailyInc(target_y)[len(input_data) - fill_dates - 1:],
                      label="Pseudo Cases")
    else:
        plt.plot_date(dates[1:len(input_data)], getDailyInc(target_y), label="Actual Cases")
    plt.xlabel('Date')
    plt.ylabel('Number of Cases')
    plt.title(title + "_daily_cases_" + last_date_str)
    plt.legend(loc='best')
    plt.rcParams['figure.figsize'] = (12, 8)
    plt.savefig(title + "_daily_cases_" + last_date_str + ".png", dpi=200)
    plt.show()
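# The fit above depends on three helpers defined elsewhere in the project:
# logistic_inc_function, err_f, and getDailyInc. A plausible sketch of them,
# assuming a standard three-parameter logistic growth curve with the parameter
# order inferred from logistic_p0 = [K, r, t0] above (an assumption, not the
# author's actual code):
import numpy as np

def logistic_inc_function(p, t):
    """Logistic curve: p = [K, r, t0] -> K / (1 + exp(-r * (t - t0)))."""
    K, r, t0 = p
    return K / (1.0 + np.exp(-r * (t - t0)))

def err_f(p, t, y):
    """Residuals passed to scipy.optimize.leastsq."""
    return logistic_inc_function(p, t) - y

def getDailyInc(cumulative):
    """Day-over-day increments of a cumulative series (length n - 1)."""
    return np.diff(cumulative)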
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt

dates = [dt.datetime.today() + dt.timedelta(days=i) for i in range(10)]  # 10 days
values = np.random.rand(len(dates))
plt.plot_date(mpl.dates.date2num(dates), values, linestyle='-')  # connect points with lines
plt.show()
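# Worth noting: Matplotlib handles datetime values on an axis natively, so
# plot_date is not required for charts like these (it has been deprecated in
# recent Matplotlib releases in favor of plain plot). The same chart without
# the date2num conversion:
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt

dates = [dt.datetime.today() + dt.timedelta(days=i) for i in range(10)]
values = np.random.rand(len(dates))
plt.plot(dates, values, '-')  # datetimes go straight on the x-axis
plt.gcf().autofmt_xdate()     # tilt the tick labels for readability
plt.show()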