class ImageSonogram(GridLayout):
    def __init__(self, **kwargs):
        # create the fig, axes, and canvas
        self.fig1 = matplotlib.figure.Figure()
        self.plot_sonogram_canvas = FigureCanvasKivyAgg(self.fig1)
        # make plot take up the entire space
        self.ax1 = self.fig1.add_axes([0., 0., 1., 1.])
        self.ax1.set_axis_off()
        super(ImageSonogram, self).__init__(**kwargs)

    def image_sonogram_initial(self, rows, cols):
        # clear axes and reformat to make plot take up the entire space
        self.ax1.clear()
        self.ax1.set_axis_off()
        # plot data
        data = np.zeros((rows, cols))
        self.plot_sonogram = self.ax1.imshow(np.log(data + 3),
                                             cmap='hot',
                                             extent=[0, cols, 0, rows],
                                             aspect='auto')
        # create widget
        self.clear_widgets()
        self.add_widget(self.plot_sonogram_canvas)

    def image_sonogram(self, data):
        self.plot_sonogram.set_data(np.log(data + 3))
        self.plot_sonogram.autoscale()
        self.plot_sonogram_canvas.draw_idle()
class SyllSimSummaryPage(Screen):
    def __init__(self, *args, **kwargs):
        self.fig6, self.ax6 = plt.subplots()
        self.syllsim_hist_canvas = FigureCanvasKivyAgg(self.fig6)
        super(SyllSimSummaryPage, self).__init__(*args, **kwargs)

    def calculate_syllsim_thresh_stats(self):
        # syllable similarity thresholds from all the songs processed
        syllsim_thresholds = self.manager.get_screen(
            'syllsim_threshold_page').syllsim_thresholds

        # clear the plot
        self.ax6.clear()

        # plot histogram of the syllable similarity thresholds used
        if len(np.unique(syllsim_thresholds)) > 20:
            self.ax6.hist(x=syllsim_thresholds, bins='auto',
                          color=(0.196, 0.643, 0.80), alpha=0.7)
        else:
            # the trick is to set up the bins centered on the integers, i.e.
            # -0.5, 0.5, 1.5, 2.5, ... up to max(data) + 1.5; subtracting 0.5
            # eliminates the extra bin at the end
            bins = np.arange(min(syllsim_thresholds),
                             max(syllsim_thresholds) + 1.5, 1) - 0.5
            self.ax6.hist(x=syllsim_thresholds, bins=bins,
                          color=(0.196, 0.643, 0.80), alpha=0.7)
        self.ax6.set_xlabel('Syllable Similarity Threshold')
        self.ax6.set_ylabel('Number of Songs with Threshold')
        self.syllsim_hist_canvas.draw()
        self.ids.syllsim_hist.clear_widgets()
        self.ids.syllsim_hist.add_widget(self.syllsim_hist_canvas)

        # calculate stats for the submitted thresholds and add them to the screen
        self.ids.num_files.text = 'Number of Files: ' + str(
            len(syllsim_thresholds))
        self.ids.avg_syllsim_thresh.text = 'Average: ' + str(
            round(np.mean(syllsim_thresholds), 1))
        self.ids.std_dev_syllsim_thresh.text = 'Standard Deviation: ' + str(
            round(np.std(syllsim_thresholds), 1))
        self.ids.min_syllsim_thresh.text = 'Minimum: ' + str(
            min(syllsim_thresholds))
        self.ids.max_syllsim_thresh.text = 'Maximum: ' + str(
            max(syllsim_thresholds))

        # set the user input to the average as a default (they can change this
        # before submitting)
        self.ids.submitted_syllsim_thresh_input.text = str(
            round(np.mean(syllsim_thresholds), 1))

    def submit_syllsim_thresh(self):
        # update the landing page with the syllable similarity threshold the
        # user chooses/submits
        self.manager.get_screen(
            'landing_page'
        ).ids.syll_sim_input.text = self.ids.submitted_syllsim_thresh_input.text
def plot(self):
    try:
        value = self.pi_values[-1]
    except BaseException:
        value = 0
    title = "${0}$ = {1:10.8f}".format(r"\pi", value)
    title = "${0}$ = {1:10.8f}\n${0}$ = {2:10.8f}".format(
        r"\pi", math.pi, value)
    plt.close()
    fig, ax = plt.subplots()
    ax.axhline(y=math.pi, color='black', alpha=0.5)
    ax.set_xlabel("Number of trials", fontsize=14)
    ax.set_ylabel(r"Computed ${\pi}$ value", fontsize=14)
    ax.set_title(title, size=14)
    # if abs(value/math.pi) < 1.0:
    #     ax.set_ylim(2.5, 3.5)
    # else:
    #     ax.relim()
    #     ax.autoscale()
    ax.plot(self.pi_values, color='r')
    self.clear_widgets()
    self.add_widget(FigureCanvasKivyAgg(plt.gcf()))
def latex_image(latex_str):
    fig, ax = pyplot.subplots()
    ax.axis('off')
    ax.text(.5, .5, latex_str, size=20,
            horizontalalignment='center',
            verticalalignment='center',
            bbox={})
    return FigureCanvasKivyAgg(fig)
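# A minimal usage sketch (not from the original source) showing how the canvas
# returned by latex_image() above might be embedded in a Kivy app. The app and
# layout names are illustrative assumptions; latex_image and its matplotlib
# imports are taken from the snippet above.
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout


class LatexDemoApp(App):
    def build(self):
        root = BoxLayout()
        # render a LaTeX string via matplotlib's mathtext and embed the
        # resulting FigureCanvasKivyAgg widget
        root.add_widget(latex_image(r"$e^{i\pi} + 1 = 0$"))
        return root


if __name__ == '__main__':
    LatexDemoApp().run()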
def on_enter(self, *args):
    fig, bx = plt.subplots()
    fig.patch.set_facecolor('#ff8a96')
    bx.imshow(im)
    results = bx.scatter(xs, ys, s=90, c=resultsc)
    box = BoxLayout()
    box.size_hint = 0.95, 0.7
    box.pos_hint = {"x": 0.025, "top": 0.9}
    finished_plot = FigureCanvasKivyAgg(plt.gcf())  # gets current figure
    box.add_widget(finished_plot)
    self.add_widget(box, 10)
def plot(self):
    plt.close()
    self.figure, self.ax = plt.subplots()
    self.ax.set_aspect('equal')
    self.ax.set_xlim((-1, 1))
    self.ax.set_ylim((-1, 1))
    square = plt.Rectangle((-1, -1), 2, 2, fc='w', alpha=0.0, color='w')
    circle = plt.Circle((0, 0), 1, color='r', alpha=0.5)
    self.ax.add_patch(square)
    self.ax.add_patch(circle)
    # self.ax.grid(color='black', alpha=0.2, linestyle='-', linewidth=1)
    major_ticks = np.arange(-1, 1.0001, 0.25)
    minor_ticks = np.arange(-1, 1.0001, 0.25 * 0.5)
    self.ax.set_xticks(major_ticks)
    self.ax.set_xticks(minor_ticks, minor=True)
    self.ax.set_yticks(major_ticks)
    self.ax.set_yticks(minor_ticks, minor=True)
    self.ax.grid(which='both')
    # or if you want different settings for the grids:
    self.ax.grid(which='minor', alpha=0.35)
    self.ax.grid(which='major', alpha=0.5)
    ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x * 100))
    self.ax.xaxis.set_major_formatter(ticks_x)
    ticks_y = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x * 100))
    self.ax.yaxis.set_major_formatter(ticks_y)
    self.ax.scatter([i[0] for i in self.points],
                    [i[1] for i in self.points],
                    marker='.', color='r')
    if self.points:
        self.ax.scatter(self.points[-1][0], self.points[-1][1],
                        marker='.', color='b')
    self.clear_widgets()
    self.add_widget(FigureCanvasKivyAgg(self.figure))
def show(self):
    if not self.ids.entry.text:
        return
    if len(self.ids.graph.children) != 0:
        self.ids.graph.clear_widgets()
    con = sqlite3.connect('pos_database.db')
    df = pd.read_sql_query(
        "SELECT productData.productID, quantity FROM salesData INNER JOIN productData ON \
        productData.productID=salesData.productID", con)
    df = df.groupby('ProductID', as_index=False).sum()
    df = df.sort_values(by='quantity', ascending=False)
    df = df.head(int(self.ids.entry.text))
    df = df.astype({'ProductID': 'str'})
    plt.bar(df['ProductID'], df['quantity'])
    plt.xlabel('products')
    plt.ylabel('sales count')
    plt.title(self.ids.entry.text + ' most sold products')
    self.ids.graph.add_widget(FigureCanvasKivyAgg(plt.gcf()))
def build(self):
    box = BoxLayout(orientation="horizontal")
    # df = pandas.read_csv(os.path.join(pandas.io.tests.__path__[0], "data", "iris.csv"))
    df = pandas.read_csv(
        "https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv"
    )
    print(df)
    seaborn.set_palette('bright')
    seaborn.set_style('whitegrid')
    seaborn.pairplot(data=df, hue="Name", kind="scatter",
                     diag_kind="hist",
                     x_vars=("SepalLength", "SepalWidth"),
                     y_vars=("PetalLength", "PetalWidth"))
    box.add_widget(FigureCanvasKivyAgg(plt.gcf()))
    box.add_widget(FigureCanvasKivy(plt.gcf()))
    return box
class SyllSimThresholdPage(Screen):
    user_noise_thresh = StringProperty()
    user_syll_sim_thresh = StringProperty()

    def __init__(self, *args, **kwargs):
        self.fig5, self.ax5 = plt.subplots()
        self.plot_syllsim_canvas = FigureCanvasKivyAgg(self.fig5)
        self.ax5 = plt.Axes(self.fig5, [0., 0., 1., 1.])
        self.ax5.set_axis_off()
        self.fig5.add_axes(self.ax5)
        super(SyllSimThresholdPage, self).__init__(*args, **kwargs)
        self.log = get_logger(__name__)

    def setup(self):
        self.syllsim_thresholds = []
        self.i = 0
        self.files = self.parent.files
        self.record = True
        self.next()

    def next(self):
        # if not first entering the app, record the threshold
        if self.i > 0 and self.record:
            self.syllsim_thresholds.append(float(self.ids.user_syllsim.text))
        # otherwise it is the first time, so reset syllable similarity
        # threshold to the default
        else:
            self.ids.user_syllsim.text = self.user_syll_sim_thresh

        # if it is the last song go to syllable similarity threshold
        # summary page, otherwise process song
        if self.i == len(self.files):
            self.manager.current = 'syllsim_summary_page'
        else:
            errors = ''
            f_name = os.path.join(self.parent.directory, self.files[self.i])
            try:
                self.update(f_name)
                self.i += 1
                self.record = True
            except Exception as e:
                self.ids.syllsim_graph.clear_widgets()
                self.ids.similarity.text = ''
                self.ids.song_syntax.text = ''
                errors += "WARNING : Skipped file {0}\n{1}\n".format(f_name, e)
                # raise e
                self.log.info(errors)
                self.record = False
                all_noise = ErrorInSyllSimThresholdWidgetPopup(self, errors)
                all_noise.open()
                self.i += 1
            self.log.info(self.i)

    def update(self, f_name):
        self.ids.user_syllsim.text = self.ids.user_syllsim.text
        ons, offs, thresh, ms, htz = analyze.load_bout_data(f_name)
        self.onsets = ons
        self.offsets = offs
        self.syll_dur = self.offsets - self.onsets
        self.threshold_sonogram = thresh

        # zero anything before first onset or after last offset
        # (note the offset row is already zeros, so okay to include);
        # this will take care of any noise before or after the song
        threshold_sonogram_crop = self.threshold_sonogram.copy()
        threshold_sonogram_crop[:, 0:self.onsets[0]] = 0
        threshold_sonogram_crop[:, self.offsets[-1]:-1] = 0

        # connectivity 1=4 or 2=8 (include diagonals)
        self.labeled_sonogram = label(threshold_sonogram_crop, connectivity=1)
        corrected_sonogram = remove_small_objects(
            self.labeled_sonogram,
            min_size=float(self.user_noise_thresh) + 1,  # add one to make <= threshold
            connectivity=1)

        self.son_corr, son_corr_bin = analyze.get_sonogram_correlation(
            sonogram=corrected_sonogram,
            onsets=self.onsets,
            offsets=self.offsets,
            syll_duration=self.syll_dur,
            corr_thresh=float(self.ids.user_syllsim.text))

        self.init_plot()
        self.new_thresh()

    def init_plot(self):
        # prepare graph and make plot take up the entire space
        rows, cols = np.shape(self.threshold_sonogram)
        data = np.zeros((rows, cols))
        self.ax5.clear()
        self.ax5 = plt.Axes(self.fig5, [0., 0., 1., 1.])
        self.ax5.set_axis_off()
        self.fig5.add_axes(self.ax5)

        # plot placeholder data
        cmap = plt.cm.tab20b
        cmap.set_under(color='black')
        cmap.set_over(color='gray')
        cmap.set_bad(color='white')

        self.plot_syllsim = self.ax5.imshow(data + 3,
                                            extent=[0, cols, 0, rows],
                                            aspect='auto',
                                            cmap=cmap,
                                            vmin=0,
                                            vmax=1000.)
        self.trans = tx.blended_transform_factory(self.ax5.transData,
                                                  self.ax5.transAxes)
        self.lines_on, = self.ax5.plot(np.repeat(self.onsets, 3),
                                       np.tile([0, .75, np.nan],
                                               len(self.onsets)),
                                       linewidth=0.75, color='g',
                                       transform=self.trans)
        self.lines_off, = self.ax5.plot(np.repeat(self.offsets, 3),
                                        np.tile([0, .90, np.nan],
                                                len(self.offsets)),
                                        linewidth=0.75, color='g',
                                        transform=self.trans)

        self.ids.syllsim_graph.clear_widgets()
        self.ids.syllsim_graph.add_widget(self.plot_syllsim_canvas)

    def new_thresh(self):
        # get syllable correlations for entire sonogram and
        # create new binary matrix with new threshold
        son_corr_bin = np.zeros(self.son_corr.shape)
        son_corr_bin[self.son_corr >= float(self.ids.user_syllsim.text)] = 1

        # get syllable pattern
        syll_pattern = analyze.find_syllable_pattern(son_corr_bin)
        display_pattern = ", ".join(str(x) for x in syll_pattern)
        self.ids.song_syntax.text = 'Song Syntax: {}'.format(display_pattern)

        syll_stereotypy, syll_stereotypy_max, syll_stereotypy_min = \
            analyze.calc_syllable_stereotypy(self.son_corr, syll_pattern)

        # formatting for summary
        spacing1 = '{:<12}{:<8}{:<8}{:<8}\n'
        spacing2 = '{:<16}{:<8}{:<8}{:<8}\n'
        spacing3 = '{:<15}{:<8}{:<8}{:<8}\n'
        stereotypy_text = spacing1.format('Syllable', 'Avg', 'Min', 'Max')
        for idx in range(len(syll_stereotypy)):
            if not np.isnan(syll_stereotypy[idx]):
                if idx >= 10:
                    spacing = spacing3
                else:
                    spacing = spacing2
                stereotypy_text += spacing.format(
                    str(idx),
                    round(syll_stereotypy[idx], 1),
                    round(syll_stereotypy_min[idx], 1),
                    round(syll_stereotypy_max[idx], 1),
                )
        if stereotypy_text == spacing1.format('Syllable', 'Avg', 'Min', 'Max'):
            stereotypy_text += 'No Repeated Syllables'
        self.ids.similarity.text = stereotypy_text

        syll_labeled = self.threshold_sonogram.copy()
        # make the background black (a negative number falls below vmin)
        syll_labeled[syll_labeled == 0] = -10

        # need to find the max number to define the image
        u, indices = np.unique(syll_pattern, return_inverse=True)
        num_unique = len(u)

        # set clim so that anything over will be colored grey
        self.plot_syllsim.set_clim(0, num_unique)
        grey = num_unique + 1

        # color syllable patterns
        for on, off, syll in zip(self.onsets, self.offsets, indices):
            syll_labeled[:, on:off][syll_labeled[:, on:off] == 1] = syll

        # color noise white: this value will be set to nan, but it will be
        # overwritten below if the noise falls outside the song
        # (we use a number larger than grey)
        to_nan = grey + 1
        for region in regionprops(self.labeled_sonogram):
            if region.area <= int(self.user_noise_thresh):
                syll_labeled[self.labeled_sonogram == region.label] = to_nan

        # color signal before and after song grey
        on = self.onsets[0]
        off = self.offsets[-1]
        syll_labeled[:, 0:on][syll_labeled[:, 0:on] == 1] = grey
        syll_labeled[:, off:-1][syll_labeled[:, off:-1] == 1] = grey

        # color signal between syllables grey
        for off, on in zip(self.offsets[:-1], self.onsets[1:]):
            syll_labeled[:, off:on][syll_labeled[:, off:on] >= 0] = grey

        # little hack to make noise regions white only if inside onset/offsets
        syll_labeled[syll_labeled == to_nan] = np.nan

        # update image in widget with the actual data
        self.plot_syllsim.set_data(syll_labeled)
        self.plot_syllsim_canvas.draw()

    def syllsim_thresh_instructions(self):
        syllsim_popup = SyllSimThreshInstructionsPopup()
        syllsim_popup.open()
def Update(self):
    # self.box.remove_widget(FigureCanvasKivyAgg(self.fig1))
    plt.cla()
    self.bar.clear_widgets()
    self.fig1 = NinthWindow.add_plot(self, 1000)
    self.bar.add_widget(FigureCanvasKivyAgg(self.fig1))
def on_pre_enter(self, *args):
    self.fig1 = NinthWindow.add_plot(self, 1000)
    self.bar.add_widget(FigureCanvasKivyAgg(self.fig1))
def fullPrediction(self):
    st = np.int64(self.startTime.text)
    et = np.int64(self.endTime.text)
    my_day = np.int64(self.day.text)
    # d = self.direction.text
    if self.t1.active:
        name = 'GRU'
    if self.t2.active:
        name = 'LSTM'
    if self.t3.active:
        name = 'SAES'
    if self.t4.active:
        name = 'RNN'
    if self.t5.active:
        name = 'BI'

    predictionClass = CleanPrediction()
    counts = {}
    finalPathMape = {}

    pathData = Map.generatePaths(self.startScats.text, self.endScats.text)
    shortestMap = pathData[1]
    allPaths = pathData[2]
    allDistances = np.int64(pathData[3])

    # self.ids.mapFigure.remove_widget(FigureCanvasKivyAgg(self.plt.gcf()))
    self.ids.mapFigure.add_widget(FigureCanvasKivyAgg(shortestMap.gcf()))

    newPrediction = 0
    lowMape = 0
    highMape = 0
    firstRun = True
    i = 0
    j = 0
    bestTime = 0
    currentPath = []

    # for each path in possible paths
    print(str(allPaths))
    for path in allPaths:
        # get prediction for each intersection
        mape = {}
        print(str(currentPath))
        currentPath = path
        j = 0
        for scats in path:
            # print("Loop: " + str(j))
            # make sure we're not getting the direction of the final point
            # in the path
            listLenth = len(path)
            if j == (listLenth - 1):
                d = 'N'
            else:
                d = Map.cardinality(path[j], path[(j + 1)])

            newPrediction = predictionClass.predict(int(scats), st, et,
                                                    my_day, d, name)
            currentMape = predictionClass.metrics[0]
            mape[str(scats)] = currentMape

            # Occasionally there may be no data for our MAPE; this takes the
            # min and max values of other predictions so we can use them to
            # make an educated guess just in case.
            if lowMape == 0:
                lowMape = currentMape
            elif currentMape < lowMape:
                lowMape = currentMape
            if highMape == 0:
                highMape = currentMape
            elif currentMape > highMape:
                highMape = currentMape

            counts[str(scats)] = newPrediction
            j += 1
        # j = 0
        print("Mape: " + str(mape))
        distance = allDistances[i]
        print("Current path: " + str(path) +
              " Current distance: " + str(distance))

        # calculate travel time based on distance + traffic
        currentTime = int(self.calculateTravelTime(counts, path, distance))
        if firstRun:
            bestTime = currentTime
            bestPath = currentPath
            finalPathMape = mape
            firstRun = False
        elif currentTime < bestTime:
            bestTime = currentTime
            finalPathMape = mape
            bestPath = currentPath
        i += 1

    print("Best Path: " + str(bestPath) + " Best Time: " + str(bestTime))
    bestTime = round((bestTime / 60), 2)

    MeanAverageLen = len(finalPathMape)
    print("Final Path Map Length: " + str(len(finalPathMape)))
    print("Final Path Mape Values: " + str(finalPathMape.values()))
    if MeanAverageLen == 0:
        MeanAverageLen = 10.2
        for intersection in bestPath:
            finalPathMape[intersection] = random.uniform(lowMape, highMape)

    self.bestRoute.text = ("Via intersections: " + str(bestPath) + "\n\n" +
                           "Travel time: " + str(bestTime) + " minutes" +
                           "\n\n MAPE for each Intersection: " +
                           str(finalPathMape) +
                           "\n\n Average MAPE: " +
                           str(sum(finalPathMape.values()) /
                               len(finalPathMape)))
def run(self):
    self.ids.mapFigure.add_widget(FigureCanvasKivyAgg(self.plt.gcf()))
class ActiveTestScreen(Screen):
    stopTestCalled = False
    box = BoxLayout()
    bra_plot = FigureCanvasKivyAgg(plt.gcf())  # gets current figure

    def on_enter(self, *args):
        self.stopTestCalled = False
        self.box.size_hint = 0.95, 0.7
        self.box.pos_hint = {"x": 0.025, "top": 0.9}
        self.box.add_widget(self.bra_plot)
        self.add_widget(self.box, 10)
        for j in range(0, 20):
            for i in (mapindex):
                f = np.random.rand(1, 1) * 100
                colors[i] = colorbank[math.floor(f / 100)]
                # plt.pause(0.001)
                # time.sleep(1)
            pmap.set_color(colors)
        Clock.schedule_interval(self.update_plot, 0.7)  # calls update_plot every 0.7 seconds
        # Ideally here we will have the Arduino.read() and use that number
        # to set colors[i]

    def update_plot(self, *args):
        self.box.remove_widget(self.bra_plot)
        self.remove_widget(self.box)
        box = BoxLayout()
        box.size_hint = 0.95, 0.7
        box.pos_hint = {"x": 0.025, "top": 0.9}
        box.add_widget(FigureCanvasKivyAgg(plt.gcf()))
        self.add_widget(box)
        for i in (mapindex):
            # f = np.random.rand(1,1)*800
            # colors[i] = colorbank[math.floor(f/100)]
            if (self.stopTestCalled):
                break
            Arduino.write(bytes(str(i), 'utf-8'))
            Arduino.write(b"\n")
            while (not Arduino.inWaiting()):  # continues if the arduino replies
                pass
            x = Arduino.readline()  # maybe switch to Arduino.read()
            y = x.decode()  # if readline() is switched to read() this may not be necessary anymore
            z = y.rstrip()  # if readline() is switched to read() this may not be necessary anymore
            f = math.floor(float(z))
            # print(f)
            if f > threshold:
                checklist[i] = 1
            colors[i] = colorbank[math.floor(f / 100)]
            pmap.set_color(colors)
            # colors[i] = 'gainsboro'

    def update_plot_demo(self, *args):
        print("UPDATEPLOT CALLED")
        self.box.remove_widget(self.bra_plot)
        self.remove_widget(self.box)
        box = BoxLayout()
        box.size_hint = 0.95, 0.7
        box.pos_hint = {"x": 0.025, "top": 0.9}
        box.add_widget(FigureCanvasKivyAgg(plt.gcf()))
        self.add_widget(box)
        j = 0
        for i in (mapindex):
            # f = np.random.rand(1,1)*800
            # colors[i] = colorbank[math.floor(f/100)]
            if (self.stopTestCalled):
                break
            f = 800
            if f > threshold:
                checklist[i] = 1
                colors = ['gainsboro'] * 72
                colors[i] = colorbank[math.floor(f / 100)]
            j += 1
            pmap.set_color(colors)

    def stopTest(self):
        self.stopTestCalled = True
        Arduino.write(b's')
        for i in (mapindex):
            if checklist[i] == 1:
                resultsc[i] = 'crimson'
            if checklist[i] == 0:
                resultsc[i] = 'gainsboro'
        pass
def __init__(self, *args, **kwargs):
    self.fig4, self.ax4 = plt.subplots()
    self.noise_hist_canvas = FigureCanvasKivyAgg(self.fig4)
    super(NoiseSummaryPage, self).__init__(*args, **kwargs)
class NoiseThresholdPage(Screen):
    user_noise_thresh = StringProperty()

    def __init__(self, *args, **kwargs):
        self.fig3, self.ax3 = plt.subplots()
        self.plot_noise_canvas = FigureCanvasKivyAgg(self.fig3)
        self.ax3 = plt.Axes(self.fig3, [0., 0., 1., 1.])
        self.ax3.set_axis_off()
        self.fig3.add_axes(self.ax3)
        super(NoiseThresholdPage, self).__init__(*args, **kwargs)

    def setup(self):
        self.noise_thresholds = []
        self.i = 0
        # self.files = [os.path.basename(i) for i in glob.glob(self.parent.directory + '*.gzip')]
        self.files = self.parent.files
        self.next()

    def next(self):
        # if not first entering the app, record the threshold
        if self.i > 0:
            self.noise_thresholds.append(int(self.ids.user_noise_size.text))
        # otherwise it is the first time,
        # so reset noise size threshold to the default
        else:
            self.ids.user_noise_size.text = self.user_noise_thresh

        # if it is the last song go to noise threshold summary page,
        # otherwise process song
        if self.i == len(self.files):
            self.manager.current = 'noise_summary_page'
        else:
            self.ids.user_noise_size.text = self.ids.user_noise_size.text
            ons, offs, thresh, ms, htz = analyze.load_bout_data(
                os.path.join(self.parent.directory, self.files[self.i]))
            self.onsets = ons
            self.offsets = offs
            self.threshold_sonogram = thresh
            [self.rows, self.cols] = np.shape(self.threshold_sonogram)

            # prepare graph and make plot take up the entire space
            data = np.zeros((self.rows, self.cols))
            self.ax3.clear()
            self.ax3 = plt.Axes(self.fig3, [0., 0., 1., 1.])
            self.ax3.set_axis_off()
            self.fig3.add_axes(self.ax3)

            cmap = plt.cm.prism
            cmap.set_under(color='black')
            cmap.set_bad(color='white')

            self.plot_noise = self.ax3.imshow(
                data,
                extent=[0, self.cols, 0, self.rows],
                aspect='auto',
                cmap=cmap,
                norm=matplotlib.colors.LogNorm(),
                vmin=3.01)

            self.ids.noise_graph.clear_widgets()
            self.ids.noise_graph.add_widget(self.plot_noise_canvas)

            self.new_thresh()
            self.i += 1

    def new_thresh(self):
        # find notes and label based on connectivity;
        # zero anything before first onset or after last offset
        # (note the offset row is already zeros, so okay to include);
        # this will take care of any noise before or after the song
        # before labeling the notes
        threshold_sonogram_crop = self.threshold_sonogram.copy()
        # make the regions outside the onsets and offsets black (hidden)
        threshold_sonogram_crop[:, 0:self.onsets[0]] = 0
        threshold_sonogram_crop[:, self.offsets[-1]:-1] = 0
        for off, on in zip(self.offsets[:-1], self.onsets[1:]):
            threshold_sonogram_crop[:, off:on][
                threshold_sonogram_crop[:, off:on] >= 0] = 0

        # connectivity 1=4 or 2=8 (include diagonals)
        labeled_sonogram = label(threshold_sonogram_crop, connectivity=1)

        # change label of all notes with size > threshold to be the same
        # and all <= threshold to be the same
        for region in regionprops(labeled_sonogram):
            if region.area > int(self.ids.user_noise_size.text):
                labeled_sonogram[labeled_sonogram == region.label] = region.area
            else:
                labeled_sonogram[labeled_sonogram == region.label] = 1

        # mask noise white
        labeled_sonogram = np.ma.masked_where(labeled_sonogram == 1,
                                              labeled_sonogram)

        # update image in widget with the actual data
        self.plot_noise.set_data(labeled_sonogram + 3)
        self.plot_noise_canvas.draw()

    def noise_thresh_instructions(self):
        noise_popup = NoiseThreshInstructionsPopup()
        noise_popup.open()
class ControlPanel(Screen):
    # these connect the landing page user input to the control panel
    find_gzips = BooleanProperty()
    user_signal_thresh = StringProperty()
    user_min_silence = StringProperty()
    user_min_syllable = StringProperty()

    def __init__(self, **kwargs):
        self.top_image = ObjectProperty(None)
        self.mark_boolean = False
        self.click = 0
        self.direction_to_int = {'left': -1, 'right': 1}
        # bottom_image = ObjectProperty(None)
        self.register_event_type('on_check_boolean')
        self.fig2 = matplotlib.figure.Figure()
        # self.fig2, self.ax2 = plt.subplots()
        self.plot_binary_canvas = FigureCanvasKivyAgg(self.fig2)
        self.fig2.canvas.mpl_connect('key_press_event', self.move_mark)
        # self.ax2 = self.fig2.add_subplot(111)
        self.ax2 = self.fig2.add_axes([0., 0., 1., 1.])
        # self.ax2 = plt.Axes(self.fig2, [0., 0., 1., 1.])
        self.ax2.set_axis_off()
        # self.fig2.add_axes(self.ax2)

        # all songs and files
        self.file_names = None
        self.files = None
        self.output_path = None

        # attributes for song that is being worked on
        self.i = None
        self.song = None
        self.current_file = None
        self.syllable_onsets = None
        self.syllable_offsets = None

        # place holders for plots
        self.plot_binary = None
        self.trans = None
        self.lines_on = None
        self.lines_off = None

        # for plotting
        self.index = None
        self.mark = None
        self.graph_location = None

        super(ControlPanel, self).__init__(**kwargs)

    def on_check_boolean(self):
        if self.click >= 2:
            marks_popup = popups.FinishMarksPopup(self)
            marks_popup.open()

    def on_touch_down(self, touch):
        super(ControlPanel, self).on_touch_down(touch)
        if self.mark_boolean is True:
            self.click += 1
            self.dispatch('on_check_boolean')
            ControlPanel.disabled = True
            return True

    def reset_panel(self):
        self.mark_boolean = False
        self.click = 0
        ControlPanel.disabled = False

    def move_mark(self, event, move_interval=7):
        if self.ids.add.state == 'down':  # adding
            if event.key in self.direction_to_int and \
                    (25 <= self.graph_location < self.song.cols - 25):
                self.graph_location += self.direction_to_int[
                    event.key] * move_interval
                self.update_mark(self.graph_location)
            elif event.key == 'enter':
                if self.ids.syllable_beginning.state == 'down':
                    self.add_onsets()
                else:
                    self.add_offsets()
                self.mark_boolean = False
                self.click = 0
                ControlPanel.disabled = False
            elif event.key == 'x':
                self.cancel_mark()
        elif self.ids.delete.state == 'down':  # deleting
            if event.key in self.direction_to_int:
                self.index += self.direction_to_int[event.key]
                # onsets
                if self.ids.syllable_beginning.state == 'down':
                    if self.index < 0:
                        self.index = len(self.syllable_onsets) - 1
                    if self.index >= len(self.syllable_onsets):
                        self.index = 0
                    self.update_mark(self.syllable_onsets[self.index])
                # offsets
                else:
                    if self.index < 0:
                        self.index = len(self.syllable_offsets) - 1
                    if self.index >= len(self.syllable_offsets):
                        self.index = 0
                    self.update_mark(self.syllable_offsets[self.index])
            elif event.key == 'enter':
                if self.ids.syllable_beginning.state == 'down':
                    self.delete_onsets()
                else:
                    self.delete_offsets()
                self.mark_boolean = False
                self.click = 0
                ControlPanel.disabled = False
            elif event.key == 'x':
                self.cancel_mark()

    def enter_mark(self):
        if self.ids.add.state == 'down':  # adding
            if self.ids.syllable_beginning.state == 'down':
                self.add_onsets()
            else:
                self.add_offsets()
        elif self.ids.delete.state == 'down':  # deleting
            if self.ids.syllable_beginning.state == 'down':
                self.delete_onsets()
            else:
                self.delete_offsets()
        self.mark_boolean = False
        self.click = 0
        ControlPanel.disabled = False

    def cancel_mark(self):
        self.mark.remove()
        self.image_syllable_marks()
        self.mark_boolean = False
        self.click = 0
        ControlPanel.disabled = False

    def update_mark(self, new_mark):
        self.mark.set_xdata(new_mark)
        self.plot_binary_canvas.draw()

    def add_mark(self, touchx, touchy):
        self.mark_boolean = True
        conversion = self.song.sonogram.shape[1] / self.ids.graph_binary.size[0]
        self.graph_location = math.floor(
            (touchx - self.ids.graph_binary.pos[0]) * conversion)
        ymax = 0.75 if self.ids.syllable_beginning.state == 'down' else 0.90
        # graph as another color/group of lines
        self.mark = self.ax2.axvline(self.graph_location, ymax=ymax,
                                     color='m', linewidth=0.75)
        self.plot_binary_canvas.draw()

    def add_onsets(self):
        # https://stackoverflow.com/questions/29408661/add-elements-into-a
        # -sorted-array-in-ascending-order
        if self.graph_location is None:
            return
        else:
            self.syllable_onsets = np.insert(
                self.syllable_onsets,
                np.searchsorted(self.syllable_onsets, self.graph_location),
                self.graph_location)
        self.mark.remove()
        self.image_syllable_marks()
        self.graph_location = None

    def add_offsets(self):
        if self.graph_location is None:
            return
        else:
            self.syllable_offsets = np.insert(
                self.syllable_offsets,
                np.searchsorted(self.syllable_offsets, self.graph_location),
                self.graph_location)
        self.mark.remove()
        self.image_syllable_marks()
        self.graph_location = None

    def delete_mark(self, touchx, touchy):
        self.mark_boolean = True
        conversion = self.song.sonogram.shape[1] / \
            self.ids.graph_binary.size[0]
        self.graph_location = math.floor(
            (touchx - self.ids.graph_binary.pos[0]) * conversion)
        if self.ids.syllable_beginning.state == 'down':
            ymax = 0.75
            # find nearest onset
            self.index = self.take_closest(self.syllable_onsets,
                                           self.graph_location)
            location = self.syllable_onsets[self.index]
        else:
            ymax = 0.90
            # find nearest offset
            self.index = self.take_closest(self.syllable_offsets,
                                           self.graph_location)
            location = self.syllable_offsets[self.index]
        self.mark = self.ax2.axvline(location, ymax=ymax, color='m',
                                     linewidth=0.75)
        self.plot_binary_canvas.draw()

    def delete_onsets(self):
        if self.index is None:
            return
        else:
            onsets_list = list(self.syllable_onsets)
            onsets_list.remove(self.syllable_onsets[self.index])
            self.syllable_onsets = np.array(onsets_list)
        self.mark.remove()
        self.image_syllable_marks()
        self.index = None

    def delete_offsets(self):
        if self.index is None:
            return
        else:
            offsets_list = list(self.syllable_offsets)
            offsets_list.remove(self.syllable_offsets[self.index])
            self.syllable_offsets = np.array(offsets_list)
        self.mark.remove()
        self.image_syllable_marks()
        self.index = None

    # called in kv just before entering control panel screen (on_pre_enter)
    def setup(self):
        Logger.info("Setting up")
        # storage for parameters; these are the dictionaries that are added
        # to with each song
        self.save_parameters_all = {}
        self.save_syllables_all = {}
        self.save_tossed = {}
        self.save_conversions_all = {}
        self.i = 0
        self.files = self.parent.files
        self.file_names = self.parent.file_names
        self.output_path = os.path.join(
            self.parent.directory,
            "SegSyllsOutput_{}".format(time.strftime("%Y%m%d_T%H%M%S")))
        if not os.path.isdir(self.output_path):
            os.makedirs(self.output_path)
        self.next()

    def update_panel_text(self):
        # this updates the text or slider limits on the control panel screen
        self.ids.slider_threshold_label.text = '{}%'.format(
            self.song.percent_keep)
        # have to round these because of the conversion
        self.ids.slider_min_silence_label.text = "{} ms".format(
            round(self.song.min_silence * self.song.ms_pix, 1))
        self.ids.slider_min_syllable_label.text = '{} ms'.format(
            round(self.song.min_syllable * self.song.ms_pix, 1))
        # want max to be 50 ms
        self.ids.slider_min_silence.max = 50 / self.song.ms_pix
        # want max to be 350 ms
        self.ids.slider_min_syllable.max = 350 / self.song.ms_pix
        self.ids.normalize_amp.state = self.song.normalized
        self.ids.slider_threshold.value = self.song.percent_keep
        self.ids.slider_min_silence.value = self.song.min_silence
        self.ids.slider_min_syllable.value = self.song.min_syllable
        self.ids.syllable_beginning.state = 'down'
        self.ids.syllable_ending.state = 'normal'
        self.ids.add.state = 'normal'
        self.ids.delete.state = 'normal'
        self.ids.slider_frequency_filter.value1 = self.song.filter_boundary[0]
        self.ids.slider_frequency_filter.value2 = self.song.filter_boundary[1]
        self.ids.slider_frequency_filter.min = 0
        self.ids.slider_frequency_filter.max = self.song.rows
        self.ids.range_slider_crop.value1 = self.song.bout_range[0]
        self.ids.range_slider_crop.value2 = self.song.bout_range[1]
        self.ids.range_slider_crop.min = 0
        self.ids.range_slider_crop.max = self.song.cols

    def reset_parameters(self):
        self.song.reset_params(user_signal_thresh=self.user_signal_thresh,
                               user_min_silence=self.user_min_silence,
                               user_min_syllable=self.user_min_syllable,
                               id_min_sil=self.ids.slider_min_silence.min,
                               id_min_syl=self.ids.slider_min_syllable.min)
        self.ids.normalize_amp.state = self.song.normalized
        self.update_panel_text()
        self._update()

    def next(self):
        self.current_file = self.files[self.i]
        # increment i so next file will be opened on submit/toss
        self.i += 1
        # get initial data
        Logger.info("Loading file {}".format(self.current_file))
        f_path = os.path.join(self.parent.directory, self.current_file)
        f_size = os.path.getsize(f_path)
        # 1 000 000 bytes is 1 megabyte
        max_file_size = 3000000
        if f_size > max_file_size:
            Logger.info("Large song")
            popups.LargeFilePopup(self, self.current_file,
                                  str(round(f_size / 1000000, 1))).open()
        else:
            self.process()

    def toss(self):
        Logger.info("Tossing {}".format(self.current_file))
        # save file name to dictionary
        self.save_tossed[self.i - 1] = {'FileName': self.current_file}
        # remove from saved parameters and associated gzip if
        # file ends up being tossed
        if self.current_file in self.save_parameters_all:
            del self.save_parameters_all[self.current_file]
            del self.save_syllables_all[self.current_file]
            del self.save_conversions_all[self.current_file]
            os.remove(self.output_path + '/SegSyllsOutput_' +
                      self.file_names[self.i - 1] + '.gzip')
        # write if last file otherwise go to next file
        if self.i == len(self.files):
            self.save_all_parameters()
        else:
            self.next()

    def process(self):
        self.song = Sonogram(wavfile=self.current_file,
                             directory=self.parent.directory,
                             find_gzips=self.find_gzips)

        self.ids.freq_axis_middle.text = str(
            round(self.song.rows * self.song.hertzPerPixel / 2 / 1000)) + " kHz"

        # reset default parameters for new song
        # (will be used by update to graph the first attempt)
        Logger.info("Setting default params")
        self.song.set_song_params(user_signal_thresh=self.user_signal_thresh,
                                  user_min_silence=self.user_min_silence,
                                  user_min_syllable=self.user_min_syllable,
                                  id_min_sil=self.ids.slider_min_silence.min,
                                  id_min_syl=self.ids.slider_min_syllable.min)
        prev_onsets = self.song.prev_onsets
        prev_offsets = self.song.prev_offsets

        # if the user goes back to a previous song and then goes forward again,
        # it will pull what they had already submitted (so the user does not
        # lose progress)
        if len(self.save_parameters_all) > 0:
            if self.current_file in self.save_parameters_all:
                params = self.save_parameters_all[self.current_file]
                prev_onsets = np.asarray(
                    self.save_syllables_all[self.current_file]['Onsets'])
                prev_offsets = np.asarray(
                    self.save_syllables_all[self.current_file]['Offsets'])
                Logger.info("Updating params based on previous run")
                self.song.update_by_params(params)

        Logger.info("Updating panel text")
        if self.song.params:
            self.song.update_by_params(self.song.params)
        self.update_panel_text()

        # update the label stating the current file and the file number out
        # of total number of files
        # use self.i since you have not yet incremented
        self.ids.current_file.text = "{}\nFile {} out of {}".format(
            self.file_names[self.i - 1], self.i, len(self.files))

        # initialize the matplotlib figures/axes (no data yet)
        # ImageSonogram is its own class and top_image is an instance of it
        # (defined in kv) - had trouble doing this for the bottom image
        Logger.info("Creating initial sonogram")
        self.top_image.image_sonogram_initial(self.song.rows, self.song.cols)
        Logger.info("Creating initial binary")
        self.image_binary_initial()

        Logger.info("Updating")
        # run update to load images for the first time for this file
        self._update(prev_run_onsets=prev_onsets,
                     prev_run_offsets=prev_offsets)
        Logger.info("Done with automation portion")

    def update(self, filter_boundary, bout_range, percent_keep, min_silence,
               min_syllable, normalized):
        self.song.set_song_params(filter_boundary=filter_boundary,
                                  bout_range=bout_range,
                                  percent_keep=percent_keep,
                                  min_silence=min_silence,
                                  min_syllable=min_syllable,
                                  normalized=normalized,
                                  user_signal_thresh=self.user_signal_thresh,
                                  user_min_silence=self.user_min_silence,
                                  user_min_syllable=self.user_min_syllable,
                                  id_min_sil=self.ids.slider_min_silence.min,
                                  id_min_syl=self.ids.slider_min_syllable.min)
        self.update_panel_text()
        self._update()

    def _update(self, prev_run_onsets=None, prev_run_offsets=None):
        # must do this for image to update for some reason
        sonogram = self.song.sonogram.copy()

        # run HPF, scale based on average amplitude
        # (increases low amplitude sections), and graph sonogram
        freqfiltered_sonogram = seg.frequency_filter(self.song.filter_boundary,
                                                     sonogram)
        # switch next two lines if you don't want amplitude scaled
        if self.ids.normalize_amp.state == 'down':
            scaled_sonogram = seg.normalize_amplitude(freqfiltered_sonogram)
        else:
            scaled_sonogram = freqfiltered_sonogram

        # plot resultant sonogram in the top graph in control panel
        self.top_image.image_sonogram(scaled_sonogram)

        # apply threshold to signal
        self.thresh_sonogram = seg.threshold_image(self.song.percent_keep,
                                                   scaled_sonogram)

        # calculate onsets and offsets using binary (thresholded) image
        onsets, offsets, silence_durations, sum_sonogram_scaled = \
            seg.initialize_onsets_offsets(self.thresh_sonogram)

        # update the automatic onsets and offsets based on the slider values
        # for min silence and min syllable durations
        syllable_onsets, syllable_offsets = seg.set_min_silence(
            self.song.min_silence, onsets, offsets, silence_durations)
        syllable_onsets, syllable_offsets = seg.set_min_syllable(
            self.song.min_syllable, syllable_onsets, syllable_offsets)

        # lastly, remove onsets and offsets that are outside of the crop
        # values (on the time axis)
        self.syllable_onsets, self.syllable_offsets = \
            seg.crop(self.song.bout_range, syllable_onsets, syllable_offsets)

        # check if the song has been run before (if gzip data was loaded)
        if prev_run_onsets is None:
            prev_run_onsets = np.empty([0])
            prev_run_offsets = np.empty([0])

        # change the onsets and offsets to those in gzip if gzip was loaded
        if prev_run_onsets.size:
            self.syllable_onsets = prev_run_onsets
            self.syllable_offsets = prev_run_offsets

        # plot resultant binary sonogram along with onset and offset lines
        self.image_binary()
        self.image_syllable_marks()
        # self.bottom_image.image_syllable_marks(self.syllable_onsets,
        #                                        self.syllable_offsets)

    def image_binary_initial(self):
        # make plot take up the entire space
        self.ax2.clear()
        self.ax2.set_axis_off()

        data = np.zeros((self.song.rows, self.song.cols))
        # plot data
        self.plot_binary = self.ax2.imshow(
            np.log(data + 3),
            cmap='hot',
            extent=[0, self.song.cols, 0, self.song.rows],
            aspect='auto')

        self.trans = tx.blended_transform_factory(self.ax2.transData,
                                                  self.ax2.transAxes)
        self.lines_on, = self.ax2.plot(np.repeat(0, 3),
                                       np.tile([0, .75, np.nan], 1),
                                       linewidth=0.75, color='#2BB34B',
                                       transform=self.trans)
        self.lines_off, = self.ax2.plot(np.repeat(0, 3),
                                        np.tile([0, .90, np.nan], 1),
                                        linewidth=0.75, color='#2BB34B',
                                        transform=self.trans)

        hundred_ms_in_pix = 100 / self.song.ms_pix
        scalebar = AnchoredSizeBar(self.ax2.transData,
                                   hundred_ms_in_pix,
                                   '100 ms', 1,
                                   pad=0.1,
                                   color='white',
                                   frameon=False,
                                   size_vertical=2)
        self.ax2.add_artist(scalebar)

        self.ids.graph_binary.clear_widgets()
        self.ids.graph_binary.add_widget(self.plot_binary_canvas)

    def image_binary(self):
        self.plot_binary.set_data(np.log(self.thresh_sonogram + 3))
        self.plot_binary.autoscale()

    def image_syllable_marks(self):
        self.lines_on.set_xdata(np.repeat(self.syllable_onsets, 3))
        self.lines_on.set_ydata(
            np.tile([0, .75, np.nan], len(self.syllable_onsets)))
        self.lines_off.set_xdata(np.repeat(self.syllable_offsets, 3))
        self.lines_off.set_ydata(
            np.tile([0, .90, np.nan], len(self.syllable_offsets)))
        self.plot_binary_canvas.draw()

    def back(self):
        if self.i != 1:
            self.i -= 2
            self.next()

    # called when the user hits submit
    # before saving it checks for errors with onsets and offsets
    def save(self):
        Logger.info("Adding {} to save dictionaries".format(self.current_file))
        # check if there are no syllable lines at all
        if len(self.syllable_onsets) == 0 and len(self.syllable_offsets) == 0:
            check_sylls = popups.CheckForSyllablesPopup()
            check_sylls.open()
        # if there are lines, check that there are equal numbers of ons and offs
        elif len(self.syllable_onsets) != len(self.syllable_offsets):
            check_length = popups.CheckLengthPopup()
            check_length.len_onsets = str(len(self.syllable_onsets))
            check_length.len_offsets = str(len(self.syllable_offsets))
            check_length.open()
        # check that you start with an onset and end with an offset
        elif self.syllable_onsets[0] > self.syllable_offsets[0] or \
                self.syllable_onsets[-1] > self.syllable_offsets[-1]:
            check_beginning_end = popups.CheckBeginningEndPopup()
            check_beginning_end.start_onset = not self.syllable_onsets[0] > \
                self.syllable_offsets[0]
            check_beginning_end.end_offset = not self.syllable_onsets[-1] > \
                self.syllable_offsets[-1]
            check_beginning_end.open()
        # check that onsets and offsets alternate
        else:
            combined_onsets_offsets = list(self.syllable_onsets)
            binary_list = [0] * len(self.syllable_onsets)
            for i in range(len(self.syllable_offsets)):
                insertion_pt = bisect_right(combined_onsets_offsets,
                                            self.syllable_offsets[i])
                binary_list.insert(insertion_pt, 1)
                insort(combined_onsets_offsets, self.syllable_offsets[i])
            if sum(binary_list[::2]) != 0 \
                    or sum(binary_list[1::2]) \
                    != len(binary_list) / 2:  # using python slices
                check_order = popups.CheckOrderPopup()
                check_order.order = binary_list
                check_order.open()
            # passed all checks, now info can be stored/written for the song
            else:
                Logger.info("Saving {}".format(self.current_file))
                self.save_parameters_all[
                    self.current_file] = self.song.save_dict()
                self.save_conversions_all[self.current_file] = {
                    'timeAxisConversion': self.song.ms_pix,
                    'freqAxisConversion': self.song.hertzPerPixel
                }
                self.save_syllables_all[self.current_file] = {
                    'Onsets': self.syllable_onsets.tolist(),
                    'Offsets': self.syllable_offsets.tolist()
                }

                filename_gzip = "{}/SegSyllsOutput_{}.gzip".format(
                    self.output_path, self.file_names[self.i - 1])
                dictionaries = [
                    self.save_parameters_all[self.current_file],
                    self.save_syllables_all[self.current_file],
                    {'Sonogram': self.thresh_sonogram.tolist()},
                    self.save_conversions_all[self.current_file]
                ]
                save_gzip_pickle(filename_gzip, dictionaries)

                # remove from tossed list if file ends up being submitted
                if self.i - 1 in self.save_tossed:
                    del self.save_tossed[self.i - 1]

                # write if last file otherwise go to next file
                if self.i == len(self.files):
                    self.save_all_parameters()
                else:
                    self.next()

    def save_all_parameters(self):
        Logger.info("Saving parameters")
        if self.save_parameters_all:
            df_parameters = pd.DataFrame.from_dict(self.save_parameters_all,
                                                   orient='index')
            for r in df_parameters.BoutRange:
                # adjust bout ranges so that they do not include the padding of
                # the spectrogram (150 pixels each side), so user can convert
                # correctly using human-readable files
                r[:] = [x - 150 for x in r]
                if r[0] < 0:
                    r[0] = 0
                if r[-1] > (self.song.cols - 300):
                    r[-1] = (self.song.cols - 300)
            df_parameters.index.name = 'FileName'
            df_parameters.to_csv(os.path.join(
                self.output_path, 'segmentedSyllables_parameters_all.txt'),
                sep="\t")

            df_syllables = pd.DataFrame.from_dict(self.save_syllables_all,
                                                  orient='index')
            # adjust onsets and offsets so that they do not include the padding
            # of the spectrogram (150 pixels each side), so user can convert
            # correctly using human-readable files
            for on, off in zip(df_syllables.Onsets, df_syllables.Offsets):
                on[:] = [x - 150 for x in on]
                off[:] = [y - 150 for y in off]
            df_syllables.index.name = 'FileName'
            df_syllables.to_csv(os.path.join(
                self.output_path, 'segmentedSyllables_syllables_all.txt'),
                sep="\t")

            df_conversions = pd.DataFrame.from_dict(self.save_conversions_all,
                                                    orient='index')
            df_conversions.index.name = 'FileName'
            df_conversions.to_csv(os.path.join(
                self.output_path, 'segmentedSyllables_conversions_all.txt'),
                sep="\t")

            df_tossed = pd.DataFrame.from_dict(self.save_tossed,
                                               orient='index')
            df_tossed.to_csv(os.path.join(self.output_path,
                                          'segmentedSyllables_tossed.txt'),
                             sep="\t", index=False)
        else:
            df_tossed = pd.DataFrame.from_dict(self.save_tossed,
                                               orient='index')
            df_tossed.to_csv(os.path.join(self.output_path,
                                          'segmentedSyllables_tossed.txt'),
                             sep="\t", index=False)
        self.done_window()

    def play_song(self):
        self.song.sound.play()

    @staticmethod
    def done_window():
        popups.DonePopup().open()

    @staticmethod
    def take_closest(myList, myNumber):
        """
        Assumes myList is sorted. Returns index of closest value to myNumber.
        If two numbers are equally close, return the index of the smallest
        number.
        From: https://stackoverflow.com/questions/12141150/from-list-of
        -integers-get-number-closest-to-a-given-value
        """
        pos = bisect_left(myList, myNumber)
        if pos == 0:
            return pos
        if pos == len(myList):
            return -1
        before = myList[pos - 1]
        after = myList[pos]
        if after - myNumber < myNumber - before:
            return pos
        else:
            return pos - 1
class MainApp(App):
    def build(self):
        """
        some initialization

        Args:
            rect: a dummy rectangle whose width and height will be reset,
                triggered by user events
            x0, y0: mouse click location
            x1, y1: mouse release location
            canvas: FigureCanvasKivyAgg object. Note that I'm using this
                backend right now as the FigureCanvas backend has a bug
                with plt.show()
            selectors: store user-drawn rectangle data
            offsetx: translation of origin on x direction
            offsety: translation of origin on y direction
        """
        self.selectors = []
        img = mpimg.imread(sys.argv[1])
        self.fig, self.ax = plt.subplots(1)
        plt.imshow(img)

        width, height = self.fig.canvas.get_width_height()
        pxorigin = self.ax.transData.transform([(0, 0)])
        self.offsetx = pxorigin[0][0]
        self.offsety = height - pxorigin[0][1]
        print('offsetx, offsety', self.offsetx, self.offsety)

        self.rect = Rectangle((0, 0), 1, 1)
        self.rect.set_fill(False)
        self.rect.set_edgecolor('b')
        self.x0 = None
        self.y0 = None
        self.x1 = None
        self.y1 = None

        self.canvas = FigureCanvasKivyAgg(
            plt.gcf())  # get the current reference of plt
        box = BoxLayout()
        self.ax.add_patch(self.rect)  # attach our rectangle to the axis
        self.canvas.mpl_connect("button_press_event", self.on_press)
        self.canvas.mpl_connect("button_release_event", self.on_release)
        box.add_widget(self.canvas)
        return box

    def on_press(self, event):
        """record user click location"""
        self.x0 = event.xdata
        self.y0 = event.ydata
        print('x0, y0', self.x0, self.y0)

    def on_release(self, event):
        """record user mouse release location"""
        self.x1 = event.xdata
        self.y1 = event.ydata
        self.rect.set_width(self.x1 - self.x0)
        self.rect.set_height(self.y1 - self.y0)
        self.rect.set_xy((self.x0, self.y0))

        xy_pixels = self.ax.transData.transform(
            np.vstack([self.x0, self.y0]).T)
        px, py = xy_pixels.T
        width, height = self.fig.canvas.get_width_height()
        # transform from origin on lower left to origin on upper right
        py = height - py
        # account for translation factor
        px -= self.offsetx
        py -= self.offsety

        self.selectors.append(
            [px, py, abs(self.x1 - self.x0), abs(self.y1 - self.y0)])
        print('your rectangle', px, py,
              abs(self.x1 - self.x0), abs(self.y1 - self.y0))
        self.canvas.draw()
def add_plot(self):
    self.add_widget(FigureCanvasKivyAgg(self.fig))
def runGraph(self, arr_format, dictionary):
    run = MultiVariateTime(Data(arr_format), dictionary)
    a, b = run.run()
    self.ids.graph.clear_widgets()
    self.ids.graph.add_widget(FigureCanvasKivyAgg(b))
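# A minimal, self-contained sketch (not taken from any of the snippets above)
# of the pattern most of them share: build a matplotlib figure, wrap it in
# FigureCanvasKivyAgg, add the canvas as a Kivy widget, and redraw after the
# data changes. It assumes kivy, the kivy-garden matplotlib backend, and
# matplotlib are installed; the class and method names are illustrative.
import numpy as np
import matplotlib.pyplot as plt
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.garden.matplotlib.backend_kivyagg import FigureCanvasKivyAgg


class MinimalPlotApp(App):
    def build(self):
        fig, ax = plt.subplots()
        x = np.linspace(0, 2 * np.pi, 100)
        self.line, = ax.plot(x, np.sin(x))
        # wrap the figure once and keep a reference to the widget
        self.canvas_widget = FigureCanvasKivyAgg(fig)
        root = BoxLayout()
        root.add_widget(self.canvas_widget)
        return root

    def refresh(self, ydata):
        # update the existing artist instead of rebuilding the canvas,
        # then redraw the Agg buffer
        self.line.set_ydata(ydata)
        self.canvas_widget.draw()


if __name__ == '__main__':
    MinimalPlotApp().run()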