def expand_peak_detection_new(self, entity1, entity2):
    peak_years = utils.find_peaks(entity1, entity2, self.year_to_model)
    longest_sequence = utils.find_longest_sequence(peak_years)
    if not longest_sequence:
        return None
    middle_year = utils.get_middle_year(longest_sequence[0], longest_sequence[-1])
    related_tuples1 = self.qe_single_entity.expand_entity(entity1, middle_year, topk=100)
    related_tuples2 = self.qe_single_entity.expand_entity(entity2, middle_year, topk=100)
    if related_tuples1 is None or related_tuples2 is None:
        # one of the terms was not expanded
        return None

    # keep only related terms whose peak years overlap the shared peak sequence
    related_tuples = []
    for tup in related_tuples1:
        peak_years = utils.find_peaks(entity1, tup[0], self.year_to_model)
        if utils.is_overlapping(peak_years, longest_sequence):
            related_tuples.append(tup)
    for tup in related_tuples2:
        peak_years = utils.find_peaks(entity2, tup[0], self.year_to_model)
        if utils.is_overlapping(peak_years, longest_sequence):
            related_tuples.append(tup)
    if not related_tuples:
        # there are no relevant related terms
        return None

    # reorder the related terms by (mutual) similarity with both entities
    heap = MaxEntitiesHeap(self.k * 2, [entity1, entity2])
    w2v_model = self.year_to_model[middle_year]
    for tup in related_tuples:
        term = tup[0]
        sim1 = w2v_model.model.similarity(term, entity1)
        sim2 = w2v_model.model.similarity(term, entity2)
        mutual_similarity = sim1 + sim2
        if term in (entity1, entity2) or term in [t[1] for t in heap.heap]:
            # this term is a query entity or is already in the heap
            continue
        heap.add(mutual_similarity, term)

    expansions = [obj[1] for obj in heap.heap]
    return ' '.join(expansions)
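# A minimal sketch of the MaxEntitiesHeap used above, inferred only from its call
# sites: it is constructed with a capacity and the query entities, exposes
# add(score, term), and stores (score, term) tuples on a .heap attribute. The
# project's real class may behave differently; this version keeps the top
# `capacity` terms by score and skips the query entities.
import heapq

class MaxEntitiesHeapSketch:
    def __init__(self, capacity, entities):
        self.capacity = capacity
        self.entities = set(entities)
        self.heap = []  # list of (score, term) tuples

    def add(self, score, term):
        if term in self.entities:
            return
        if len(self.heap) < self.capacity:
            heapq.heappush(self.heap, (score, term))
        elif score > self.heap[0][0]:
            # replace the current lowest-scoring term
            heapq.heapreplace(self.heap, (score, term))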
def __calculate_plates(self):
    """
    Calculates the frames where the average lightness is high.
    The result is set to the peaks attribute.
    Method just for easy reading.
    """
    if self.light_history is None:
        raise ValueError('Counter is not initialized')
    # The limits for lightness in HLS are 0 and 255 by default
    self.light_history = normalize(self.light_history, 0, 255)
    self.peaks = find_peaks(self.light_history)
def do_detect(self, dataframe: pd.DataFrame) -> TimeSeries:
    data = utils.cut_dataframe(dataframe)
    data = data['value']
    pat_data = self.state.pattern_model
    if pat_data.count(0) == len(pat_data):
        raise ValueError('Labeled patterns must not be empty')

    window_size = self.state.window_size
    all_corr = utils.get_correlation_gen(data, window_size, pat_data)
    all_corr_peaks = utils.find_peaks(all_corr, window_size * 2)
    filtered = self.__filter_detection(all_corr_peaks, data)
    filtered = list(filtered)
    return [(item, item + window_size * 2) for item in filtered]
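# A minimal sketch of the kind of peak detection utils.find_peaks above might
# perform on the correlation series: local maxima separated by at least
# `min_distance` samples. The thresholding and exact behavior of the project's
# helper are assumptions, not its actual implementation.
import numpy as np
from scipy import signal

def find_corr_peaks_sketch(correlations, min_distance):
    corr = np.asarray(list(correlations), dtype=float)
    peaks, _ = signal.find_peaks(corr, distance=min_distance)
    return peaks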
def calc_best_year_peak_detection(self, entities):
    # for each pair of entities (unordered):
    pairs = list(itertools.combinations(entities, 2))
    best_years = []
    for pair in pairs:
        peak_years = utils.find_peaks(pair[0], pair[1], self.year_to_model)
        longest_sequence = utils.find_longest_sequence(peak_years)
        if not longest_sequence:
            # no peak was found for this pair
            return None
        middle_year = utils.get_middle_year(longest_sequence[0], longest_sequence[-1])
        best_years.append(middle_year)
    avg_best_year = round(np.array(best_years).mean())
    return avg_best_year
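# Minimal sketches of the two utils helpers used above, inferred from their call
# sites: find_longest_sequence returns the longest run of consecutive years in a
# list of peak years, and get_middle_year returns the midpoint of a year range.
# Both are assumptions about the project's actual helpers.
def find_longest_sequence_sketch(years):
    best, current = [], []
    for year in sorted(years):
        if current and year == current[-1] + 1:
            current.append(year)
        else:
            current = [year]
        if len(current) > len(best):
            best = list(current)
    return best

def get_middle_year_sketch(first_year, last_year):
    return (first_year + last_year) // 2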
def __calculate_press_down_positions(self):
    """
    Calculates the frames where the inner area is in the bottom position.
    The result is set to the peaks attribute.
    Method just for easy reading.
    """
    if self.y_pos_history is None:
        raise ValueError('Tracker is not initialized')
    # Normalize the vertical positions of the top-left corner of the
    # bounding box of the tracked object. The limits are the initial y
    # position and an offset determined by observation.
    self.y_pos_history = normalize(self.y_pos_history, self.y_bar_start,
                                   self.y_bar_start + 15)
    # Shift the curve to start at 0
    min_ = np.min(self.y_pos_history)
    self.y_pos_history = self.y_pos_history - min_
    # Find the peaks of the sine-shaped curve.
    self.peaks = find_peaks(self.y_pos_history, 0.5)
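# A minimal sketch of the normalize helper used in __calculate_plates and
# __calculate_press_down_positions above, under the assumption that it clips the
# series to [lower, upper] and rescales it to the 0..1 range; the real helper's
# behavior may differ.
import numpy as np

def normalize_sketch(values, lower, upper):
    values = np.clip(np.asarray(values, dtype=float), lower, upper)
    return (values - lower) / float(upper - lower)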
def expand_entity_word2vec_with_peak(self, entity, time, topk=None):
    """
    Get an entity and a timestamp,
    find the top 10 closest terms from the word2vec model of that time period,
    and for each term check whether the given time is a peak.
    """
    w2v_model = self.year_to_model[time]
    if topk is None:
        topk = self.k
    expansions = []
    if entity in w2v_model.model:
        # gensim's most_similar takes `topn`, not `topk`
        related_terms = w2v_model.model.most_similar(positive=entity, topn=10)
        # logging.debug("%i: '%s' is most similar to " % (time, predicted[0]))
        for related_tuple in related_terms:
            related_term = related_tuple[0]
            peak_years = utils.find_peaks(entity, related_term, self.year_to_model)
            # Count this relation as correct if the real year was identified
            # as a peak (or close to a peak)
            if time in peak_years:
                expansions.append(related_tuple)
            if len(expansions) >= topk:
                break
    return expansions
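# A minimal sketch of what a utils.find_peaks(entity1, entity2, year_to_model)
# call could compute: the per-year similarity between the two entities across the
# year-indexed word2vec models, and the years where that series has a local
# maximum. The signature matches the calls above, but this body is an assumption
# about the project's actual helper.
from scipy import signal

def find_peak_years_sketch(entity1, entity2, year_to_model):
    years = sorted(year_to_model.keys())
    sims = []
    for year in years:
        model = year_to_model[year].model
        if entity1 in model and entity2 in model:
            sims.append(model.similarity(entity1, entity2))
        else:
            sims.append(0.0)
    peak_indices, _ = signal.find_peaks(sims)
    return [years[i] for i in peak_indices]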
def click_label(self, smoothing=1):
    if smoothing > 1:
        working_profile = sp.signal.convolve(self.profile, np.ones((smoothing)), mode='same') / float(smoothing)
    else:
        working_profile = self.profile

    # find peaks and troughs:
    gthresh = 1.0 / smoothing
    nslow = self.h5.h5[self.data_block].shape[1]
    peaks = np.sort(find_peaks(working_profile, gradient_threshold=gthresh))
    idx = 0
    z = np.arange(len(working_profile))
    done = False or not len(peaks)

    fig = plt.figure(figsize=(22, 12))
    # disable matplotlib's default key bindings so keys can be used for labeling
    for key in plt.rcParams.keys():
        if key[:6] == 'keymap':
            plt.rcParams[key] = ''

    global current_x, current_label, label_dict
    # try to get the label dictionary from the current dataset;
    # if none exists, mine the model database for a match
    label_dict = self.get_label_dict()
    if len(label_dict) == 0:
        label_dict = self.find_matching_labels()

    current_x = 0
    current_label = ''
    l1 = .05
    l2 = .55
    fw = .9
    hw = .4
    b1 = .55
    b2 = .05
    fh = .9
    hh = .4

    global bscanindex
    bscanindex = 0

    def plot_at(x):
        global current_label, bscanindex
        if x in label_dict.values():
            existing_label = [key for key, value in label_dict.items() if value == x][0]
        else:
            existing_label = ''
        plt.axes([l1, b2, hw, hh])
        plt.cla()
        bscan = np.abs(self.h5.h5[self.data_block][0, bscanindex, :, :])
        # bscan = shear(bscan, 1)
        try:
            test = np.mean(bscan[:, -20:], axis=1)
        except:
            test = np.mean(bscan, axis=1)
        offset, goodness = translation1(test, working_profile, xlims=10, equalize=True)
        cmin = np.median(bscan)
        cmax = np.percentile(bscan, 99.95)  # saturate 0.05% of pixels
        plt.imshow(bscan, interpolation='none', clim=(cmin, cmax), cmap='gray')
        for label in label_dict.keys():
            print(label, label_dict[label])
            label_z = z[label_dict[label]]
            th = plt.text(bscan.shape[1], label_z - offset, label, ha='left', va='center', fontsize=8)
        try:
            plt.ylim((np.max(list(label_dict.values())) + 10, np.min(list(label_dict.values())) - 10))
        except:
            pass

        plt.axes([l1, b1, fw, hh])
        plt.cla()
        plt.plot(z, working_profile)
        plt.plot(z[x], working_profile[x], 'ks')
        valid = np.where(working_profile)[0]
        plt.xlim((valid[0], valid[-1]))
        plt.autoscale(False)
        for label in label_dict.keys():
            label_z = z[label_dict[label]]
            th = plt.text(label_z, working_profile[label_z], label, ha='center', va='bottom')

        plt.axes([l2, b2, hw, hh])
        plt.cla()
        plt.plot(z, working_profile)
        plt.plot(z[x], working_profile[x], 'ks')
        plt.xlim((z[x] - 10, z[x] + 10))
        z1 = max(0, x - 10)
        z2 = min(len(z), x + 10)
        ymin = np.min(working_profile[z1:z2])
        ymax = np.max(working_profile[z1:z2]) * 1.25
        plt.text(z[x], working_profile[x], existing_label, ha='center', va='bottom')
        plt.ylim((ymin, ymax))
        plt.title(current_label)
        plt.draw()

    def onclick(event):
        global current_x
        # print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' % (
        #     event.button, event.x, event.y, event.xdata, event.ydata))
        current_x = round(event.xdata)
        plot_at(current_x)

    def onpress(event):
        # print(event.key)
        global current_x, current_label, label_dict, bscanindex
        if event.key == 'right':
            current_x = (current_x + 1) % len(working_profile)
        elif event.key == 'ctrl+right':
            try:
                current_x = np.min(peaks[np.where(peaks > current_x)[0]])
            except Exception as e:
                current_x = len(working_profile) - 1
        elif event.key == 'left':
            current_x = (current_x - 1) % len(working_profile)
        elif event.key == 'ctrl+left':
            try:
                current_x = np.max(peaks[np.where(peaks < current_x)[0]])
            except Exception as e:
                current_x = 0
        elif event.key == 'shift+ctrl+right':
            try:
                current_x = peaks[np.where(peaks > current_x)[0]][5]
            except Exception as e:
                current_x = len(working_profile) - 1
        elif event.key == 'shift+ctrl+left':
            try:
                current_x = peaks[np.where(peaks < current_x)[0]][-5]
            except Exception as e:
                current_x = 0
        elif event.key == 'shift':
            pass
        elif event.key == '/':
            pass
        elif event.key in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ':
            current_label = current_label + event.key.upper()
        elif event.key == 'backspace':
            current_label = current_label[:-1]
        elif event.key == 'ctrl+delete':
            label_dict = {}
        elif event.key == 'delete':
            # copy the keys so the dict can be modified while iterating
            for key in list(label_dict.keys()):
                if label_dict[key] == current_x:
                    label_dict.pop(key)
        elif event.key == 'enter':
            label_dict[current_label] = current_x
            print(label_dict)
            current_label = ''
        elif event.key == 'pageup':
            bscanindex = (bscanindex + 1) % nslow
        elif event.key == 'pagedown':
            bscanindex = (bscanindex - 1) % nslow
        plot_at(current_x)

    cid = fig.canvas.mpl_connect('button_press_event', onclick)
    pid = fig.canvas.mpl_connect('key_press_event', onpress)
    plot_at(current_x)
    plt.show()

    # persist the labels to the current dataset
    self.h5.require_group('model')
    self.h5.require_group('model/labels')
    for key in label_dict.keys():
        self.h5.put('model/labels/%s' % key, label_dict[key])

    # ...and to the model database
    mdb = H5(ocfg.model_database)
    did = self.h5.get('IDs/dataset_id').value
    # did is the primary key for the model, but we'll also save eccentricity
    did_key = '%d' % did
    mdb.require_group(did_key)
    si = self.h5.get('eccentricity/superior_inferior').value
    nt = self.h5.get('eccentricity/nasal_temporal').value
    radial_distance = np.sqrt(si ** 2 + nt ** 2)
    mdb.put('%s/superior_inferior' % did_key, si)
    mdb.put('%s/nasal_temporal' % did_key, nt)
    mdb.put('%s/radial_distance' % did_key, radial_distance)
    mdb.put('%s/profile' % did_key, self.profile)
    mdb.require_group('%s/labels' % did_key)
    for key in label_dict.keys():
        mdb.put('%s/labels/%s' % (did_key, key), label_dict[key])
    mdb.close()
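# A minimal sketch of a gradient-threshold peak finder matching the
# find_peaks(working_profile, gradient_threshold=...) call in click_label above:
# keep local maxima whose rise and fall both exceed the threshold. This is an
# assumed reconstruction, not the project's actual helper.
import numpy as np

def find_peaks_gradient_sketch(profile, gradient_threshold=0.0):
    profile = np.asarray(profile, dtype=float)
    grad = np.diff(profile)
    peaks = []
    for i in range(1, len(profile) - 1):
        if grad[i - 1] > gradient_threshold and -grad[i] > gradient_threshold:
            peaks.append(i)
    return np.array(peaks)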
def pipeline(init, image, debug=0):
    init['frameno'] += 1
    [undistorted_image] = undistort([image])
    color_binary_image, binary_image = apply_color_transform(undistorted_image)
    cropped_image = crop(np.copy(binary_image), 440, debug=debug)
    warped = transform_perspective(cropped_image, init['src'], init['dst'])
    # eroded = cv2.erode(warped, np.ones((3, 3)))
    warped = cv2.dilate(warped, np.ones((7, 3)))

    if not init['peaks'][0] or not init['peaks'][1]:
        init['peaks'] = find_peaks(warped, init['lane_width'])

    left_x_points, left_y_points, right_x_points, right_y_points = identify_lines(
        warped, init['peaks'], nwindows=7, debug=debug)

    if len(left_x_points) and len(right_x_points):
        curr_lane_width = right_x_points[0] - left_x_points[0]
        init['lane_width'] = int(init['lane_width'] * 0.1 + curr_lane_width * 0.9)
        init['peaks'] = [int(left_x_points[0]), int(right_x_points[0])]

    init['last_fit'][0] = get_fit(left_x_points, left_y_points, init.get('last_fit', [None, None])[0])
    init['last_fit'][1] = get_fit(right_x_points, right_y_points, init.get('last_fit', [None, None])[1])

    result, ploty, left_fitx, right_fitx = annotate_image(
        init, warped, undistorted_image, left_x_points, right_x_points, debug=debug)

    if debug:
        f, axs = plt.subplots(2, 3, figsize=(30, 10))
        f.tight_layout()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        axs[0][0].imshow(image)
        axs[0][1].imshow(color_binary_image)
        axs[0][2].imshow(binary_image, cmap='gray')
        axs[1][0].imshow(cropped_image, cmap='gray')
        axs[1][1].imshow(warped, cmap='gray')
        axs[1][0].plot(*init['src'][0], 'o')
        axs[1][0].plot(*init['src'][1], '*')
        axs[1][0].plot(*init['src'][2], 'x')
        axs[1][0].plot(*init['src'][3], '+')
        axs[1][1].plot(*init['dst'][0], 'o')
        axs[1][1].plot(*init['dst'][1], '*')
        axs[1][1].plot(*init['dst'][2], 'x')
        axs[1][1].plot(*init['dst'][3], '+')
        # axs[1][2].imshow(warped, cmap='gray')
        axs[1][1].plot(left_fitx, ploty, color='yellow')
        axs[1][1].plot(right_fitx, ploty, color='yellow')
        axs[1][1].plot(left_x_points, left_y_points, 'o', color='red')
        axs[1][1].plot(right_x_points, right_y_points, 'o', color='red')
        plt.xlim(0, 1280)
        plt.ylim(720, 0)
        result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
        axs[1][2].imshow(result)
        f.savefig('figure.png')
    if debug >= 1:
        plt.imsave('frame.png', image)
        # plt.imsave('./writeup-examples/undist.png', cv2.cvtColor(undistorted_image, cv2.COLOR_BGR2RGB))
        # plt.imsave('./writeup-examples/binary.png', binary_image, cmap='gray')
        # plt.imsave('./writeup-examples/perspective.png', warped, cmap='gray')
        # plt.imsave('./writeup-examples/result.png', result, cmap='gray')
        import pdb; pdb.set_trace()
    return result
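# A sketch of how the mutable `init` state used by pipeline() might be seeded for
# the first frame, based only on the keys read and written above; the concrete
# src/dst points and the starting lane width are illustrative assumptions for a
# 1280x720 frame, not values taken from the project.
import numpy as np

init_example = {
    'frameno': 0,
    'src': np.float32([[200, 720], [580, 460], [700, 460], [1080, 720]]),  # assumed perspective source points
    'dst': np.float32([[300, 720], [300, 0], [980, 0], [980, 720]]),       # assumed bird's-eye destination points
    'peaks': [0, 0],           # zeros force find_peaks() on the first frame
    'lane_width': 700,         # rough starting guess in pixels
    'last_fit': [None, None],  # previous polynomial fits for the left/right lanes
}
# annotated = pipeline(init_example, frame_bgr)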