def get_form(self):
    """Build a surface form of this word with simulated morphological
    alternations: each onset/coda feature independently has a chance of
    being "left off" due to context/use (inflectional/derivational).

    Returns:
        Word: a new Word whose onset and coda carry the (possibly
        reduced) feature lists; nucleus, percept and noise are reused
        unchanged from self.
    """
    from random import randint

    onset = self.onset
    nuc = self.nucleus
    coda = self.coda

    # 75% chance for each feature to be included; the "null" feature is
    # always kept so a segment never loses its placeholder marker.
    # Fix: the original tested `f is "null"` — identity against a str
    # literal is implementation-dependent; equality is the correct test.
    morph_chance = 75
    ofl = [f for f in onset.features
           if randint(0, 99) < morph_chance or f == "null"]
    cfl = [f for f in coda.features
           if randint(0, 99) < morph_chance or f == "null"]

    onset_cc = Segment.Segment(onset.name, ofl, onset.symbol)
    coda_cc = Segment.Segment(coda.name, cfl, coda.symbol)
    form = Word(onset_cc, nuc, coda_cc, self.percept, self.noise)
    return form
def create_snake():
    """Build the initial snake: three segments laid out in a horizontal row."""
    body = [Segment(SEG_SIZE * step, SEG_SIZE) for step in (1, 2, 3)]
    return Snake(body)
def getRandomPoint(poly: 'Polygon', lower: int) -> 'Point':
    """Sample a uniformly random point lying inside *poly*.

    Uses rejection sampling: draw candidates from the polygon's bounding
    box, then keep the first one whose ray cast from a point guaranteed
    to be outside the box crosses the polygon boundary an odd number of
    times (the even-odd inside test).

    Args:
        poly: polygon whose ``listOfEdges`` holds Segment objects with
            ``start``/``end`` Points.
        lower: altitude (third coordinate) assigned to the result.

    Returns:
        A Point inside the polygon at height *lower*.
    """
    # Extreme vertices of the bounding box. Left/right use Point's own
    # ordering (`>`/`<` — presumably by x, consistent with the rest of
    # this module); up/down compare .y only, exactly as the original.
    # Note "up" tracks the SMALLEST y and "down" the largest (screen
    # coordinates); random.uniform accepts bounds in either order.
    mostLeftPoint = poly.listOfEdges[0].start
    mostRightPoint = poly.listOfEdges[0].start
    mostUpPoint = poly.listOfEdges[0].start
    mostDownPoint = poly.listOfEdges[0].start
    for edge in poly.listOfEdges:
        for vertex in (edge.start, edge.end):
            if mostLeftPoint > vertex:
                mostLeftPoint = vertex
            if mostRightPoint < vertex:
                mostRightPoint = vertex
            if mostUpPoint.y > vertex.y:
                mostUpPoint = vertex
            if mostDownPoint.y < vertex.y:
                mostDownPoint = vertex

    # Rejection-sample until a candidate lands inside the polygon.
    # (The original duplicated this whole body before and inside a
    # `while not counter % 2` loop; a single loop is equivalent.)
    while True:
        randomx = random.uniform(mostLeftPoint.x, mostRightPoint.x)
        randomy = random.uniform(mostDownPoint.y, mostUpPoint.y)
        randomPoint = Point(randomx, randomy, lower)
        # Ray from just outside the bounding box to the candidate.
        segment = Segment(Point(mostLeftPoint.x - 1, mostUpPoint.y),
                          randomPoint)
        counter = 0
        for edge in poly.listOfEdges:
            if Segment.doIntersect(segment, edge):
                counter += 1
        if counter % 2:
            return randomPoint
def deserialize(self, data):
    """Restore this ray's state from a serialized dict.

    Args:
        data: dict with keys ``x_0``, ``y_0``, ``vx``, ``vy``, ``count``
            and optionally ``mirrors`` (list of mirror dicts),
            ``last_mirror`` (mirror dict or None) and ``segment_list``
            (list of segment dicts).
    """
    # TODO Add validation
    self.x_0 = data.get('x_0')
    self.y_0 = data.get('y_0')
    self.vx = data.get('vx')
    self.vy = data.get('vy')
    self.count = data.get('count')

    # Rebuild mirrors; attribute is left untouched when the key is
    # missing or empty (preserved from the original behavior).
    m_l = data.get('mirrors')
    if m_l:
        mrs = []
        for m_data in m_l:
            mir = Mirror.Mirror(0, 0, 0, 0, 'flat', 0)
            mir.deserialize(m_data)
            mrs.append(mir)
        self.mirrors = mrs

    m_data = data.get('last_mirror')
    if m_data is not None:
        mir = Mirror.Mirror(0, 0, 0, 0, 'flat', 0)
        mir.deserialize(m_data)
        self.last_mirror = mir
    else:
        self.last_mirror = None

    # Fix: the original iterated data.get('segment_list') unconditionally
    # and raised TypeError when the key was absent, unlike the guarded
    # 'mirrors' handling above; default to an empty list instead.
    segments = []
    for s in data.get('segment_list') or []:
        seg = Segment.Segment(0, 0, 0, 0)
        seg.deserialize(s)
        segments.append(seg)
    self.segment_list = segments
def __init__(self, name, x, y, direction, playerType):
    """Create a snake player.

    Args:
        name: player display name.
        x, y: starting grid position of the head.
        direction: vertical growth direction of the body (+1 or -1 —
            presumably; confirm against the movement code).
        playerType: identifies which player; forwarded to each Segment.
    """
    head = pyglet.image.load('head.png')
    # Size the head to one 36px cell and anchor it at its centre so the
    # sprite is positioned/rotated about its middle.
    head.width = 36
    head.height = 36
    head.anchor_x = 18
    head.anchor_y = 18
    self.playerType = playerType
    self.playerName = name
    self.length = 7
    self.direction = direction
    self.positionX = x
    self.positionY = y
    self.squaresoccupied = list()
    self.score = 0
    self.moveQueue = list()
    self.segments = list()
    self.segBatch = pyglet.graphics.Batch()
    self.snakeSprite = pyglet.sprite.Sprite(head)
    # Lay out the initial body behind the head. Grid squares are 40px
    # with a (340, 60) pixel origin — presumably the board offset; TODO
    # confirm against the board-drawing code.
    for i in range(self.length):
        # Fix: was `tuple((x, ...))` — a redundant wrapper around a tuple.
        self.squaresoccupied.append((x, y + (i * self.direction)))
        self.segments.append(
            Seg.Segment(340 + (x * 40),
                        60 + (y * 40) + (i * 40 * self.direction),
                        self.direction, self.playerType))
    # Put every segment sprite into one batch for a single draw call.
    for element in self.segments:
        element.segmentSprite.batch = self.segBatch
def SegmentImage(image, filledImage, contours):
    """Cut each contour out of *image* into its own Segment object.

    Args:
        image: 2D array, the source image — presumably binarised with
            values in [0, 1]; TODO confirm (the 0.5 background below
            suggests it).
        filledImage: 2D array aligned with *image*; a hole-filled copy.
        contours: iterable of (N, 2) arrays of (row, col) contour points.

    Returns:
        (imagecollective, segments): a composite image with every
        segment's pixels stamped onto a 0.5 background, and the list of
        Segment objects (each with BoundingBox, Image and FilledImage
        populated).
    """
    #Create a list for all the segments found in the image
    segments = list()
    #Create a collective image that has all segments; the 0.5 background
    # distinguishes untouched pixels from real segment values.
    imagecollective = np.zeros((image.shape[0], image.shape[1]))
    imagecollective[:, :] = 0.5
    #Segment every contour
    for contour in contours:
        #Create new segment
        segment = Segment()
        #Get aspect ratio — columns are contour[:, 1], rows contour[:, 0]
        segment.BoundingBox = BoundingBox(np.min(contour[:, 1]),
                                          np.max(contour[:, 1]),
                                          np.min(contour[:, 0]),
                                          np.max(contour[:, 0]))
        segment.BoundingBox.ToInt()
        #Create the segment image (will be replaced with segment class)
        segment.CreateImage()
        #Copy the segment from the contour we have to the segment:
        # rasterise the contour's interior, then copy those pixels into
        # the segment-local frame (offset by the bounding-box origin).
        rr, cc = polygon(contour[:, 0], contour[:, 1], image.shape)
        rr = rr.astype(int)
        cc = cc.astype(int)
        segment.Image[rr - segment.BoundingBox.miny,
                      cc - segment.BoundingBox.minx] = image[rr, cc]
        segment.FilledImage[rr - segment.BoundingBox.miny,
                            cc - segment.BoundingBox.minx] = filledImage[rr, cc]
        imagecollective[rr, cc] = image[rr, cc]
        segments.append(segment)
    return imagecollective, segments
def setSegments(self):
    '''
    See the documentation for setSquares(). It works similarly, except
    handles the segments of the snake for drawing in pyglet. Also
    specifies the batches of the snake.
    '''
    # Push a fresh segment in at the head position...
    new_head = Seg.Segment(340 + (self.positionX * 40),
                           (60 + (self.positionY * 40)),
                           self.direction, self.playerType)
    self.segments.insert(0, new_head)
    # ...and drop the tail once the body exceeds the snake's length.
    if len(self.segments) > self.length:
        self.segments.pop()
    # Keep every sprite in the shared batch for a single draw call.
    for seg in self.segments:
        seg.segmentSprite.batch = self.segBatch
def makeSegments(self, segmentsNew, filtName=None, species=None, subfilter=None):
    """ Adds segments to self.segments.

    Three input formats are handled, selected by self.method and the
    arguments:
      * self.method == "Click" (bat mode): segmentsNew is one
        [x1, x2, labels] triple, stored as a single segment with a
        zero frequency band.
      * subfilter given (wavelet detection): segmentsNew is a list of
        ((t1, t2), certainty) pairs; the frequency band comes from the
        subfilter's FreqRange clipped to Nyquist, and each segment gets
        a full label dict (species/certainty/filter/calltype).
      * otherwise: generic segments with unknown species, certainty 0.

    Args:
        segmentsNew: new segments; format depends on the mode (above).
        filtName: wavelet filter name recorded in each label.
        species: species name recorded in each label.
        subfilter: dict with "FreqRange" and "calltype" keys
            (wavelet mode only).
    """
    if self.method == "Click":
        # Batmode: segmentsNew should be already prepared as: [x1, x2, labels]
        y1 = 0
        y2 = 0
        if len(segmentsNew) != 3:
            # NOTE(review): this only warns — the indexing below still
            # proceeds on a malformed input; confirm that is intended.
            print("Warning: segment format does not match bat mode")
        segment = Segment.Segment(
            [segmentsNew[0], segmentsNew[1], y1, y2, segmentsNew[2]])
        self.segments.addSegment(segment)
    elif subfilter is not None:
        # for wavelet segments: (same as self.species!="Any sound")
        y1 = subfilter["FreqRange"][0]
        # Clip the band's upper edge to the Nyquist frequency.
        y2 = min(subfilter["FreqRange"][1], self.sampleRate // 2)
        for s in segmentsNew:
            segment = Segment.Segment([
                s[0][0], s[0][1], y1, y2,
                [{
                    "species": species,
                    "certainty": s[1],
                    "filter": filtName,
                    "calltype": subfilter["calltype"]
                }]
            ])
            self.segments.addSegment(segment)
    else:
        # for generic all-species segments:
        y1 = 0
        y2 = 0
        species = "Don't Know"
        cert = 0.0
        self.segments.addBasicSegments(segmentsNew, [y1, y2],
                                       species=species, certainty=cert)
def prepare_song(self, song_name):
    """Load the pre-extracted JSON beside *song_name* and return the
    first useable segment together with its average time per beat."""
    json_path = song_name.replace(".wav", "") + '.json'
    with open(json_path) as handle:
        raw_entries = json.load(handle)
    # Keep only the segments flagged useable in the JSON.
    useable = [Segment.Segment().setJSON(entry)
               for entry in raw_entries if entry['useable']]
    for seg in useable:
        print(seg.toString())
    first = useable[0]
    return first, first.average_time_per_beat
def MergeHalfCircles(circles):
    """Merge pairs of half-circle segments into whole circles.

    Two segments are treated as halves of one circle when their bounding
    boxes are vertically aligned (within circlesMergeThreshold) and
    horizontally adjacent (within 10x that threshold). Matched halves
    are padded to a common size and concatenated side by side.

    Args:
        circles: list of Segment objects with .Image and .BoundingBox.

    Returns:
        A new list of Segment objects with matched halves merged; every
        returned segment also gets a FilledImage via FillPolygon.
    """
    newCirclesList = list()
    merged = np.zeros(len(circles))
    for i in range(len(circles)):
        if merged[i] == 1:
            continue
        c1 = circles[i]
        c2 = None
        # Look for an unmerged partner half of c1.
        for j in range(len(circles)):
            if i == j or merged[j]:
                continue
            if (abs(c1.BoundingBox.miny - circles[j].BoundingBox.miny) < circlesMergeThreshold
                    and abs(c1.BoundingBox.maxy - circles[j].BoundingBox.maxy) < circlesMergeThreshold
                    and (abs(c1.BoundingBox.minx - circles[j].BoundingBox.maxx) < circlesMergeThreshold * 10
                         or abs(c1.BoundingBox.maxx - circles[j].BoundingBox.minx) < circlesMergeThreshold * 10)):
                c2 = circles[j]
                merged[j] = 1
                break
        # Keep c1 as the left half. (Fix: `is not None` instead of the
        # original `!= None` comparisons throughout.)
        if c2 is not None and c1.BoundingBox.minx > c2.BoundingBox.minx:
            c1, c2 = c2, c1
        width = c1.Image.shape[1]
        height = c1.Image.shape[0]
        if c2 is not None:
            width = max(width, c2.Image.shape[1])
            height = max(height, c2.Image.shape[0])
        # Pad img1 to the common size (anchored top-left).
        tmp = np.copy(c1.Image)
        c1.Image = np.zeros((height, width))
        c1.Image[:tmp.shape[0], :tmp.shape[1]] = tmp[:, :]
        if c2 is not None:
            # Pad img2 likewise, then stitch the two halves together.
            tmp = np.copy(c2.Image)
            c2.Image = np.zeros((height, width))
            c2.Image[:tmp.shape[0], :tmp.shape[1]] = tmp[:, :]
            newCircle = np.concatenate((c1.Image, c2.Image), axis=1)
            segment = Segment()
            segment.BoundingBox = c1.BoundingBox
            segment.Image = newCircle
            newCirclesList.append(segment)
            merged[i] = 1
        else:
            newCirclesList.append(c1)
    connectCircles(newCirclesList)
    for circle in newCirclesList:
        contours = find_contours(circle.Image, 0.9)
        circle.FilledImage = FillPolygon(circle.Image, contours)
    return newCirclesList
def main():
    """Build a three-segment robot arm and display it on a Tk canvas."""
    ################# Robot Stuff #####################
    # Declare the segments the robot arm will contain.
    # Can have more than this many segments.
    s1 = sg.Segment(1, 0)
    s2 = sg.Segment(1, 0)
    s3 = sg.Segment(1, 0)
    # Place segments into a list; used to initialize the robot arm.
    segments = [s1, s2, s3]
    # Declare the angle configurations of the arm.
    angleConfig = [0, 0, 0]

    ################ Canvas Stuff ####################
    # configure height and width for canvas
    wid = 640
    hei = 480
    scale = 50
    master = Tk()
    # Fix: the original did `master.title = "ArmSim"`, which shadows the
    # Tk title() method and never sets the window title; it must be
    # called instead.
    master.title("ArmSim")
    vas = Canvas(master, width=wid, height=hei)
    vas.configure(background='white')
    # Canvas does not respond to keyboard commands if it is not focused
    # on, so this is important.
    vas.focus_set()
    vas.pack()
    drawGrid(vas, hei, wid, scale)
    r1 = ra.RobotArm(segments, angleConfig, vas)
    r1.drawArm()
    master.mainloop()
    return
def calc_ray_step(self, mirrors):
    """Advance the ray by one reflection step.

    Finds the nearest mirror the ray hits (excluding the mirror it just
    reflected off), appends the traversed Segment to self.segment_list,
    reflects the direction vector, and — when a target circle is set —
    checks whether the ray passes through it.

    Args:
        mirrors: iterable of mirrors with x1/y1/x2/y2 endpoints.

    Returns:
        True when the ray passes through the win circle (game won),
        otherwise False. Decrements self.count as a side effect.
    """
    inters_mirrors = []
    for mirror in mirrors:
        if mirror is not self.last_mirror:
            # Extend the ray far along its direction (x1000) and test
            # intersection with this mirror's segment.
            prs, x, y = intersect(self.x_0, self.y_0,
                                  self.x_0 + self.vx * 1000,
                                  self.y_0 + self.vy * 1000,
                                  mirror.x1, mirror.y1, mirror.x2, mirror.y2)
            if prs:
                # looking for the nearest mirror
                distance = math.sqrt((self.x_0 - x)**2 + (self.y_0 - y)**2)
                if distance >= EPS:  # skip hits at the current position
                    inters_mirrors.append((distance, mirror, x, y))
    inters_mirrors.sort(key=lambda s: s[0])
    curr_mirror = inters_mirrors[0] if inters_mirrors and inters_mirrors[
        0][1] is not self.last_mirror else None
    if curr_mirror and self.count > 0:
        x, y = curr_mirror[2], curr_mirror[3]
        ray = Segment.Segment(self.x_0, self.y_0, x, y)
        self.segment_list.append(ray)
        ai_x, ai_y = reflect(self.x_0, self.y_0, x, y, curr_mirror[1])
        if self.win_circle:
            # Check whether the ray passes through the win circle:
            # compute the foot of the perpendicular from the circle
            # centre to the ray's line and compare with the radius.
            a = (y - self.y_0)
            b = (self.x_0 - x)
            ln_w = a * a + b * b
            c_new = self.y_0 * x - self.x_0 * y + a * self.win_circle[
                0] + b * self.win_circle[1]
            x_w = -(a * c_new) / ln_w
            y_w = -(b * c_new) / ln_w
            ln_r = math.sqrt(x_w * x_w + y_w * y_w)
            if ln_r < self.win_circle[2]:
                # (Russian: "You won in N step(s)")
                print('Вы победили за ' + str(self.base_count - self.count) +
                      ' шагов(-а)')
                return True
        self.vx = ai_x
        self.vy = ai_y
        self.x_0 = x
        self.y_0 = y
        self.last_mirror = curr_mirror[1]
    else:
        # No reachable mirror (or out of steps) — debug dump.
        print("Ой")
        print(self)
        print(inters_mirrors)
    self.count -= 1
    return False
def drawSingle(self):
    """Read segment endpoints from the UI spin boxes and open a drawing
    window; shows an error dialog when start and end coincide."""
    ui = self.ui
    xs = ui.spin_box_xs.value()
    ys = ui.spin_box_ys.value()
    xe = ui.spin_box_xe.value()
    ye = ui.spin_box_ye.value()
    self.singleAlgorithm = self.getAlg(ui.combobox_alg_single.currentText())
    # Degenerate segment: start == end is rejected with a dialog.
    if QPoint(xs, ys) == QPoint(xe, ye):
        self.__showErrorMessage("Начало и конец отрезка совпадают!")
        return
    drawableObject = Segment(xs, ys, xe, ye,
                             self.singleAlgorithm, self.singleColor)
    self.newWindow = Drawing(drawableObject)
    self.newWindow.show()
def generateFlight(poly: 'Polygon', lower: int, upper: int, flightType: str, closeRange: int) -> 'Flight':
    """Generate a random Flight relative to the airspace polygon *poly*.

    Args:
        poly: the airspace polygon (provides lowerLimit/upperLimit and
            listOfEdges).
        lower, upper: inclusive bounds for the random speed.
        flightType: "external" (edge to edge, one segment), "internal"
            (fully inside, three segments), or "half-internal" (one
            endpoint on an edge, two segments).
        closeRange: passed through to Flight — presumably a proximity
            threshold; confirm in the Flight class.

    Returns:
        A Flight built from 1-3 random segments. NOTE(review): an
        unrecognised flightType falls through and returns None
        implicitly — confirm callers only pass the three known values.
    """
    if flightType == "external":
        # Straight flight between random points on two (possibly the
        # same) edges of the polygon.
        randomEdgeEnter = random.randint(0, len(poly.listOfEdges) - 1)
        randomEdgeExit = random.randint(0, len(poly.listOfEdges) - 1)
        enterPoint = poly.listOfEdges[randomEdgeEnter].getRandomPoint(
            poly.lowerLimit, poly.upperLimit, flightType)
        exitPoint = poly.listOfEdges[randomEdgeExit].getRandomPoint(
            poly.lowerLimit, poly.upperLimit, flightType)
        v = random.randint(lower, upper)
        return Flight(enterPoint, exitPoint, v, flightType,
                      [Segment(enterPoint, exitPoint)], closeRange)
    elif flightType == "internal":
        # Enter and exit at the lower limit, with a climb to a higher
        # point and a level leg in between.
        randomEnterPoint = Polygon.getRandomPoint(poly, poly.lowerLimit)
        randomExitPoint = Polygon.getRandomPoint(poly, poly.lowerLimit)
        randomPoint1 = Polygon.getRandomPointHigher(poly, poly.lowerLimit,
                                                    poly.upperLimit)
        randomPoint2 = Polygon.getRandomPointSameHeight(poly, randomPoint1)
        segment1 = Segment(randomEnterPoint, randomPoint1)
        segment2 = Segment(randomPoint1, randomPoint2)
        segment3 = Segment(randomPoint2, randomExitPoint)
        v = random.randint(lower, upper)
        return Flight(randomEnterPoint, randomPoint1, v, "internal",
                      [segment1, segment2, segment3], closeRange)
    elif flightType == "half-internal":
        randNum = random.randint(1, 2)
        if randNum == 1:
            # enters through an edge and descends inside
            # (original comment: "ulazi i silazi")
            randomEdgeEnter = random.randint(0, len(poly.listOfEdges) - 1)
            enterPoint = poly.listOfEdges[randomEdgeEnter].getRandomPoint(
                poly.lowerLimit, poly.upperLimit, flightType)
            randomHeight = random.uniform(poly.lowerLimit, poly.upperLimit)
            randomPoint = Polygon.getRandomPoint(poly, randomHeight)
            exitPoint = Polygon.getRandomPoint(poly, poly.lowerLimit)
            segment1 = Segment(enterPoint, randomPoint)
            segment2 = Segment(randomPoint, exitPoint)
            v = random.randint(lower, upper)
            return Flight(enterPoint, randomPoint, v, "half-internal",
                          [segment1, segment2], closeRange)
        else:
            # climbs from inside and exits
            # (original comment: "penje se i izlazi")
            enterPoint = Polygon.getRandomPoint(poly, poly.lowerLimit)
            randomHeight = random.uniform(poly.lowerLimit,
                                          poly.upperLimit)
            randomPoint = Polygon.getRandomPoint(poly, randomHeight)
            exitPoint = Polygon.getRandomPoint(poly, poly.lowerLimit)
            segment1 = Segment(enterPoint, randomPoint)
            segment2 = Segment(randomPoint, exitPoint)
            v = random.randint(lower, upper)
            return Flight(enterPoint, randomPoint, v, "half-internal",
                          [segment1, segment2], closeRange)
def DetectNoteheads(notesAndConnectedSegments):
    """Locate notehead blobs within note/connected-component segments.

    For each segment whose 5px-inset window has clearly more 0-pixels
    than 1-pixels (ratio > 1.2 — presumably background-dominated note
    shapes; confirm the image polarity), the holes are filled and the
    image is eroded three times so only blob-like noteheads survive.

    Args:
        notesAndConnectedSegments: iterable of Segment objects with
            .Image and .BoundingBox.

    Returns:
        (newSegmentsList, newList): the Segment objects carrying the
        eroded images, and the eroded images themselves.
    """
    newList = list()
    newSegmentsList = list()
    for seg in notesAndConnectedSegments:
        image = np.copy(seg.Image)
        # (Fix: removed the original's unused local
        # `tmpImg = np.copy(image[1:-2, 1:-2])`.)
        ones = np.sum(image[5:-5, 5:-5] == 1)
        zeros = np.sum(image[5:-5, 5:-5] == 0)
        if (ones > 0 and zeros / ones > 1.2):
            # Fill the note body, then erode repeatedly so thin parts
            # (stems/beams) disappear and the notehead blob remains.
            image = ndimage.binary_fill_holes(image).astype(int)
            image = binary_erosion(image, selement)
            image = binary_erosion(image, selement)
            image = binary_erosion(image, selement2)
            newSeg = Segment(seg)
            newSeg.Image = image
            newSeg.BoundingBox = seg.BoundingBox
            newList.append(image)
            newSegmentsList.append(newSeg)
    return newSegmentsList, newList
def handleLine(self, line, fileName="", index=0):
    """This function sends a line to translation to assembly based on if
    its a pop, push or arithmetic command.

    Args:
        line: the raw VM source line.
        fileName: name of the file being translated (used for static
            segment addressing — presumably; confirm in S.Segment).
        index: line index, used to make call/comparison labels unique.

    Returns:
        The original line as an assembly comment, followed by its
        translation.
    """
    lineEdit = line.replace("\n", "").rstrip()
    lineSplit = lineEdit.split(" ")
    if len(lineSplit) == 3:
        # Three-token commands: function declaration, call, or a
        # push/pop with segment + offset.
        if lineSplit[0] == "function":
            lineObject = FD.FunctionDecl(lineSplit[1], lineSplit[2])
            # Remember the enclosing function name for label scoping.
            self.funcName = lineSplit[1] + "$"
        elif lineSplit[0] == "call":
            lineObject = FC.FunctionCall(index, self.funcName, lineSplit[1],
                                         lineSplit[2])
        else:
            lineObject = S.Segment(line, lineSplit[0], lineSplit[1],
                                   lineSplit[2], fileName)
    elif self.isArithmetic(line):  # Arithmetic
        lineObject = A.Arithmetic(line, lineSplit[0], index, self.funcName)
    elif ("goto" in line) or ("label" in line):
        lineObject = C.Conditionals(line, self.funcName)
    elif "return" in line:
        # FunctionDecl with no args handles `return` — confirm against
        # FD.FunctionDecl's default behavior.
        lineObject = FD.FunctionDecl()
    # NOTE(review): a line matching none of the branches above leaves
    # lineObject unbound and raises NameError here — confirm callers
    # only pass valid VM commands.
    return "//" + line + "\n" + lineObject.writeLine()
def isInPoly(self, point: 'Point') -> bool:
    """Even-odd ray-casting point-in-polygon test.

    Casts a segment from a point guaranteed to be outside the polygon
    (one unit left of the leftmost vertex, at the topmost vertex's y)
    to *point* and counts boundary crossings: an odd count means the
    point is inside.

    Args:
        point: the Point to test.

    Returns:
        True if *point* lies inside this polygon.
    """
    # Leftmost vertex by Point ordering (`>` — presumably compares by x,
    # consistent with the rest of this class).
    mostLeftPoint = self.listOfEdges[0].start
    for edge in self.listOfEdges:
        if mostLeftPoint > edge.start:
            mostLeftPoint = edge.start
        if mostLeftPoint > edge.end:
            mostLeftPoint = edge.end
    # Vertex with the smallest y.
    mostUpPoint = self.listOfEdges[0].start
    for edge in self.listOfEdges:
        if mostUpPoint.y > edge.start.y:
            mostUpPoint = edge.start
        if mostUpPoint.y > edge.end.y:
            mostUpPoint = edge.end
    segment = Segment(Point(mostLeftPoint.x - 1, mostUpPoint.y), point)
    counter = 0
    for edge in self.listOfEdges:
        if Segment.doIntersect(segment, edge):
            counter += 1
    # Fix: return an actual bool as the annotation promises (the
    # original returned the raw int `counter % 2`); True == 1 keeps
    # existing comparisons working.
    return counter % 2 == 1
def make_grid(self):
    """ make a grid for the galaxy

    :rtype : list[list[Segment]]
    :return: grid, a 2D list of galaxy segments, some of which will be planets
    """
    size = self.m_size
    grid = []
    # Square galaxy: size x size cells. Each cell gets a ~10% chance of
    # holding a planet, otherwise an empty segment.
    for row in range(size):
        cells = []
        for col in range(size):
            if random.randint(0, 100) < 10:
                cells.append(Planet.Planet(i_name=('earth' + str(row + col))))
            else:
                cells.append(Segment.Segment())
        grid.append(cells)
    return grid
def showSpecDerivs():
    """Demo: compute spectral derivatives of a wav file, threshold each
    derivative into segments, and display everything in a pyqtgraph
    window with detected regions overlaid."""
    import SignalProc
    reload(SignalProc)
    import pyqtgraph as pg
    from pyqtgraph.Qt import QtCore, QtGui
    import wavio

    #wavobj = wavio.read('Sound Files/tril1.wav')
    #wavobj = wavio.read('Sound Files/010816_202935_p1.wav')
    #wavobj = wavio.read('Sound Files/20170515_223004 piping.wav')
    wavobj = wavio.read('Sound Files/kiwi_1min.wav')
    fs = wavobj.rate
    data = wavobj.data[:20 * fs]  # first 20 seconds only
    # Fix: the original tested `data.dtype is not 'float'` — an identity
    # comparison against a str literal, which is always True. Compare
    # with != so float data is not needlessly re-converted.
    if data.dtype != 'float':
        data = data.astype('float')  # / 32768.0
    if np.shape(np.shape(data))[0] > 1:
        data = data[:, 0]  # keep a single channel of multi-channel input
    # (Fix: removed the original's duplicate `import SignalProc` here.)
    sp = SignalProc.SignalProc(data, fs, 256, 128)
    sg = sp.spectrogram(data, multitaper=False)
    h, v, b = sp.spectralDerivatives()
    # Log-scale each derivative, leaving zeros at zero to avoid log(0).
    h = np.abs(np.where(h == 0, 0.0, 10.0 * np.log10(h)))
    v = np.abs(np.where(v == 0, 0.0, 10.0 * np.log10(v)))
    b = np.abs(np.where(b == 0, 0.0, 10.0 * np.log10(b)))
    s = Segment(data, sg, sp, fs, 50)

    # Threshold each derivative at mean + 2.5*std (skipping the lowest
    # 10 frequency bins) and extract segments of at least 10 frames.
    hm = np.max(h[:, 10:], axis=1)
    inds = np.squeeze(
        np.where(hm > (np.mean(h[:, 10:] + 2.5 * np.std(h[:, 10:])))))
    segmentsh = s.identifySegments(inds, minlength=10)

    vm = np.max(v[:, 10:], axis=1)
    inds = np.squeeze(
        np.where(vm > (np.mean(v[:, 10:] + 2.5 * np.std(v[:, 10:])))))
    segmentsv = s.identifySegments(inds, minlength=10)

    bm = np.max(b[:, 10:], axis=1)
    segs = np.squeeze(
        np.where(bm > (np.mean(b[:, 10:] + 2.5 * np.std(b[:, 10:])))))
    segmentsb = s.identifySegments(segs, minlength=10)

    #print np.mean(h), np.max(h)
    #print np.where(h>np.mean(h)+np.std(h))

    # --- GUI: one row per image (spectrogram, then h/v/b derivatives),
    # with a LinearRegionItem over each detected segment.
    app = QtGui.QApplication([])
    mw = QtGui.QMainWindow()
    mw.show()
    mw.resize(800, 600)
    win = pg.GraphicsLayoutWidget()
    mw.setCentralWidget(win)

    vb1 = win.addViewBox(enableMouse=False, enableMenu=False, row=0, col=0)
    im1 = pg.ImageItem(enableMouse=False)
    vb1.addItem(im1)
    im1.setImage(10. * np.log10(sg))

    vb2 = win.addViewBox(enableMouse=False, enableMenu=False, row=1, col=0)
    im2 = pg.ImageItem(enableMouse=False)
    vb2.addItem(im2)
    im2.setImage(h)
    for seg in segmentsh:
        a = pg.LinearRegionItem()
        a.setRegion([
            convertAmpltoSpec(seg[0], fs, 128),
            convertAmpltoSpec(seg[1], fs, 128)
        ])
        vb2.addItem(a, ignoreBounds=True)

    vb3 = win.addViewBox(enableMouse=False, enableMenu=False, row=2, col=0)
    im3 = pg.ImageItem(enableMouse=False)
    vb3.addItem(im3)
    im3.setImage(v)
    for seg in segmentsv:
        a = pg.LinearRegionItem()
        a.setRegion([
            convertAmpltoSpec(seg[0], fs, 128),
            convertAmpltoSpec(seg[1], fs, 128)
        ])
        vb3.addItem(a, ignoreBounds=True)

    vb4 = win.addViewBox(enableMouse=False, enableMenu=False, row=3, col=0)
    im4 = pg.ImageItem(enableMouse=False)
    vb4.addItem(im4)
    im4.setImage(b)
    for seg in segmentsb:
        a = pg.LinearRegionItem()
        a.setRegion([
            convertAmpltoSpec(seg[0], fs, 128),
            convertAmpltoSpec(seg[1], fs, 128)
        ])
        vb4.addItem(a, ignoreBounds=True)
    QtGui.QApplication.instance().exec_()
def add_segment(self):
    '''Add by segment snake'''
    # Grow the snake: read the front segment's canvas coords and insert
    # a new segment one cell back from its bottom-right corner.
    front = c.coords(self.segments[0].instance)
    new_x = front[2] - SEG_SIZE
    new_y = front[3] - SEG_SIZE
    self.segments.insert(0, Segment(new_x, new_y))
from tkinter import * from Segment import * LENGTH = 30 NUMBER = 20 pen = Tk() canvas = Canvas(pen, width=700, height=500, bg='black') canvas.pack(expand=1, fill='both') mouse = Vector(300, 300) canvas.bind("<Motion>", lambda event: mouse.set(event.x, event.y)) current = Segment((250, 250), LENGTH) current.follow(320, 200) current.show(canvas) for i in range(NUMBER): parent = Segment(current, LENGTH) parent.show(canvas) current = parent try: while True: canvas.delete('all') head = current current.follow(mouse.x, mouse.y) current.show(canvas) while current.child:
def spawn(self, num, _length, pos):
    """Create *num* segments at *pos* and record each one's position."""
    for idx in range(num):
        self.objects.append(s.Segment(self.screen, idx + 1, _length, pos))
        # Register the position of the idx-th object (matches the
        # original's indexing into self.objects).
        px = self.objects[idx].get_position()[0]
        py = self.objects[idx].get_position()[1]
        self.update_list(idx + 1, px, py)
def fullExtract(song_name):
    """Extract beats and chords from a .wav file and write a sibling
    .json file with the results.

    Pipeline (per segment; currently one segment spans the whole song):
    load + resample, harmonic separation, downbeat extraction, chord
    detection, chord/beat alignment and smoothing, then a JSON dump.
    Segments that fail alignment are marked useable=False.

    Args:
        song_name: path to the .wav file; falsy input is a no-op.
    """
    if not song_name:
        return
    json_song_name = song_name.replace(".wav", "") + ".json"
    with open(json_song_name, "w") as json_file:
        dump_data = []
        song_data, sr = dbt.load_and_resample(song_name)
        # Keep only the harmonic component for cleaner chord detection.
        song_data = librosa.effects.harmonic(song_data)
        # A single segment covering the whole song, for now.
        song_segments = [Segment.Segment(0, len(song_data) / sr, "")]
        for idx in range(len(song_segments)):
            print("Loaded song_data with sr", sr, "length",
                  len(song_data) / sr)
            print("Segment info:", song_segments[idx].toString(), "idx", idx)
            #Get Downbeats (4 beats per bar assumed)
            downbeats = dbt.extract_downbeat(4, song_data)
            time, beat_num = downbeats[:, 0].tolist(), downbeats[:, 1].tolist()
            print("Number of beats", len(time))
            #Get Chords
            chord_dict, chord_result = dbt.get_chords_old(
                time, song_data, sr, False)
            #Smoothen the chords
            print("Detected Chords", chord_result)
            new_chord_result, isAligned = Alignment.chord_alignment(
                chord_result)
            song_segments[idx].useable = isAligned
            #Squash the same beats together: collapse runs of repeated
            # chords, keeping only the first beat of each run.
            if isAligned:
                boolean_array = [False for i in range(len(new_chord_result))]
                for i in range(len(new_chord_result)):
                    if i == 0 or new_chord_result[i] != new_chord_result[i - 1]:
                        boolean_array[i] = True
                new_chords = []
                new_time = []
                for i in range(len(boolean_array)):
                    if boolean_array[i]:
                        new_chords.append(new_chord_result[i])
                        new_time.append(time[i])
                new_chord_result = new_chords
                time = new_time
            #Average time per beat
            avg_time_beat = 0
            for i in range(len(time) - 1):
                avg_time_beat += time[i + 1] - time[i]
            avg_time_beat = avg_time_beat / len(time)
            print("Average time_per_chord", avg_time_beat)
            if not isAligned:
                print("Failed to align chord")
            #Write to json object
            song_segments[idx].chords = new_chord_result
            song_segments[idx].downbeats = time
            song_segments[idx].average_time_per_beat = avg_time_beat
            beat_num = [1 for i in range(len(time))]
            #Write to chords and downbeat (for debug)
            dbt.print_and_save_chord(song_name, time,
                                     song_segments[idx].chords, str(idx))
            dbt.print_and_save_downbeat(song_name, time, beat_num, str(idx))
            dump_data.append(song_segments[idx].getJSON())
        json.dump(dump_data, json_file, indent=4)
def construct_weibo_feature():
    """Wire up a WeiboFeatureConstructor and run the mock text pass."""
    feature_constructor = wfc.WeiboFeatureConstructor(
        cnn.WeiboConnector(),
        cnn.WikiConnector(),
        seg.Segment(),
    )
    feature_constructor.get_weibo_text_mock()
def partition_segment(new_seg, seg_start, seg_end, rest_pupil_size, export_pupilinfo):
    """ A helper method for splitting a Segment object into new Segments and removing gaps of invalid samples

    One way to deal with a low quality Segment is to find the gaps of invalid samples within its
    "Datapoint"s and splitting the Segment into two Segments one from the beginnning of the Segment
    to the gap and another from after the gap to the end of the Segment.
    This can be done multiple times resulting multiple "Segment"s with higher quality. For example
    if a Segment S1 started at s1 and ended at e1 and had two invalid gaps between gs1-ge1 and
    gs2-ge2 milliseconds, this method will generate the following three segments
        SS1: starting at s1 and ending at gs1
        SS2: starting at ge1 and ending at gs2
        SS3: starting at ge2 and ending at e1

    NOTE(review): this function references names not among its
    parameters (all_data, fixation_data, event_data, segid, aoilist,
    prune_length, params) — it appears to be a nested helper lifted out
    of an enclosing __init__; confirm the enclosing scope supplies them.

    Args:
        new_seg: The Segment that is being split
        seg_start: An integer showing the start time of the segment in milliseconds
        seg_end: An integer showing the end time of the segment in milliseconds
        rest_pupil_size: rest pupil size for the current scene

    Returns:
        subsegments: a list of newly generated "Segment"s
        samp_inds: a list of tuples of the form (start, end) that detrmines the index of the start and end of each
            new Segment in the old Segment's all_data field
        fix_inds: a list of tuples of the form (start, end) that detrmines the index of the start and end of each
            new Segment in the old Segment's fixation_data field
    """
    timegaps = new_seg.getgaps()
    subsegments = []
    sub_segid = 0
    samp_inds = []
    fix_inds = []
    event_inds = []
    last_samp_idx = 0
    last_fix_idx = 0
    last_event_idx = 0
    sub_seg_time_start = seg_start
    for timebounds in timegaps:
        sub_seg_time_end = timebounds[0]  #end of this sub_seg is start of this gap
        last_samp_idx, all_start, all_end = get_chunk(all_data, last_samp_idx,
                                                      sub_seg_time_start,
                                                      sub_seg_time_end)
        last_fix_idx, fix_start, fix_end = get_chunk(fixation_data,
                                                     last_fix_idx,
                                                     sub_seg_time_start,
                                                     sub_seg_time_end)
        if event_data != None:
            last_event_idx, event_start, event_end = get_chunk(
                event_data, last_event_idx, sub_seg_time_start,
                sub_seg_time_end)
        sub_seg_time_start = timebounds[1]  #beginning of the next sub_seg is end of this gap
        # Only keep sub-segments that contain at least one fixation.
        if fix_end - fix_start > 0:
            try:
                if event_data != None:
                    new_sub_seg = Segment(
                        segid + "_" + str(sub_segid),
                        all_data[all_start:all_end],
                        fixation_data[fix_start:fix_end],
                        event_data=event_data[event_start:event_end],
                        aois=aoilist, prune_length=prune_length,
                        rest_pupil_size=rest_pupil_size,
                        export_pupilinfo=export_pupilinfo)
                else:
                    new_sub_seg = Segment(
                        segid + "_" + str(sub_segid),
                        all_data[all_start:all_end],
                        fixation_data[fix_start:fix_end],
                        event_data=None,
                        aois=aoilist, prune_length=prune_length,
                        rest_pupil_size=rest_pupil_size,
                        export_pupilinfo=export_pupilinfo)
            except Exception as e:
                # Skip sub-segments that fail to construct (unless
                # debugging, in which case re-raise).
                warn(str(e))
                if params.DEBUG:
                    raise
                else:
                    continue
        else:
            continue
        subsegments.append(new_sub_seg)
        samp_inds.append((all_start, all_end))
        fix_inds.append((fix_start, fix_end))
        if event_data != None:
            event_inds.append((event_start, event_end))
        sub_segid += 1
    # handling the last sub_seg
    sub_seg_time_end = seg_end  #end of last sub_seg is the end of seg
    last_samp_idx, all_start, all_end = get_chunk(all_data, last_samp_idx,
                                                  sub_seg_time_start,
                                                  sub_seg_time_end)
    last_fix_idx, fix_start, fix_end = get_chunk(fixation_data, last_fix_idx,
                                                 sub_seg_time_start,
                                                 sub_seg_time_end)
    if event_data != None:
        last_event_idx, event_start, event_end = get_chunk(
            event_data, last_event_idx, sub_seg_time_start, sub_seg_time_end)
    if fix_end - fix_start > 0:  #add the last sub_seg
        try:
            if event_data != None:
                new_sub_seg = Segment(
                    segid + "_" + str(sub_segid),
                    all_data[all_start:all_end],
                    fixation_data[fix_start:fix_end],
                    event_data=event_data[event_start:event_end],
                    aois=aoilist, prune_length=prune_length,
                    rest_pupil_size=rest_pupil_size,
                    export_pupilinfo=export_pupilinfo)
            else:
                new_sub_seg = Segment(
                    segid + "_" + str(sub_segid),
                    all_data[all_start:all_end],
                    fixation_data[fix_start:fix_end],
                    event_data=None,
                    aois=aoilist, prune_length=prune_length,
                    rest_pupil_size=rest_pupil_size,
                    export_pupilinfo=export_pupilinfo)
        except Exception as e:
            warn(str(e))
            if params.DEBUG:
                raise
            else:
                new_sub_seg = None
        if new_sub_seg != None:
            subsegments.append(new_sub_seg)
            samp_inds.append((all_start, all_end))
            fix_inds.append((fix_start, fix_end))
            if event_data != None:
                event_inds.append((event_start, event_end))
    #end of handling the last sub_seg
    return subsegments, samp_inds, fix_inds, event_inds
def __init__(self, scid, seglist, all_data, fixation_data, event_data = None, Segments = None, aoilist = None,
             prune_length= None, require_valid = True, auto_partition = False, rest_pupil_size = 0, export_pupilinfo = False):
    """Build a Scene: construct (or adopt) its Segments and compute all Scene-level features.

    Args:
        scid: A string containing the id of the Scene.

        seglist: a list of tuples of the form (segid, start, end) defining the segments
            *Note: this method of defining segments is implemented to make batch
            processing of files defining segments easier

        all_data: a list of "Datapoint"s which make up this Scene.

        fixation_data: a list of "Fixation"s which make up this Scene.

        event_data: if not None, a list of event records for this Scene; it is sliced
            with get_chunk alongside the sample/fixation streams and forwarded to each
            Segment.

        Segments: a list of "Segment"s which belong to this Scene; if not None, the
            seglist-based Segment construction below is skipped entirely.

        aoilist: If not None, a list of "AOI"s.

        prune_length: If not None, an integer that specifies the time interval (in ms)
            from the begining of each Segment of this Scene which samples are considered
            in calculations. This can be used if, for example, you only wish to consider
            data in the first 1000 ms of each Segment. In this case (prune_length = 1000),
            all data beyond the first 1000ms of the start of the "Segment"s will be
            disregarded.

        require_valid: a boolean determining whether invalid "Segment"s will be ignored
            when calculating the features or not. default = True

        auto_partition: a boolean flag determining whether EMDAT should automatically
            split the "Segment"s which have low sample quality into new sub "Segment"s
            discarding the largest invalid sample gap in the "Segment". default = False

        rest_pupil_size: rest pupil size for the current scene

        export_pupilinfo: whether raw pupil info is merged from the Segments for export.

    Yields:
        a Scene object
    """
    ########################################
    def partition_segment(new_seg, seg_start, seg_end, rest_pupil_size, export_pupilinfo):
        """A helper method for splitting a Segment object into new Segments and removing
        gaps of invalid samples.

        One way to deal with a low quality Segment is to find the gaps of invalid samples
        within its "Datapoint"s and splitting the Segment into two Segments one from the
        beginnning of the Segment to the gap and another from after the gap to the end of
        the Segment. This can be done multiple times resulting multiple "Segment"s with
        higher quality. For example if a Segment S1 started at s1 and ended at e1 and had
        two invalid gaps between gs1-ge1 and gs2-ge2 milliseconds, this method will
        generate the following three segments
            SS1: starting at s1 and ending at gs1
            SS2: starting at ge1 and ending at gs2
            SS3: starting at ge2 and ending at e1

        Args:
            new_seg: The Segment that is being split
            seg_start: An integer showing the start time of the segment in milliseconds
            seg_end: An integer showing the end time of the segment in milliseconds
            rest_pupil_size: rest pupil size for the current scene

        Returns:
            subsegments: a list of newly generated "Segment"s
            samp_inds: a list of tuples of the form (start, end) that detrmines the index
                of the start and end of each new Segment in the old Segment's all_data field
            fix_inds: a list of tuples of the form (start, end) that detrmines the index
                of the start and end of each new Segment in the old Segment's
                fixation_data field
        """
        timegaps = new_seg.getgaps()
        subsegments = []
        sub_segid = 0
        samp_inds = []
        fix_inds = []
        event_inds = []
        # running cursors into each stream so get_chunk resumes where it left off
        last_samp_idx = 0
        last_fix_idx = 0
        last_event_idx = 0
        sub_seg_time_start = seg_start
        # one candidate sub-Segment per stretch of data between consecutive gaps
        for timebounds in timegaps:
            sub_seg_time_end = timebounds[0] #end of this sub_seg is start of this gap
            last_samp_idx, all_start, all_end = get_chunk(all_data, last_samp_idx, sub_seg_time_start, sub_seg_time_end)
            last_fix_idx, fix_start, fix_end = get_chunk(fixation_data, last_fix_idx, sub_seg_time_start, sub_seg_time_end)
            if event_data != None:
                last_event_idx, event_start, event_end = get_chunk(event_data, last_event_idx, sub_seg_time_start, sub_seg_time_end)
            sub_seg_time_start = timebounds[1] #beginning of the next sub_seg is end of this gap
            if fix_end - fix_start > 0:
                try:
                    if event_data != None:
                        new_sub_seg = Segment(segid+"_"+str(sub_segid), all_data[all_start:all_end],
                                              fixation_data[fix_start:fix_end],
                                              event_data=event_data[event_start:event_end], aois=aoilist,
                                              prune_length=prune_length, rest_pupil_size = rest_pupil_size,
                                              export_pupilinfo = export_pupilinfo)
                    else:
                        new_sub_seg = Segment(segid+"_"+str(sub_segid), all_data[all_start:all_end],
                                              fixation_data[fix_start:fix_end], event_data=None, aois=aoilist,
                                              prune_length=prune_length, rest_pupil_size = rest_pupil_size,
                                              export_pupilinfo = export_pupilinfo)
                except Exception as e:
                    # a sub-Segment that fails to build is skipped (unless DEBUG re-raises)
                    warn(str(e))
                    if params.DEBUG:
                        raise
                    else:
                        continue
            else:
                # no fixations in this stretch: drop it
                continue
            subsegments.append(new_sub_seg)
            samp_inds.append((all_start, all_end))
            fix_inds.append((fix_start, fix_end))
            if event_data != None:
                event_inds.append((event_start, event_end))
            sub_segid += 1
        # handling the last sub_seg
        sub_seg_time_end = seg_end #end of last sub_seg is the end of seg
        last_samp_idx, all_start, all_end = get_chunk(all_data, last_samp_idx, sub_seg_time_start, sub_seg_time_end)
        last_fix_idx, fix_start, fix_end = get_chunk(fixation_data, last_fix_idx, sub_seg_time_start, sub_seg_time_end)
        if event_data != None:
            last_event_idx, event_start, event_end = get_chunk(event_data, last_event_idx, sub_seg_time_start, sub_seg_time_end)
        if fix_end - fix_start > 0: #add the last sub_seg
            try:
                if event_data != None:
                    new_sub_seg = Segment(segid+"_"+str(sub_segid), all_data[all_start:all_end],
                                          fixation_data[fix_start:fix_end],
                                          event_data=event_data[event_start:event_end], aois=aoilist,
                                          prune_length=prune_length, rest_pupil_size = rest_pupil_size,
                                          export_pupilinfo = export_pupilinfo)
                else:
                    new_sub_seg = Segment(segid+"_"+str(sub_segid), all_data[all_start:all_end],
                                          fixation_data[fix_start:fix_end], event_data=None, aois=aoilist,
                                          prune_length=prune_length, rest_pupil_size = rest_pupil_size,
                                          export_pupilinfo = export_pupilinfo)
            except Exception as e:
                warn(str(e))
                if params.DEBUG:
                    raise
                else:
                    new_sub_seg = None
            if new_sub_seg != None:
                subsegments.append(new_sub_seg)
                samp_inds.append((all_start, all_end))
                fix_inds.append((fix_start, fix_end))
                if event_data != None:
                    event_inds.append((event_start, event_end))
        #end of handling the last sub_seg
        return subsegments, samp_inds, fix_inds, event_inds
    ########################################
    if len(all_data) <= 0:
        raise Exception('A scene with no sample data!')
    if Segments == None:
        # build Segments from seglist (one Segment per (segid, start, end) tuple)
        self.segments = []
        # print "seglist",seglist
        for (segid, start, end) in seglist:
            # NOTE(review): unconditional debug output (not guarded by params.DEBUG)
            print "segid, start, end:",segid, start, end
            if prune_length != None:
                # clamp the segment to the first prune_length ms
                end = min(end, start+prune_length)
            _, all_start, all_end = get_chunk(all_data, 0, start, end)
            _, fix_start, fix_end = get_chunk(fixation_data, 0, start, end)
            if event_data != None:
                _, event_start, event_end = get_chunk(event_data, 0, start, end)
            if fix_end - fix_start > 0:
                try:
                    if event_data != None:
                        new_seg = Segment(segid, all_data[all_start:all_end],
                                          fixation_data[fix_start:fix_end],
                                          event_data=event_data[event_start:event_end], aois=aoilist,
                                          prune_length=prune_length, rest_pupil_size = rest_pupil_size,
                                          export_pupilinfo = export_pupilinfo)
                    else:
                        new_seg = Segment(segid, all_data[all_start:all_end],
                                          fixation_data[fix_start:fix_end], event_data=None, aois=aoilist,
                                          prune_length=prune_length, rest_pupil_size = rest_pupil_size,
                                          export_pupilinfo = export_pupilinfo)
                except Exception as e:
                    warn(str(e))
                    if params.DEBUG:
                        raise
                    else:
                        continue
            else:
                # no fixations in this interval: skip the segment
                continue
            if (new_seg.largest_data_gap > params.MAX_SEG_TIMEGAP) and auto_partition: #low quality segment that needs to be partitioned!
                new_segs, samp_inds, fix_inds, event_inds = partition_segment(new_seg, start, end,
                                                                              rest_pupil_size, export_pupilinfo=export_pupilinfo)
                if event_data != None:
                    for nseg, samp, fix, eve in zip(new_segs, samp_inds, fix_inds, event_inds):
                        if nseg.length > params.MINSEGSIZE:
                            nseg.set_indices(samp[0],samp[1],fix[0],fix[1],eve[0],eve[1])
                            self.segments.append(nseg)
                else:
                    for nseg, samp, fix in zip(new_segs, samp_inds, fix_inds):
                        if nseg.length > params.MINSEGSIZE:
                            nseg.set_indices(samp[0],samp[1],fix[0],fix[1])
                            self.segments.append(nseg)
            else: #good quality segment OR no auto_partition
                if event_data != None:
                    new_seg.set_indices(all_start,all_end,fix_start,fix_end,event_start,event_end)
                else:
                    new_seg.set_indices(all_start,all_end,fix_start,fix_end)
                self.segments.append(new_seg)
    else:
        self.segments = Segments #segments are already generated
    self.require_valid_Segments = require_valid
    if require_valid: #filter out the invalid Segments
        segments = filter(lambda x:x.is_valid,self.segments)
    else:
        segments = self.segments
    if len(segments) == 0:
        raise Exception('no segments in scene %s!' %(scid))
    # gather per-segment slices and locate the earliest/latest segments
    fixationlist = []
    eventlist = []
    sample_list = []
    totalfixations = 0
    firstsegtime = float('infinity')
    endsegtime = float(0)
    firstseg = None
    for seg in segments:
        sample_st,sample_end,fix_start,fix_end,event_st,event_end = seg.get_indices()
        if params.DEBUG:
            print "sample_st,sample_end,fix_start,fix_end",sample_st,sample_end,fix_start,fix_end,event_st,event_end
        sample_list.append(all_data[sample_st:sample_end])
        fixationlist.append(fixation_data[fix_start:fix_end])
        totalfixations += len(fixationlist[-1])
        if event_data != None:
            eventlist.append(event_data[event_st:event_end])
            # NOTE(review): totalevents is overwritten each iteration and never read
            totalevents = len(eventlist[-1])
        if seg.start < firstsegtime:
            firstsegtime = seg.start
            firstseg = seg
        if seg.end > endsegtime:
            endsegtime = seg.end
            endseg = seg
    self.firstseg = firstseg
    self.endseg = endseg
    self.scid = scid
    self.features = {}
    self.largest_data_gap = maxfeat(self.segments,'largest_data_gap') #self.segments is used to calculate validity of the scenes instead of segments which is only valid segments
    self.proportion_valid = weightedmeanfeat(self.segments,'numsamples','proportion_valid') #self.segments is used to calculate validity of the scenes instead of segments which is only valid segments
    self.proportion_valid_fix = weightedmeanfeat(self.segments,'numsamples','proportion_valid_fix') #self.segments is used to calculate validity of the scenes instead of segments which is only valid segments
    self.validity1 = self.calc_validity1()
    self.validity2 = self.calc_validity2()
    self.validity3 = self.calc_validity3()
    self.is_valid = self.get_validity()
    self.length = sumfeat(segments,'length')
    if self.length == 0:
        raise Exception('Zero length segments!')
    self.features['numsegments'] = len(segments)
    self.features['length'] = self.length
    self.start = minfeat(segments,'start')
    self.numfixations = sumfeat(segments,'numfixations')
    self.end = maxfeat(segments,'end')
    self.numsamples = sumfeat(segments, 'numsamples')
    self.features['numsamples'] = self.numsamples
    # NOTE(review): numfixations is recomputed here with identical arguments
    self.numfixations = sumfeat(segments, 'numfixations')
    self.features['numfixations'] = self.numfixations
    if prune_length == None:
        # sanity check: segment-reported fixation counts must match the slices
        if self.numfixations != totalfixations:
            raise Exception('error in fixation count for scene:'+self.scid)
            #warn ('error in fixation count for scene:'+self.scid)
    self.features['fixationrate'] = float(self.numfixations) / self.length
    if self.numfixations > 0:
        self.features['meanfixationduration'] = weightedmeanfeat(segments,'numfixations',"features['meanfixationduration']")
        # flatten fixationlist (list of lists) before taking the stddev of durations
        self.features['stddevfixationduration'] = stddev(map(lambda x: float(x.fixationduration), reduce(lambda x,y: x+y ,fixationlist)))##
        self.features['sumfixationduration'] = sumfeat(segments, "features['sumfixationduration']")
        self.features['fixationrate'] = float(self.numfixations)/self.length
        distances = self.calc_distances(fixationlist)
        abs_angles = self.calc_abs_angles(fixationlist)
        rel_angles = self.calc_rel_angles(fixationlist)
    else:
        self.features['meanfixationduration'] = 0
        self.features['stddevfixationduration'] = 0
        self.features['sumfixationduration'] = 0
        self.features['fixationrate'] = 0
        distances = []
    # scan-path distance / angle features
    if len(distances) > 0:
        self.features['meanpathdistance'] = mean(distances)
        self.features['sumpathdistance'] = sum(distances)
        self.features['stddevpathdistance'] = stddev(distances)
        self.features['eyemovementvelocity'] = self.features['sumpathdistance']/self.length
        self.features['sumabspathangles'] = sum(abs_angles)
        self.features['meanabspathangles'] = mean(abs_angles)
        self.features['abspathanglesrate'] = sum(abs_angles)/self.length
        self.features['stddevabspathangles'] = stddev(abs_angles)
        self.features['sumrelpathangles'] = sum(rel_angles)
        self.features['relpathanglesrate'] = sum(rel_angles)/self.length
        self.features['meanrelpathangles'] = mean(rel_angles)
        self.features['stddevrelpathangles'] = stddev(rel_angles)
    else:
        self.features['meanpathdistance'] = 0
        self.features['sumpathdistance'] = 0
        self.features['stddevpathdistance'] = 0
        self.features['eyemovementvelocity'] = 0
        self.features['sumabspathangles'] = 0
        self.features['abspathanglesrate'] = 0
        self.features['meanabspathangles']= 0
        self.features['stddevabspathangles']= 0
        self.features['sumrelpathangles'] = 0
        self.features['relpathanglesrate'] = 0
        self.features['meanrelpathangles']= 0
        self.features['stddevrelpathangles'] = 0
    """ calculate pupil dilation features (no rest pupil size adjustments yet)"""
    self.numpupilsizes = sumfeat(segments,'numpupilsizes')
    self.adjvalidpupilsizes = mergevalues(segments, 'adjvalidpupilsizes')
    if self.numpupilsizes > 0: # check if scene has any pupil data
        if export_pupilinfo:
            self.pupilinfo_for_export = mergevalues(segments, 'pupilinfo_for_export')
        self.features['meanpupilsize'] = weightedmeanfeat(segments, 'numpupilsizes', "features['meanpupilsize']")
        self.features['stddevpupilsize'] = stddev(self.adjvalidpupilsizes)
        self.features['maxpupilsize'] = maxfeat(segments, "features['maxpupilsize']")
        self.features['minpupilsize'] = minfeat(segments, "features['minpupilsize']")
        self.features['startpupilsize'] = self.firstseg.features['startpupilsize']
        self.features['endpupilsize'] = self.endseg.features['endpupilsize']
    else:
        self.pupilinfo_for_export = []
        self.features['meanpupilsize'] = 0
        self.features['stddevpupilsize'] = 0
        self.features['maxpupilsize'] = 0
        self.features['minpupilsize'] = 0
        self.features['startpupilsize'] = 0
        self.features['endpupilsize'] = 0
    """end """
    # head-distance-from-screen features
    self.numdistances = sumfeat(segments,'numdistances') #Distance
    self.distances_from_screen = mergevalues(segments, 'distances_from_screen')
    if self.numdistances > 0: # check if scene has any pupil data
        self.features['meandistance'] = weightedmeanfeat(segments, 'numdistances', "features['meandistance']")
        self.features['stddevdistance'] = stddev(self.distances_from_screen)
        self.features['maxdistance'] = maxfeat(segments, "features['maxdistance']")
        self.features['mindistance'] = minfeat(segments, "features['mindistance']")
        self.features['startdistance'] = self.firstseg.features['startdistance']
        self.features['enddistance'] = self.endseg.features['enddistance']
    else:
        self.features['meandistance'] = 0
        self.features['stddevdistance'] = 0
        self.features['maxdistance'] = 0
        self.features['mindistance'] = 0
        self.features['startdistance'] = 0
        self.features['enddistance'] = 0
    """end """
    # mouse/keyboard event features
    if event_data != None:
        self.features['numevents'] = sumfeat(segments,'numevents')
        self.features['numleftclic'] = sumfeat(segments,"features['numleftclic']")
        self.features['numrightclic'] = sumfeat(segments, "features['numrightclic']")
        self.features['numdoubleclic'] = sumfeat(segments, "features['numdoubleclic']")
        self.features['numkeypressed'] = sumfeat(segments, "features['numkeypressed']")
        self.features['leftclicrate'] = float(self.features['numleftclic'])/self.length
        self.features['rightclicrate'] = float(self.features['numrightclic'])/self.length
        self.features['doubleclicrate'] = float(self.features['numdoubleclic'])/self.length
        self.features['keypressedrate'] = float(self.features['numkeypressed'])/self.length
        self.features['timetofirstleftclic'] = self.firstseg.features['timetofirstleftclic']
        self.features['timetofirstrightclic'] = self.firstseg.features['timetofirstrightclic']
        self.features['timetofirstdoubleclic'] = self.firstseg.features['timetofirstdoubleclic']
        self.features['timetofirstkeypressed'] = self.firstseg.features['timetofirstkeypressed']
    else:
        self.features['numevents'] = 0
        self.features['numleftclic'] = 0
        self.features['numrightclic'] = 0
        self.features['numdoubleclic'] = 0
        self.features['numkeypressed'] = 0
        self.features['leftclicrate'] = 0
        self.features['rightclicrate'] = 0
        self.features['doubleclicrate'] = 0
        self.features['keypressedrate'] = 0
        self.features['timetofirstleftclic'] = -1
        self.features['timetofirstrightclic'] = -1
        self.features['timetofirstdoubleclic'] = -1
        self.features['timetofirstkeypressed'] = -1
    """end """
    self.has_aois = False
    if aoilist:
        self.set_aois(segments, aoilist)
    self.features['aoisequence'] = self.merge_aoisequences(segments)
def detectClicks():
    """Debug/demo routine: detect broadband click artefacts in a wav file.

    Builds a binary spectrogram by marking, per frequency band, the cells whose
    energy exceeds that band's 90th percentile; a time frame where >75% of the
    bands are marked is treated as a click. Detected clicks are shown as shaded
    regions over the binary spectrogram in a pyqtgraph window.

    Side effects only: reads a hard-coded wav path, opens a Qt window, and
    blocks in the Qt event loop until it is closed. Returns nothing.
    """
    import SignalProc
    import pyqtgraph as pg
    from pyqtgraph.Qt import QtCore, QtGui
    import wavio
    from scipy.signal import medfilt  # kept: used by the optional smoothing below

    wavobj = wavio.read(
        'D:\AviaNZ\Sound_Files\Clicks\Lake_Thompson__01052018_SOUTH1047849_01052018_High_20180508_'
        '20180508_200506.wav')  # very close kiwi with steady wind
    fs = wavobj.rate
    data = wavobj.data  # [:20*fs]
    # BUGFIX: was `data.dtype is not 'float'` -- an identity comparison between
    # a numpy dtype object and a str literal, which is always True, so the
    # guard never actually skipped the cast. Use equality instead.
    if data.dtype != 'float':
        data = data.astype('float')  # / 32768.0
    if np.shape(np.shape(data))[0] > 1:
        data = data[:, 0]  # keep first channel only

    sp = SignalProc.SignalProc(data, fs, 128, 128)
    sg = sp.spectrogram(data, multitaper=False)
    s = Segment(data, sg, sp, fs, 128)

    # For each frq band get sections where energy exceeds the 90th percentile
    # and generate a binary spectrogram.
    sgb = np.zeros((np.shape(sg)))
    for y in range(np.shape(sg)[1]):
        ey = sg[:, y]
        # em = medfilt(ey, 15)  # optional smoothing, currently unused
        ep = np.percentile(ey, 90)
        sgb[np.where(ey > ep), y] = 1

    # If lots of frq bands got 1 then predict a click.
    clicks = []
    for x in range(np.shape(sg)[0]):
        if np.sum(sgb[x, :]) > np.shape(sgb)[1] * 0.75:
            clicks.append(x)

    app = QtGui.QApplication([])
    mw = QtGui.QMainWindow()
    mw.show()
    mw.resize(1200, 500)
    win = pg.GraphicsLayoutWidget()
    mw.setCentralWidget(win)
    vb1 = win.addViewBox(enableMouse=False, enableMenu=False, row=0, col=0)
    im1 = pg.ImageItem(enableMouse=False)
    vb1.addItem(im1)
    im1.setImage(sgb)
    if len(clicks) > 0:
        clicks = s.identifySegments(clicks, minlength=1)
    for seg in clicks:
        a = pg.LinearRegionItem()
        a.setRegion([
            convertAmpltoSpec(seg[0], fs, 128),
            convertAmpltoSpec(seg[1], fs, 128)
        ])
        vb1.addItem(a, ignoreBounds=True)
    QtGui.QApplication.instance().exec_()
def dropEvent(self, event):
    """Handle a drop onto the score view.

    Three cases, in order:
      1. self.dragElement is set (drag originated from the palette): dispatch
         on the element type and add it to the score via cmdAdd1 / el.drop().
      2. The mime data carries file URLs: insert the file as an image element.
      3. The mime data is a symbol/staff list: paste it into the hit measure.
    All score mutations are bracketed by startCmd()/endCmd().
    """
    # map the drop position from view coordinates into score coordinates
    pos = QPointF(self.imatrix.map(QPointF(event.pos())))
    if self.dragElement:
        self._score.startCmd()
        self.dragElement.setScore(self._score)
        self._score.addRefresh(self.dragElement.abbox())
        # line-like spanner elements are added directly with cmdAdd1
        if self.dragElement.type() == ElementType.VOLTA or\
           self.dragElement.type() == ElementType.OTTAVA or \
           self.dragElement.type() == ElementType.TRILL or\
           self.dragElement.type() == ElementType.PEDAL or\
           self.dragElement.type() == ElementType.DYNAMIC or\
           self.dragElement.type() == ElementType.HAIRPIN or\
           self.dragElement.type() == ElementType.TEXTLINE:
            self.dragElement.setScore(self.score())
            self.score().cmdAdd1(self.dragElement, pos, self.dragOffset)
            # NOTE(review): `self.event` here (vs the `event` parameter used in
            # the other branches) looks like a bug -- confirm against the C++
            # original this was ported from.
            self.event.acceptProposedAction()
        elif self.dragElement.type() == ElementType.SYMBOL or\
             self.dragElement.type() == ElementType.IMAGE:
            el = self.elementAt(pos)
            if el == 0:
                # nothing under the cursor: fall back to the enclosing measure
                staffIdx = -1
                seg = Segment()
                tick = 0
                el = self._score.pos2measure(pos, tick, staffIdx, 0, seg, 0)
                if el == 0:
                    # NOTE(review): execution continues and dereferences `el`
                    # after this -- a `return` seems to be missing here.
                    print "cannot drop here\n"
                    del self.dragElement
            self._score.addRefresh(el.abbox())
            self._score.addRefresh(self.dragElement.abbox())
            dropElement = el.drop(self, pos, self.dragOffset, self.dragElement)
            self._score.addRefresh(el.abbox())
            if dropElement:
                self._score.select(dropElement, SelectType.SELECT_SINGLE, 0)
                self._score.addRefresh(dropElement.abbox())
            event.acceptProposedAction()
        # all remaining droppable types: find the first element under the
        # cursor that accepts the drop, then delegate to el.drop()
        elif self.dragElement.type() == ElementType.KEYSIG or\
             self.dragElement.type() == ElementType.CLEF or\
             self.dragElement.type() == ElementType.TIMESIG or\
             self.dragElement.type() == ElementType.BAR_LINE or\
             self.dragElement.type() == ElementType.ARPEGGIO or\
             self.dragElement.type() == ElementType.BREATH or\
             self.dragElement.type() == ElementType.GLISSANDO or\
             self.dragElement.type() == ElementType.BRACKET or\
             self.dragElement.type() == ElementType.ARTICULATION or\
             self.dragElement.type() == ElementType.ACCIDENTAL or\
             self.dragElement.type() == ElementType.TEXT or\
             self.dragElement.type() == ElementType.TEMPO_TEXT or\
             self.dragElement.type() == ElementType.STAFF_TEXT or\
             self.dragElement.type() == ElementType.NOTEHEAD or\
             self.dragElement.type() == ElementType.TREMOLO or\
             self.dragElement.type() == ElementType.LAYOUT_BREAK or\
             self.dragElement.type() == ElementType.MARKER or\
             self.dragElement.type() == ElementType.JUMP or\
             self.dragElement.type() == ElementType.REPEAT_MEASURE or\
             self.dragElement.type() == ElementType.ICON or\
             self.dragElement.type() == ElementType.NOTE or\
             self.dragElement.type() == ElementType.CHORD or\
             self.dragElement.type() == ElementType.SPACER or\
             self.dragElement.type() == ElementType.SLUR or\
             self.dragElement.type() == ElementType.ACCIDENTAL_BRACKET:
            el = Element()
            for e in self.elementsAt(pos):
                if e.acceptDrop(self, pos, self.dragElement.type(), self.dragElement.subtype()):
                    el = e
                    break
            if not el:
                # NOTE(review): as above, `el` is still used after this --
                # a missing `return`?
                print "cannot drop here\n"
                del self.dragElement
            self._score.addRefresh(el.abbox())
            self._score.addRefresh(self.dragElement.abbox())
            dropElement = el.drop(self, pos, self.dragOffset, self.dragElement)
            self._score.addRefresh(el.abbox())
            if dropElement:
                if not self._score.noteEntryMode():
                    self._score.select(dropElement, SelectType.SELECT_SINGLE, 0)
                self._score.addRefresh(dropElement.abbox())
            event.acceptProposedAction()
        else:
            del self.dragElement
        self.dragElement = 0
        self.setDropTarget(0)
        self.score().endCmd()
        return
    # case 2: a file was dropped -- insert it as an image element
    if event.mimeData().hasUrls():
        ul = event.mimeData().urls()
        u = ul.front()
        if u.scheme() == "file":
            fi = QFileInfo(u.path())
            s = Image()
            suffix = fi.suffix().toLower()
            if suffix == "svg":
                s = SvgImage(self.score())
            elif suffix == "jpg" or suffix == "png" or suffix == "gif" or suffix == "xpm":
                s = RasterImage(self.score())
            else:
                # unsupported file type: ignore the drop
                return
            self._score.startCmd()
            s.setPath(u.toLocalFile())
            el = self.elementAt(pos)
            if el and (el.type() == ElementType.NOTE or el.type() == ElementType.REST):
                # attach the image to the note/rest under the cursor
                s.setTrack(el.track())
                if el.type() == ElementType.NOTE:
                    note = el
                    s.setTick(note.chord().tick())
                    s.setParent(note.chord().segment().measure())
                else:
                    rest = el
                    s.setTick(rest.tick())
                    s.setParent(rest.segment().measure())
                self.score().undoAddElement(s)
            else:
                self.score().cmdAddBSymbol(s, pos, self.dragOffset)
            event.acceptProposedAction()
            self.score().endCmd()
            self.setDropTarget(0)
            return
        return
    # case 3: symbol list / staff list mime payload (XML document)
    md = event.mimeData()
    data = QFileInfo()
    if md.hasFormat(mimeSymbolListFormat):
        etype = ElementType.ELEMENT_LIST
        data = md.data(mimeSymbolListFormat)
    elif md.hasFormat(mimeStaffListFormat):
        etype = ElementType.STAFF_LIST
        data = md.data(mimeStaffListFormat)
    else:
        print "cannot drop this object: unknown mime type\n"
        sl = md.formats()
        for s in sl:
            print "  %s\n" %s.toAscii().data()
        self._score.end()
        return
    doc = QDomDocument()
    (ok, err, line, column) = doc.setContent(data)
    if not ok:
        qWarning("error reading drag data at %d/%d: %s\n %s\n" %(line, column, err.toAscii().data(), data.data()))
        return
    docName = "--"
    el = self.elementAt(pos)
    # only a measure is a valid paste target for list payloads
    if el == 0 or el.type() != ElementType.MEASURE:
        self.setDropTarget(0)
        return
    measure = el
    if etype == ElementType.ELEMENT_LIST:
        # NOTE(review): element-list drop is not implemented, only logged
        print "drop element list\n"
    elif etype == ElementType.MEASURE_LIST or etype == ElementType.STAFF_LIST:
        self._score.startCmd()
        s = measure.system()
        idx = s.y2staff(pos.y())
        if idx != -1:
            # advance to the first chord/rest segment of the measure
            seg = measure.first()
            while seg.subtype() != SegmentType.SegChordRest:
                seg = seg.next()
            self.score().pasteStaff(doc.documentElement(), seg.element(idx * VOICES))
        event.acceptProposedAction()
        self._score.setLayoutAll(True)
        self._score.endCmd()
    self.setDropTarget(0)
def showSegs():
    """Debug/demo routine: run several segmentation methods on one wav file and
    display each result as shaded regions over its own spectrogram row.

    Methods compared (one row each): FIR at two thresholds, median clipping at
    two thresholds, yin pitch-based segmentation, a merge of two of the above,
    and wavelet segmentation.

    Tuning notes from experimentation:
      FIR: threshold doesn't matter much, but low is better (0.01).
      Amplitude: not great, will have to work on width and abs if want to use it (threshold about 0.6)
      Power: OK, but threshold matters (0.5)
      Median clipping: OK, threshold of 3 fine.
      Onsets: Threshold of 4.0 was fine, lower not. Still no offsets!
      Yin: Threshold 0.9 is pretty good
      Energy: Not great, but thr 1.0

    Side effects only: reads 'Sound Files/kiwi_1min.wav', opens a pyqtgraph
    window, and blocks in the Qt event loop until it is closed.
    """
    import pyqtgraph as pg
    from pyqtgraph.Qt import QtCore, QtGui
    import wavio
    import WaveletSegment
    from time import time
    import SignalProc

    wavobj = wavio.read('Sound Files/kiwi_1min.wav')
    fs = wavobj.rate
    data = wavobj.data  # [:20*fs]
    # BUGFIX: was `data.dtype is not 'float'` -- an identity comparison between
    # a numpy dtype object and a str literal, which is always True, so the
    # guard never actually skipped the cast. Use equality instead.
    if data.dtype != 'float':
        data = data.astype('float')  # / 32768.0
    if np.shape(np.shape(data))[0] > 1:
        data = data[:, 0]  # keep first channel only

    sp = SignalProc.SignalProc(data, fs, 256, 128)
    sg = sp.spectrogram(data, multitaper=False)
    s = Segment(data, sg, sp, fs, 50)

    ts = time()
    s1 = s.checkSegmentLength(s.segmentByFIR(0.1))
    s2 = s.checkSegmentLength(s.segmentByFIR(0.01))
    s3 = s.checkSegmentLength(s.medianClip(3.0))
    s4 = s.checkSegmentLength(s.medianClip(2.0))
    s5, p, t = s.yin(100, thr=0.5, returnSegs=True)
    s5 = s.checkSegmentLength(s5)
    s6 = s.mergeSegments(s2, s4)
    ws = WaveletSegment.WaveletSegment()
    s7 = ws.waveletSegment_test(None, data, fs, None, 'Kiwi', False)
    # print('Took {}s'.format(time() - ts))

    app = QtGui.QApplication([])
    mw = QtGui.QMainWindow()
    mw.show()
    mw.resize(800, 600)
    win = pg.GraphicsLayoutWidget()
    mw.setCentralWidget(win)

    # One spectrogram row per method. Previously this was seven verbatim copies
    # of the same viewbox/image/region code; folded into loops.
    results = [s1, s2, s3, s4, s5, s6, s7]
    logsg = 10. * np.log10(sg)  # hoisted: identical image for every row
    viewboxes = []
    for row in range(len(results)):
        vb = win.addViewBox(enableMouse=False, enableMenu=False, row=row, col=0)
        im = pg.ImageItem(enableMouse=False)
        vb.addItem(im)
        im.setImage(logsg)
        viewboxes.append(vb)

    print("====")
    for segs, vb in zip(results, viewboxes):
        print(segs)
        for seg in segs:
            a = pg.LinearRegionItem()
            # NOTE(review): the spectrogram was built with window 256 but the
            # amplitude-to-spectrogram conversion uses 128 -- confirm the
            # intended hop/increment.
            a.setRegion([
                convertAmpltoSpec(seg[0], fs, 128),
                convertAmpltoSpec(seg[1], fs, 128)
            ])
            vb.addItem(a, ignoreBounds=True)

    QtGui.QApplication.instance().exec_()
#Things to add: So...kinda big problem. we dont want to delete and redraw the item each time when we can just modify the points. We create a container for each segment that holds its line, cylinder,cube etc...We then specifically modify these segments #Current Bugs: #The cylinder should overlap the lines not the other way around ''' #Unit vectors in x,y,z direction for convenience ex = np.matrix([[1], [0], [0]]) ey = np.matrix([[0], [1], [0]]) ez = np.matrix([[0], [0], [1]]) ############################# #Create the segments that make up robot. Segment is initialized in the following form # sg.Segment(type of joint, length, unit vector, zero configuration angle) s3 = sg.Segment(0, 0, ez, 0) s1 = sg.Segment(2, 1.60, ez, 0) s6 = sg.Segment(0, 0, ey, 0) s2 = sg.Segment(2, 2.00, ez, 0) s7 = sg.Segment(1, 1, ey, 0) s4 = sg.Segment(2, 1.00, ex, 0) #Organize segments into a list, order matters! segments = [s3, s1, s6, s2, s7, s4] #Organize zero config angles, order matters! Q = [-np.pi / 4, 0, 0, 0, 0.5, 0] #construct robot arm from segment list and zero config list r1 = ra.RobotArm(segments, Q)