# --- Detection pass A: crop, upscale, detect, convert to notes, merge ---
# NOTE(review): this chunk is nearly identical to the following one (pass B);
# looks like merge/paste duplication — confirm both passes are intended.
print(f'cvt color {time.time() - start_time:f}')

# Crop the region of interest (rows 300:600, cols 200:600).
img_two = img_two[300:600, 200:600]
# Upscale 2x so the detector sees larger symbols.
img_two = cv2.resize(img_two, None, fx=2, fy=2)

# Run YOLO detection on the prepared image.
# (An alternative detector, detectMusic.yoloDetect(img_two), was tried and
# removed; yoloDetect_net with the preloaded `net` is the live path.)
detectionResults = detectMusic.yoloDetect_net(img_two, net)
print(f'detection {time.time() - start_time:f}')
print("detectionResults", detectionResults)

# Convert the raw detections into note (score) form.
currentNote = imageYOLO.makeSound(detectionResults)
print(f'currentNote {time.time() - start_time:f}')
print("currentNote", currentNote)

# Draw and save the bounding boxes for inspection (indexed by `i`).
detectMusic.writeBoundingBox(detectionResults, img_two, i)
print(f'bounding box {time.time() - start_time:f}')

# Fix: the original had a triple-quoted "comment" here (early-exit on empty
# detections) — that is a runtime-evaluated no-op string expression, not a
# comment; removed. Re-enable as real code if the early exit is wanted:
#     if not detectionResults:
#         move.stop()
#         break

# Extract only the notes not already in musicList and append them.
newNote = imageYOLO.findNewNotes(currentNote, musicList)
print(f'newNote {time.time() - start_time:f}')
musicList.extend(newNote)
# Sending the new notes to the EV3 (postData.postMusic) is currently disabled.
# --- Detection pass B: crop, upscale, detect, convert to notes, merge ---

# Crop the region of interest (rows 300:600, cols 200:600).
img_two = img_two[300:600, 200:600]
# Upscale 2x so the detector sees larger symbols.
img_two = cv2.resize(img_two, None, fx=2, fy=2)

# Run YOLO detection on the prepared image.
# A hard-coded test fixture result, e.g.
#   [(b'u_do', 0.999, (224.2, 378.5, 178.8, 328.3))]
# was used during development in place of this call.
detectionResults = detectMusic.yoloDetect_net(img_two, net)
print(f'detection {time.time() - start_time:f}')
print("detectionResults", detectionResults)

# Convert the raw detections into note (score) form.
currentNote = imageYOLO.makeSound(detectionResults)
print(f'currentNote {time.time() - start_time:f}')
print("currentNote", currentNote)

# Draw and save the bounding boxes, indexed by the current image number.
detectMusic.writeBoundingBox(detectionResults, img_two, int(imageNum))

# (Disabled) early exit when nothing was detected:
#     if not detectionResults:
#         move.stop()
#         break

# Extract only the notes not already in musicList and append them.
newNote = imageYOLO.findNewNotes(currentNote, musicList)
print(f'newNote {time.time() - start_time:f}')
musicList.extend(newNote)

# Send new notes to the EV3 — currently disabled.
# Fix: the original `if newNote != []:` guarded only a commented-out call plus
# `pass`, i.e. a dead branch; removed (behavior unchanged). Re-enable with:
#     if newNote:
#         postData.postMusic(newNote)