# Shared imports for the tools below; project-level names (DrumArea, AreaListener,
# ImageProcessor, utils, and the *_DIM / threshold constants) are assumed to be
# imported from elsewhere in the repo.
import os
import random

import cv2
import numpy as np
import tensorflow as tf


def test_live(model):
    cap = cv2.VideoCapture(0)
    drum_area1 = DrumArea(top_left_corner=(50, 50), square_dim=320, sound='c')
    dareas = [drum_area1]
    area_listener = AreaListener(drum_areas=dareas)
    img_process = ImageProcessor()
    base_set = False

    while True:
        _, frame_orig = cap.read()
        frame_orig = img_process.horizontal_flip(frame_orig)
        frame_color = frame_orig.copy()
        frame = frame_color.copy()

        if not base_set:
            area_listener.set_base_image(frame)
            base_set = True

        for drum_area in dareas:
            # Diff the live area against its base image, feed the normalized
            # 80x80 diff to the model, and draw the predicted stick-tip center.
            orig, target = drum_area.get_area(frame_orig), drum_area.base_img
            img_copy = orig.copy()
            diff = cv2.absdiff(target, orig)
            diff_gray = np.asarray(cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY))
            diff_gray = cv2.resize(diff_gray, (80, 80))
            diff_gray = tf.reshape(diff_gray, [80, 80, 1])
            center = model.predict(np.asarray([diff_gray.numpy() / 255.0]))[0]
            cv2.circle(img_copy, tuple(map(int, center)), 9, (30, 110, 200), -1)
            cv2.imshow('Pred', img_copy)

        area_listener.draw_area(frame_color)
        cv2.imshow('Main', frame_color)
        key = cv2.waitKey(1)
        if key == ord('s'):
            print('resetting base')
            area_listener.set_base_image(frame)
        if key & 0xFF == ord('q'):
            break
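# A minimal, camera-free sketch of the preprocessing test_live applies before
# model.predict: grayscale diff -> 80x80 -> channel axis -> scale to [0, 1].
# The helper name `prepare_diff_for_model` is illustrative, not project API.
def prepare_diff_for_model(diff_gray):
    resized = cv2.resize(diff_gray, (80, 80))
    # Add the channel axis and scale to [0, 1] to match the model's input.
    return resized.reshape(80, 80, 1).astype(np.float32) / 255.0

# e.g.: prepare_diff_for_model(np.zeros((320, 320), np.uint8)).shape == (80, 80, 1)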
def collect_samples(data_type, num_samples, saveOnKey=False):
    s_activated = False
    directory = f'{ROOT_SAMPLES_DIR}/{data_type}/'
    os.makedirs(directory, exist_ok=True)
    last_file_name = get_last_file_name(directory)
    cap = cv2.VideoCapture(0)
    drum_area = DrumArea(top_left_corner=(50, 50), square_dim=SQUARE_DIM, sound='j')
    area_listener = AreaListener(drum_areas=[drum_area])
    img_process = ImageProcessor()
    count = last_file_name - 1

    while True:
        _, frame = cap.read()
        frame = img_process.horizontal_flip(frame)
        targeted_area = area_listener.get_all_target_areas(img=frame)[0]
        area_listener.draw_area(frame)
        cv2.imshow('Target', targeted_area)
        cv2.imshow('Main', frame)

        key = cv2.waitKey(1)
        if key == ord('s'):
            # Pressing 's' starts the capture run.
            if not s_activated:
                print('S activated')
                s_activated = True
                count = last_file_name + 1

        if count > last_file_name and count <= (num_samples + last_file_name):
            if saveOnKey and key != ord('a'):
                continue
            file_name = get_file_name(data_type, count)
            cv2.imwrite(file_name, targeted_area)
            count += 1
            print(f'Saved {file_name}')
        elif count > num_samples + last_file_name:
            return

        if key & 0xFF == ord('q'):
            break
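# Illustrative sketch (not the repo's actual helper) of what get_last_file_name
# is assumed to do: find the highest numeric file stem in the sample directory
# so a new capture run continues the numbering sequence.
def example_last_index(directory):
    stems = [os.path.splitext(f)[0] for f in os.listdir(directory)]
    numeric = [int(s) for s in stems if s.isdigit()]
    return max(numeric, default=0)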
def test_live_efficient(square_dim=320):
    cap = cv2.VideoCapture(0)
    img_process = ImageProcessor()
    base_imgs = None
    test_max_black_pixel_count = 0
    drum_area1 = DrumArea(top_left_corner=(100, 10), square_dim=square_dim, sound='j')
    drum_area2 = DrumArea(top_left_corner=(100, 320), square_dim=square_dim, sound='c')
    drum_areas = [drum_area1, drum_area2]
    area_listener = AreaListener(drum_areas=drum_areas)
    last_states = [False] * len(drum_areas)
    max_black_pixel = [0] * len(drum_areas)

    while True:
        _, frame_orig = cap.read()
        frame_orig = img_process.horizontal_flip(frame_orig)
        area_listener.draw_area(frame_orig)

        if base_imgs is None:
            area_listener.set_base_image(frame_orig)
            base_imgs = area_listener.get_base_imgs(resize_dim=RESIZE_DIM)

        target_areas = area_listener.get_all_target_areas(
            frame_orig, resize_dim=RESIZE_DIM)
        for i, target_area in enumerate(target_areas):
            diff = cv2.absdiff(target_area, base_imgs[i])
            diff_gray = np.asarray(cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY))
            if test_max_black_pixel_count < 100:
                # Calibration phase (first ~100 frames): learn the brightest
                # background-noise pixel for each area.
                max_black_pixel[i] = max(max_black_pixel[i], max(diff_gray.flatten()))
            else:
                # Detection phase: binarize against the calibrated ceiling and
                # fire the sound on the rising edge only (debounced via last_states).
                diff_gray[diff_gray > max_black_pixel[i]] = 255
                diff_gray[diff_gray <= max_black_pixel[i]] = 0
                num_whites = len(diff_gray[diff_gray == 255])
                if num_whites > THRESHOLD_NUM_WHITES:
                    if not last_states[i]:
                        last_states[i] = True
                        drum_areas[i].playSound()
                    drum_areas[i].markPlayed(frame_orig)
                else:
                    last_states[i] = False

        test_max_black_pixel_count += 1
        cv2.imshow('Main', frame_orig)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
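# Camera-free sketch of the calibrate-then-binarize step above, on a synthetic
# diff. `binarize_diff` and the ceiling value are illustrative assumptions.
def binarize_diff(diff_gray, noise_ceiling):
    out = diff_gray.copy()
    out[out > noise_ceiling] = 255  # brighter than calibrated noise -> motion
    out[out <= noise_ceiling] = 0   # otherwise -> background
    return out

_demo = np.array([[5, 8, 3, 1],
                  [2, 90, 95, 4],
                  [6, 88, 92, 7],
                  [1, 3, 2, 9]], dtype=np.uint8)
assert (binarize_diff(_demo, 20) == 255).sum() == 4  # the four bright pixels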
def test_model_live(model):
    cap = cv2.VideoCapture(0)
    drum_area2 = DrumArea(top_left_corner=(100, 100), square_dim=SQUARE_DIM, sound='j')
    area_listener2 = AreaListener(drum_areas=[drum_area2])
    img_process2 = ImageProcessor()
    # Wrap the logits model in a Softmax head so predictions are probabilities.
    probability_model2 = tf.keras.Sequential([model, tf.keras.layers.Softmax()])

    while True:
        _, frame = cap.read()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        frame = img_process2.horizontal_flip(frame)
        targeted_area = area_listener2.get_all_target_areas(img=frame)[0]
        area_listener2.draw_area(frame)
        cv2.imshow('Main', frame)

        key = cv2.waitKey(1)
        if key == ord('s'):
            # Round-trip through disk so the input matches saved training samples.
            file_name = 'tp.jpg'
            cv2.imwrite(file_name, targeted_area)
            to_predict_img = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)
            prediction, _ = utils.predict(
                np.asarray(to_predict_img), probability_model2, preprocess=False)
            cv2.imshow('Target', to_predict_img)
            print(f'PREDICTION={prediction}')
        if key & 0xFF == ord('q'):
            break
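# Hedged sketch of what a softmax-head prediction boils down to; the project's
# utils.predict may differ, and `predict_class` is an illustrative name.
def predict_class(probability_model, img):
    probs = probability_model.predict(np.asarray([img]), verbose=0)  # (1, n_classes)
    return int(np.argmax(probs[0]))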
def test_live(model, adaptive_threshold=GLOBAL_ADAPTIVE_THRESHOLD):
    cap = cv2.VideoCapture(0)
    drum_area1 = DrumArea(top_left_corner=(50, 50), square_dim=320, sound='c')
    drum_area2 = DrumArea(top_left_corner=(850, 50), square_dim=320, sound='j')
    dareas = [drum_area1]  # append drum_area2 here to test a second pad
    area_listener2 = AreaListener(drum_areas=dareas)
    img_process2 = ImageProcessor()
    probability_model2 = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
    base_set = False
    base_imgs = None

    while True:
        _, frame_orig = cap.read()
        frame_orig = img_process2.horizontal_flip(frame_orig)
        frame_color = frame_orig.copy()
        frame = frame_color.copy()

        if not base_set:
            area_listener2.set_base_image(frame)
            base_imgs = area_listener2.get_base_imgs(resize_dim=(RESIZE_DIM, RESIZE_DIM))
            base_set = True

        for drum_area in dareas:
            process_area_cv2Diff(drum_area, frame_orig, probability_model2)

        area_listener2.draw_area(frame_color)
        cv2.imshow('Main', frame_color)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
# Histogram sweep over saved sample images. The loop header is assumed:
# `img_names` (a list of sample paths gathered above) is illustrative.
for img_name in img_names:
    print(img_name)
    img = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
    img = cv2.resize(img, (32, 32))
    zeros.append(draw_histogram(img, bin_width=16))
    print('-------------------------------')

print('\n')
print(sorted(zeros))

# draw_all_histograms(NO_STICK_DATATYPE)
cap = cv2.VideoCapture(0)
drum_area = DrumArea(top_left_corner=(50, 50), square_dim=300, sound='j')
area_listener = AreaListener(drum_areas=[drum_area])
orig = None
while True:
    _, frame_orig = cap.read()
    frame_orig = cv2.flip(frame_orig, 1)  # 1 = horizontal flip (mirror view)
    frame_color = frame_orig.copy()
    frame = np.asarray(cv2.cvtColor(frame_orig, cv2.COLOR_BGR2GRAY))
    area_listener.draw_area(frame)
    ar = drum_area.get_area(frame)
    ar = cv2.resize(ar, (32, 32))
def collect_preprocessed_samples(data_type, num_samples, saveOnKey=False,
                                 add_random_whiteness=False):
    s_activated = False
    directory = f'{ROOT_SAMPLES_DIR}/{data_type}/'
    os.makedirs(directory, exist_ok=True)
    last_file_name = get_last_file_name(directory)
    cap = cv2.VideoCapture(0)
    drum_area = DrumArea(top_left_corner=(50, 50), square_dim=SQUARE_DIM, sound='j')
    area_listener = AreaListener(drum_areas=[drum_area])
    img_process = ImageProcessor()
    count = last_file_name - 1
    base_set = False
    base_imgs = None
    max_black = 0

    while True:
        _, frame = cap.read()
        frame = img_process.horizontal_flip(frame)
        targeted_area = area_listener.get_all_target_areas(
            img=frame, resize_dim=RESIZE_DIM)[0]
        area_listener.draw_area(frame)
        cv2.imshow('Target', targeted_area)
        cv2.imshow('Main', frame)

        if not base_set:
            area_listener.set_base_image(frame)
            base_imgs = area_listener.get_base_imgs(resize_dim=RESIZE_DIM)
            base_set = True

        # Samples are saved as the grayscale diff against the base image,
        # i.e. already in the form the model trains on.
        diff = cv2.absdiff(targeted_area, base_imgs[0])
        diff_gray = np.asarray(cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY))
        if add_random_whiteness:
            # Augmentation: lift every pixel by a global offset plus per-pixel
            # jitter so the model does not overfit to a perfectly black background.
            base = random.randint(0, 10)
            for i in range(RESIZE_DIM[0]):
                for j in range(RESIZE_DIM[1]):
                    diff_gray[i][j] += base + random.randint(0, 3)
        cv2.imshow('diff_abs', diff_gray)
        diff_gray_flat = diff_gray.flatten()

        key = cv2.waitKey(1)
        if key == ord('s'):
            # Pressing 's' starts the capture run.
            if not s_activated:
                print('S activated at max_black =', max_black)
                s_activated = True
                count = last_file_name + 1

        if count > last_file_name and count <= (num_samples + last_file_name):
            if saveOnKey and key != ord('a'):
                continue
            file_name = get_file_name(data_type, count)
            cv2.imwrite(file_name, diff_gray)
            count += 1
            print(f'Saved {file_name}')
        elif count > num_samples + last_file_name:
            return
        else:
            # Before capture starts, keep tracking the brightest noise pixel.
            max_black = max(max_black, max(diff_gray_flat))

        if key & 0xFF == ord('q'):
            break
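# Vectorized restatement of the add_random_whiteness loop above. One deliberate
# difference: np.clip avoids the uint8 wraparound that the in-place per-pixel
# addition can produce.
def add_random_whiteness_np(diff_gray):
    base = np.random.randint(0, 11)                         # one global offset
    jitter = np.random.randint(0, 4, size=diff_gray.shape)  # per-pixel jitter
    lifted = diff_gray.astype(np.int16) + base + jitter
    return np.clip(lifted, 0, 255).astype(np.uint8)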
def test_live():
    cap = cv2.VideoCapture(0)
    drum_area = DrumArea(top_left_corner=(0, 0), square_dim=100, sound='j')
    area_listener2 = AreaListener(drum_areas=[drum_area])
    img_process2 = ImageProcessor()
    base_set = False
    base_imgs = None
    max_black_pixel = 0
    test_max_black_pixel_count = 0

    while True:
        _, frame_orig = cap.read()
        frame_orig = img_process2.horizontal_flip(frame_orig)
        area_listener2.draw_area(frame_orig)

        if not base_set:
            area_listener2.set_base_image(frame_orig)
            base_imgs = area_listener2.get_base_imgs()
            base_set = True

        # Diffing approach adapted from:
        # https://stackoverflow.com/questions/27035672/cv-extract-differences-between-two-images
        target_areas = area_listener2.get_all_target_areas(frame_orig)
        for i, ta in enumerate(target_areas):
            diff = cv2.absdiff(ta, base_imgs[i])
            diff_gray = np.asarray(cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY))
            diff_gray_flat = diff_gray.flatten()
            average = np.sum(diff_gray_flat) / len(diff_gray_flat)

            if test_max_black_pixel_count < 100:
                # Calibration phase: record the brightest background-noise pixel.
                max_black_pixel = max(max_black_pixel, max(diff_gray_flat))
            else:
                # Detection phase: binarize with a +10 margin over the noise
                # ceiling, then trigger on any white (changed) pixels.
                diff_gray[diff_gray > max_black_pixel + 10] = 255
                diff_gray[diff_gray <= max_black_pixel + 10] = 0
                cv2.circle(frame_orig, (500, 100), 90, (100, 140, 10), -1)
                if len(diff_gray[diff_gray == 255]) > 2:
                    drum_area.playSound()

            test_max_black_pixel_count += 1
            print(average, 'average', max(diff_gray_flat), 'max')
            cv2.imshow(f'diff_gray{i}', diff_gray)

        cv2.imshow('Main', frame_orig)
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
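# Hedged usage sketch: run the calibration-based tester directly. Only the
# __main__ guard is new; hold still for the first ~100 frames so the noise
# ceiling calibrates before hits are detected.
if __name__ == '__main__':
    test_live()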