def picture_prosess(features, targeted_column, classifier):
    """Fit a classifier on the diabetes dataset and report its accuracy.

    Uses data.diabetes_dataset() and fitting.fit() to train, then computes
    training/validation accuracy for display on the web page. When exactly
    two feature checkboxes are checked, visualize.visualizer() produces two
    scatter plots which are rendered into in-memory PNG buffers and
    base64-encoded. The buffers work around matplotlib/Flask refresh issues:
    without them the scatter plots would not update in real time when new
    checkboxes are checked.

    args:
        features (list:String): list containing names of features (columns)
        targeted_column (String): name of column
        classifier (String): name of classifier

    returns:
        t_ac (float): training accuracy
        v_ac (float): validation accuracy
        img1 (string): base64-encoded scatter plot 1 (or None)
        img2 (string): base64-encoded scatter plot 2 (or None)
    """

    def _fig_to_base64(fig):
        # Render the figure to an in-memory PNG and return it as ascii base64.
        buffer = BytesIO()
        fig.savefig(buffer, format="png")
        return base64.b64encode(buffer.getbuffer()).decode("ascii")

    data_frame, training_set, validation_set = data.diabetes_dataset()
    model = fitting.fit(training_set, classifier, features, targeted_column)

    img1 = None
    img2 = None

    train_pred = model.predict(training_set[features])
    t_ac = metrics.accuracy_score(training_set[targeted_column], train_pred)
    val_pred = model.predict(validation_set[features])
    v_ac = metrics.accuracy_score(validation_set[targeted_column], val_pred)

    # Scatter plots only make sense in 2D, i.e. exactly two checked boxes.
    if len(features) == 2:
        img1 = _fig_to_base64(visualize.visualizer(train_pred, training_set, features))
        img2 = _fig_to_base64(visualize.visualizer(val_pred, validation_set, features))

    return t_ac, v_ac, img1, img2
# Draw a correlation heat-map of the dataframe and hand it to the web view.
# (Flat script: `df`, `sn`, `plt` and `visualizer` are defined/imported above.)
plt.figure(figsize=(10,7))
# Pearson correlations, pinned to [-1, 1] so the colour scale is symmetric.
sn.heatmap(df.corr(),annot=True,cmap = 'Blues',vmin=-1,vmax=1,center=0,linewidths=2, linecolor='black')
plt.xticks(fontsize=15,rotation=90)
plt.yticks(fontsize=15,rotation=0)
plt.title('Correlation HeatMap')
#plt.show()
# Instead of plt.show(), do the following:
# Render the current figure into the web UI via a "jumbocard" panel.
viz = visualizer()
viz.jumbocard('Jumbocard Heading', plt,'My Description: This is An Important Graph')
# NOTE(review): the two groupby results below are computed and immediately
# discarded (no assignment, no display) — likely notebook residue; confirm
# whether they should be printed/stored or removed.
df.groupby(['Target']).mean()
df.groupby(['Target']).median()
formatter_class=argparse.RawTextHelpFormatter, ) parser.add_argument("--vis", help="Will display the tree", dest='vis') parser.add_argument( "--get", help="Will get a desired component from a desired person", choices=['siblings', 'parents', 'spouse', 'children', 'cousins'], dest='get') t = text_reader('stark_family_tree.txt') args = parser.parse_args() if args.vis: visualizer('stark_family_tree.txt') if args.get == "parents": ans = input("Whose Parents would you like to find? ") while t.findPerson(ans) is None: ans = input("That person isn't in the tree, sorry. Try again: ") if t.getParents(ans) is None: print(ans, "has no parents in the tree.") else: print("The parents of ", ans, "are: ") print(", ".join(t.getParents(ans))) if args.get == "siblings": ans = input("Whose siblings would you like to find? ") while t.findPerson(ans) is None: ans = input("That person isn't in the tree, sorry. Try agian: ")
def play_game(self):
    """Run one webcam rock-paper-scissors round.

    Detects a hand "shake" phase, then counts fingers on each frame until
    ESC is pressed or the capture ends. Optionally records the raw mask and
    frames to disk, and always saves a plot of the shake history.

    Returns:
        int: 1 if the shake phase completed (game reached counting), else 0.
    """
    shaker = sh.Shaker()
    shake_switch = False
    shake_ended = False
    vis = visualizer()
    cap = cv2.VideoCapture(0)  # default webcam
    frame_cnt = 0
    if self.save_video:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(
            self.out_dir + "webcam_out.avi",
            fourcc, round(cap.get(5)),  # cap.get(5) == source FPS
            frame_size)
        out_mask = cv2.VideoWriter(
            self.out_dir + "webcam_out_mask.avi",
            fourcc, round(cap.get(5)),
            frame_size)
    decision_cnt = 0
    finger_cnt = 0
    rps = 'r'
    while cap.isOpened():
        ret, frame = cap.read()
        frame_cnt += 1
        if ret is False:
            break
        frame = cv2.resize(frame, frame_size)
        #frame = cv2.flip(frame, 0)
        #frame = cv2.flip(frame, 1)
        mask = sd.detect_skin(frame)
        if self.save_video:
            out.write(cv2.cvtColor(mask,
                cv2.COLOR_GRAY2BGR))
        if shake_ended is True:
            if shake_switch is False:
                # First frame after the shake: snapshot min/max images and
                # build the per-round skin-colour classifier once.
                print('shake ended')
                shake_switch = True
                img1, img2 = shaker.get_minmax_image()
                cv2.imwrite(self.out_dir + 'webcam_max.jpg', img1)
                cv2.imwrite(self.out_dir + 'webcam_min.jpg', img2)
                scc = SkinColorClassifier(img1, img2)
            mask = scc.mask_image(frame)
            mask = sd.morphological_transform(mask)
            cv2.imshow('scc mask', mask)
            frame, finger_cnt = count_finger(frame, mask)
            print(finger_cnt)
        else:
            mask = sd.detect_skin(frame)
            decision_cnt += 1
            if shake_switch is False:
                shake_ended = shaker.shake_detect(mask, frame)
        #out.write(frame)
        # FIX: out_mask only exists when self.save_video is True; the
        # unguarded write raised NameError with saving disabled (the other
        # play_game variants guard their writers the same way).
        if self.save_video:
            out_mask.write(cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR))
        frame = vis.visualize(frame, finger_cnt, decision_cnt, True)
        cv2.imshow('frame', frame)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:  # ESC quits the round
            break
    if self.save_video:
        out.release()
        out_mask.release()
    # Persist the shake-detector history for offline inspection.
    plt.plot(shaker.yhistory)
    plt.ylabel('avg y')
    plt.plot(shaker.smoothed)
    plt.ylabel('smoothed')
    plt.savefig(self.out_dir + "webcam_plot.png")
    plt.clf()
    cap.release()
    cv2.destroyAllWindows()
    if shake_switch:
        return 1
    else:
        return 0
def play_game(self):
    """Run one rock-paper-scissors round against a recorded video file.

    Reads frames from self.in_dir + self.video_name, detects the shake
    phase, then counts fingers per frame; appends a line to the report
    file and saves a plot of the shake history.

    Returns:
        int: 1 if the shake phase completed, 0 otherwise.
    """
    shaker = sh.Shaker()
    shake_switch = False
    shake_ended = False
    vis = visualizer()
    cap = cv2.VideoCapture(self.in_dir + self.video_name)
    frame_cnt = 0
    # Append-mode report; one entry per processed video.
    f = open(self.out_dir + self.report_name, 'a')
    f.write(self.video_name + ": ")
    pure_video_name = self.video_name.replace('.MOV', '')
    if self.save_video:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(
            self.out_dir + pure_video_name + "out.avi",\
            fourcc, round(cap.get(5)), \
            frame_size)  # cap.get(5) == source FPS
    avg = 0  # NOTE(review): never used below — confirm it can be removed
    decision_cnt = 0
    finger_cnt = 0
    rps = 'r'
    while cap.isOpened():
        ret, frame = cap.read()
        frame_cnt += 1
        if ret is False:
            break
        frame = cv2.resize(frame, frame_size)
        # Recorded clips are flipped both ways — presumably to match the
        # webcam orientation; verify against the capture setup.
        frame = cv2.flip(frame, 0)
        frame = cv2.flip(frame, 1)
        mask = sd.detect_skin(frame)
        #cv2.imshow('mask', mask)
        if self.save_video:
            out.write(cv2.cvtColor(mask,\
                cv2.COLOR_GRAY2BGR))
        if shake_ended is True:
            if shake_switch is False:
                # First frame after the shake ends; runs exactly once.
                print('shake ended')
                #time.sleep(2)
                shake_switch = True
                img1, img2 = shaker.get_minmax_image()
                #cv2.imwrite(self.out_dir + pure_video_name + '_max.jpg', img1)
                #cv2.imwrite(self.out_dir + pure_video_name + '_min.jpg', img2)
                #f.write(str(frame_cnt))
            # NOTE(review): unlike the other variants, no SkinColorClassifier
            # is built here (the scc line is commented out) — finger counting
            # runs on the plain detect_skin mask. Confirm this is intended.
            #mask = scc.mask_image(frame)
            mask = sd.morphological_transform(mask)
            frame, finger_cnt = count_finger(frame, mask)
            print(finger_cnt)
        else:
            mask = sd.detect_skin(frame)
            decision_cnt += 1
            if shake_switch is False:
                shake_ended = shaker.shake_detect(mask, frame)
        frame = vis.visualize(frame, finger_cnt, decision_cnt)
        cv2.imshow('frame', frame)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:  # ESC quits the round
            break
    # Pause before teardown — presumably to keep the last frame visible.
    time.sleep(2)
    f.write('\n')
    f.close()
    if self.save_video:
        out.release()
    # Persist the shake-detector history for offline inspection.
    plt.plot(shaker.yhistory)
    plt.ylabel('avg y')
    plt.plot(shaker.smoothed)
    plt.ylabel('smoothed')
    plt.savefig(self.out_dir + pure_video_name + "_plot.png")
    plt.clf()
    cap.release()
    cv2.destroyAllWindows()
    if shake_switch:
        return 1
    else:
        return 0
import visualize

# Fetch per-class emotion scores and normalise them to percentages.
emotion_array = visualize.visualizer()
emotion_array = np.array(emotion_array)
emotion_array = (emotion_array / sum(emotion_array)) * 100
plt.rcParams['figure.figsize'] = (13.5, 5.5)
for i in range(len(emotion_array)):
    # FIX: subplot indices are 1-based — subplot(2, 4, 0) raised ValueError.
    axes = plt.subplot(2, 4, i + 1)
    emojis_img = io.imread('images/emojis/%s.png' % str(class_names[i]))
    plt.imshow(emojis_img)
    # FIX: emotion_array(i) called the ndarray (TypeError); index it instead.
    plt.xlabel(str(emotion_array[i]), fontsize=16)
    axes.set_xticks([])
    axes.set_yticks([])
    # NOTE(review): saving/closing inside the loop emits a cumulative figure
    # per step as results/{i+1}.png — confirm this incremental output is the
    # intent rather than a single final figure.
    plt.tight_layout()
    plt.savefig(os.path.join('images/results/{}.png'.format(i + 1)))
    plt.close()
# NOTE(review): fragment — these self.* statements belong to a board-setup
# method whose header (and the first `x = tile(...)`) is outside this view.
# Pattern per resource: create a tile, register it under the next number
# token, then consume that token from self.numbers.
self.tiles.append(x)
self.board_map[self.numbers[0]].append(x)
self.numbers = self.numbers[1:]
x = tile("Wood", self.numbers[0], '#006600')
self.tiles.append(x)
self.board_map[self.numbers[0]].append(x)
self.numbers = self.numbers[1:]
x = tile("Sheep", self.numbers[0], '#99ff33')
self.tiles.append(x)
self.board_map[self.numbers[0]].append(x)
self.numbers = self.numbers[1:]
x = tile("Wheat", self.numbers[0], '#cccc00')
self.tiles.append(x)
self.board_map[self.numbers[0]].append(x)
self.numbers = self.numbers[1:]
# Desert gets number 0 (no production) and is not added to board_map.
self.tiles.append(tile("Desert", 0, '#999966'))
# Randomise tile placement for each new board.
shuffle(self.tiles)
# --- top-level driver: build and draw the board ---
board = board()
view = visualizer()
# NOTE(review): x and y here must be module-level values defined above this
# chunk — they are not the tile variable from the method body; verify.
board = view.visualize_board(board, x, y)
# view.draw_settlement(board.vertices[0].coord, '#ff0000')
# view.draw_road(board.vertices[0].coord, board.vertices[1].coord, '#ff0000')
# Drawing constants for detection boxes.
GREEN = (0, 255, 0)
THICKNESS = 3

if __name__ == '__main__':
    import cv2
    import data
    from visualize import visualizer, cv2Window
    from detect import detectMultiscale

    def predictionCallback(img):
        """Run the detector on img and draw green boxes on it in place."""
        t0 = timer()
        boxes = detectMultiscale(img)
        if PROFILE:
            print('Prediction took %fs' % (timer() - t0,))
        for (x0, y0, x1, y1) in boxes:
            cv2.rectangle(img, (x0, y0), (x1, y1), GREEN, THICKNESS)

    # Mode selection via module-level flags: static test images,
    # cascade training, or a live RealSense stream.
    if TEST:
        visualizer(data.getTestImagePaths(), predictionCallback, WINDOW_TITLE)
    elif TRAIN:
        from train import train
        train(STAGE_IDX, TRAIN_CALIB)
    elif LIVE:
        from RealSense import Streamer, LiveDisplay
        with cv2Window(LIVE_WINDOW_TITLE) as win, Streamer() as stream:
            LiveDisplay(stream, win).run(predictionCallback)
def play_game(self):
    """Run one round against a recorded video using the frame-diff shaker.

    Detects the shake phase via frame differencing, builds a per-round
    SkinColorClassifier from the shake extremes, then counts fingers on
    each subsequent frame. Writes diagnostics to the report file and
    saves shake/finger plots.

    Returns:
        list: cnt_list (see NOTE below — currently always empty).
    """
    shaker = sh.DiffShaker()
    scc = None
    shake_switch = False
    shake_ended = False
    # NOTE(review): cnt_list is initialised, plotted and returned but never
    # appended to — the finger plot is always empty and callers always get
    # []. Presumably finger_cnt should be collected each frame; confirm.
    cnt_list = []
    vis = visualizer()
    cap = cv2.VideoCapture(self.in_dir + self.video_name)
    print(self.in_dir + self.video_name)
    frame_cnt = 0
    # Append-mode report; one entry per processed video.
    f = open(self.out_dir + self.report_name, 'a')
    f.write(self.video_name + ": ")
    pure_video_name = self.video_name.replace('.MOV', '')
    decision_cnt = 0
    finger_cnt = 0
    if self.save_video:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(
            self.out_dir + pure_video_name + "out.avi",\
            fourcc, round(cap.get(5)), \
            frame_size)  # cap.get(5) == source FPS
    # Prime the differencer with the first frame.
    ret, prev_frame = cap.read()
    prev_frame = render_frame(prev_frame)
    while cap.isOpened():
        ret, curr_frame = cap.read()
        frame_cnt += 1
        if ret is False:
            break
        curr_frame = render_frame(curr_frame)
        if shake_ended is True:
            if shake_switch is False:
                # only started once
                print('shake ended')
                #time.sleep(2)
                shake_switch = True
                img1, img2 = shaker.get_minmax_image()
                cv2.imwrite(self.out_dir + pure_video_name + '_max.jpg', img1)
                cv2.imwrite(self.out_dir + pure_video_name + '_min.jpg', img2)
                # Record at which frame the shake ended.
                f.write(str(frame_cnt))
                # Per-round skin model built from the shake extremes.
                scc = SkinColorClassifier(img1, img2)
            start_time = time.time()
            mask = scc.mask_image(curr_frame)
            curr_frame, finger_cnt = count_finger(curr_frame, mask)
            # Per-frame timing of the classify+count step.
            print(time.time() - start_time)
        else:
            mask = sd.detect_skin(curr_frame)
            decision_cnt += 1
            if shake_switch is False:
                # Diff-based detection consumes consecutive frame pairs.
                mask, shake_ended = \
                    shaker.shake_detect(prev_frame, curr_frame)
        cv2.imshow('mask', mask)
        if self.save_video:
            out.write(cv2.cvtColor(mask,\
                cv2.COLOR_GRAY2BGR))
        prev_frame = curr_frame
        curr_frame = vis.visualize(curr_frame, finger_cnt, decision_cnt)
        cv2.imshow('frame', curr_frame)
        k = cv2.waitKey(5) & 0xFF
        if k == 27:  # ESC quits the round
            break
    f.write('\n')
    f.close()
    if self.save_video:
        out.release()
    # Persist the shake-detector history for offline inspection.
    plt.plot(shaker.yhistory)
    plt.ylabel('avg y')
    plt.plot(shaker.smoothed)
    plt.ylabel('smoothed')
    plt.savefig(self.out_dir + pure_video_name + "_plot.png")
    plt.clf()
    # Finger-count history plot (empty while cnt_list is never populated).
    plt.plot(cnt_list)
    plt.savefig(self.out_dir + pure_video_name + \
        "_finger_plot.png")
    plt.clf()
    cap.release()
    cv2.destroyAllWindows()
    return cnt_list