def scrape_world_select():
    """Run one extract-transform-load pass over the OSRS world-select page.

    Fetches the page; on an OK response, extracts per-world and total-player
    data, transforms both, and loads the results. A failed response is
    reported to stdout. The run is logged either way via ``update_logs``
    with the run timestamp and the HTTP status code.
    """
    run_started = datetime.now()
    response = get_osrs_world_select()
    if not response.ok:
        print('Bad Response - HTTP', response.status_code)
    else:
        raw_worlds, raw_total = extract_data(response)
        worlds, total_count = transform_data(raw_worlds, raw_total, run_started)
        load_data(worlds, total_count)
    update_logs(run_started, response.status_code)
def make_prediction():
    """Flask view: accept an uploaded image and render its predicted label.

    On POST, saves the uploaded file under ``app.config['UPLOAD_FOLDER']``,
    computes its feature histogram, runs the SVM, and renders the label.
    Returns the rendered ``index.html`` template in all POST cases.
    """
    if request.method == 'POST':
        # Get Image
        image = request.files['image']
        if not image:
            # Fix: the previous user-facing message was unprofessional.
            return render_template('index.html', label='No file provided')
        # NOTE(review): image.filename is attacker-controlled; consider
        # werkzeug.utils.secure_filename to prevent path traversal.
        save_path = os.path.join(app.config['UPLOAD_FOLDER'], image.filename)
        image.save(save_path)
        # Transform data.
        # Fix: read back from the exact path the file was saved to, instead
        # of the hard-coded 'images/' prefix (which silently breaks whenever
        # UPLOAD_FOLDER is not 'images').
        hist = transform.transform_data(save_path)
        # Make prediction
        prediction = svm.predict([hist])
        # Squeeze value
        label = str(np.squeeze(v.get_value_key_str(prediction)))
        return render_template('index.html', label=label)
import extract
import transform
import load

if __name__ == '__main__':
    # Source: LA "Arts Datathon" CSV export from the Socrata open-data API.
    source_url = ('https://usc.data.socrata.com/api/views/kygc-fzgm/'
                  'rows.csv?accessType=DOWNLOAD')
    # Classic extract -> transform -> load pipeline, one stage per module.
    extracted = extract.extracted_data(source_url)
    transformed = transform.transform_data(extracted)
    loaded = load.loaded_data(transformed)
# NOTE(review): this chunk begins mid-statement — the line below is the tail
# of an assignment (presumably `psm2_calibration_data = list(...)`) whose
# start is above this view; kept verbatim, indentation is best-effort.
    transform.load_all('world/psm2_recordings.txt'))
# Convert recorded PSM2 poses into a point matrix for transform solving.
psm2_calibration_matrix = transform.psm_data_to_matrix(
    psm2_calibration_data)
# First pickled record of the endoscope chessboard points.
endoscope_calibration_matrix = np.matrix(
    list(read_camera.load_all('world/endoscope_chesspts.p'))[0])
""" Get the coordinates of most recently found needle centers (in endoscope frame) """
needle_points = np.matrix(
    list(read_needle.load_all('needle_data/needle_points.p'))[0])
if USE_WORLD_TRANSFORM:
    # Route endoscope -> world -> PSM2 via an intermediate world frame.
    world = transform.generate_world()
    TE_W = rigid_transform.solve_for_rigid_transformation(
        endoscope_calibration_matrix, world)
    needle_to_world = transform.transform_data("Endoscope", "World",
                                               needle_points, TE_W)
    # Flatten the world-frame points onto the z = 0 plane before solving
    # the world -> PSM2 transform.
    needle_to_world[:, 2] = 0.
    pprint.pprint(needle_to_world)
    TW_2 = rigid_transform.solve_for_rigid_transformation(
        world, psm2_calibration_matrix)
    world_to_psm2 = transform.transform_data("World", "PSM2",
                                             needle_to_world, TW_2)
    pprint.pprint(world_to_psm2)
    """ Move to needle centers, pick them up, and release them """
    pickup(psm2, world_to_psm2.tolist(), z_upper, z_lower)
else:
    """ Solve for the transform between endoscope to PSM2 """
    TE_2 = rigid_transform.solve_for_rigid_transformation(
        endoscope_calibration_matrix, psm2_calibration_matrix)
def process_image(self, image):
    """Locate the needle in `image`, fit a pull direction from its small
    residual, and mirror the annotations into the right camera frame.

    Draws on `image` and `self.right_image` as a side effect.
    """
    thresh = self.preprocess(image)
    # NOTE(review): 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy) — confirm cv2 version.
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
    # All potential smaller-end needle protrusions
    residuals = [
        c for c in contours
        if self.residual_lower < cv2.contourArea(c) < self.residual_upper
    ]
    not_found = True
    for r in residuals:
        cv2.drawContours(image, [r], 0, (0, 255, 0), 2)
    for c in contours:
        # Get moments and area for given contour
        M = cv2.moments(c)
        area = cv2.contourArea(c)
        # Throw out all non-needle contours; not_found limits processing
        # to the first accepted needle contour.
        if not_found and (self.area_lower < area < self.area_upper):
            # Compute the centroid (center of mass) and center of the given needle
            centroid_x, centroid_y = self.compute_centroid(c, M)
            closest = np.vstack(self.center(c, centroid_x, centroid_y)).squeeze()
            cx, cy = closest[0], closest[1]
            center = (cx, cy)
            # Fit an ellipse to the contour
            ellipse, ellipse_aspect, ellipse_area = self.get_ellipse(c)
            """Contour is the big protruding part of the needle"""
            if self.ellipse_lower < ellipse_area < self.ellipse_upper:
                not_found = False
                # Report/display the large residual
                cv2.putText(image, "centroid", (centroid_x - 20, centroid_y - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(image, center, 10, (255, 0, 0), -1)
                # cv2.circle(image, (centroid_x, centroid_y), 10, (255, 255, 255), -1)
                self.report(area, centroid_x, centroid_y, cx, cy, ellipse_area,
                            'LARGE RESIDUAL')
                # cv2.ellipse(image, ellipse, (0, 0, 255), 2)
                cv2.drawContours(image, [c], 0, (0, 255, 255), 2)
                # Find the corresponding small residual and markup
                residual = self.find_residual(center, residuals)
                if residual is not None:
                    print("SMALL RESIDUAL", cv2.contourArea(residual))
                    print(self.get_ellipse(residual)[-2])
                    # NOTE(review): compute_centroid is called with two args
                    # (c, M) above but one arg here — verify its signature
                    # allows the moments argument to be omitted.
                    residual_centroid = self.compute_centroid(residual)
                    cv2.putText(image, "residual", residual_centroid,
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
                    cv2.drawContours(image, [residual], 0, (255, 255, 255), 2)
                    cv2.circle(image, residual_centroid, 10, (255, 0, 0), -1)
                    # Fit a line to the small residual
                    [vx, vy, x, y] = cv2.fitLine(residual, cv2.DIST_L2, 0, 0.01, 0.01)
                    # NOTE(review): np.asscalar was removed in NumPy 1.23 —
                    # use .item() when upgrading.
                    dx, dy = np.asscalar(vx), np.asscalar(vy)
                    # rows, cols = image.shape[:2]
                    # lefty = int((-x*vy/vx) + y)
                    # righty = int(((cols-x)*vy/vx)+y)
                    # cv2.line(image,(cols-1,righty),(0,lefty),(0,255,0),2)
                    """Finds a pull point (relative to contour center) in the
                    direction of the best fit line of the smaller residual and
                    opposite (not towards) the smaller residual
                    """
                    if self.distance(residual_centroid, center) > \
                            self.distance(residual_centroid, (cx + dx, cy + dy)):
                        dx, dy = -dx, -dy
                    # Pull point 200 px from the center along the fitted line.
                    pull_x = int(cx + 200 * dx)
                    pull_y = int(cy + 200 * dy)
                    cv2.circle(image, (pull_x, pull_y), 10, (0, 0, 0), -1)
                    cv2.line(image, center, (pull_x, pull_y), (0, 0, 0), 2)
                    # Compute points in right camera frame (residual center,
                    # contour center, pull point)
                    left_center = np.matrix([cx, cy, 0])
                    left_pull = np.matrix([pull_x, pull_y, 0])
                    # NOTE(review): frame labels differ ("Left Frame"/"Right
                    # Frame" vs "Left"/"Right") — presumably cosmetic/logging
                    # only; confirm transform_data ignores the label strings.
                    right_center = transform.transform_data("Left Frame", "Right Frame",
                                                            left_center, self.TL_R,
                                                            verbose=False)
                    right_pull = transform.transform_data("Left", "Right",
                                                          left_pull, self.TL_R,
                                                          verbose=False)
                    right_cx = int(right_center[0, 0])
                    right_cy = int(right_center[0, 1])
                    right_pull_x = int(right_pull[0, 0])
                    right_pull_y = int(right_pull[0, 1])
                    cv2.circle(self.right_image, (right_cx, right_cy), 10,
                               (0, 0, 0), -1)
                    cv2.circle(self.right_image, (right_pull_x, right_pull_y), 10,
                               (0, 0, 0), -1)
                    cv2.line(self.right_image, (right_cx, right_cy),
                             (right_pull_x, right_pull_y), (0, 0, 0), 2)
import sys

import model
import transform
import load
import numpy

# from hyperopt import fmin, tpe, hp
# space = [hp.quniform('lr', 0.00001, 1, 0.00001),
#          hp.quniform('bs', 100, 10000, 100),
#          hp.quniform('fhl', 10, 200, 10),
#          hp.quniform('shl', 10, 200, 10)]

# Fix: threshold='nan' raises TypeError on modern NumPy (the threshold must
# be numeric); sys.maxsize is the documented way to disable summarization.
numpy.set_printoptions(threshold=sys.maxsize)
numpy.set_printoptions(precision=2)

transform.transform_data("/home/bogdan/work/repos/ml-tloe/serps/results/*",
                         'expanded', 10000)
data = load.read_data_sets('expanded/*', 0.3, 0.1, num=0)

model.create(H1=1, H2=50)
# model.train (data, learning_rate=0.001, batch_size=100000, lmbda=0, ermul=10000, restore=False)
model.run(data)

################################################################################
# def cost ((lr, bs, fhl, shl)):
#     return model.train_once (data, lr, int(bs), 0, int(fhl), int(shl), 31, 1)
# #(data, 0.003, 5000, 0, 150, 50, 31, 1)
# best = fmin(cost,
#             space,
#             algo=tpe.suggest,
def process_image(self, image):
    """Detect circular needle-center candidates in the left image, map them
    into the right camera frame, triangulate 3-D points, pickle them, and
    shut the ROS node down.

    Draws on `self.left_image` / `self.right_image` as a side effect.
    """
    left, right = [], []
    # NOTE(review): 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy) — confirm cv2 version.
    im2, contours, hierarchy = cv2.findContours(self.preprocess(image),
                                                cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        M = cv2.moments(c)
        area = cv2.contourArea(c)
        # m00 == 0 would divide by zero below; area bounds reject non-needles.
        if int(M["m00"]) != 0 and (self.area_lower < area < self.area_upper):
            # Centroid from image moments.
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            # Contour point nearest the centroid is used as the "center".
            closest = np.vstack(self.closest_to_centroid(c, cX, cY)).squeeze()
            CX, CY = closest[0], closest[1]
            ellipse = cv2.fitEllipse(c)
            (x, y), (ma, MA), angle = ellipse
            aspect_ratio = ma / MA
            ellipse_area = (np.pi * ma * MA) / 4
            left_center = (closest[0], closest[1])
            # Accept only nearly-circular, sufficiently large ellipses.
            if (0.75 < aspect_ratio < 1.0) and self.ellipse_area_lower < ellipse_area:
                left_data = np.matrix([[CX, CY, 0]])
                right_data = transform.transform_data("Left Camera", "Right Camera",
                                                      left_data, self.TL_R,
                                                      data_out=None, verbose=False)
                right_center = (int(right_data[0, 0]), int(right_data[0, 1]))
                left.append(left_center)
                right.append(right_center)
                self.report(c, area, cX, cY, closest, ellipse_area, angle)
                cv2.drawContours(self.left_image, [c], -1, (0, 255, 0), 2)
                cv2.ellipse(self.left_image, ellipse, (255, 0, 0), 2)
                cv2.circle(self.left_image, (cX, cY), 7, (255, 255, 255), -1)
                cv2.putText(self.left_image, "center", (cX - 20, cY - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(self.left_image, left_center, 10, (0, 0, 0), -1)
                cv2.circle(self.right_image, right_center, 10, (0, 0, 0), -1)
            # else:
            #     cv2.drawContours(image, [c], -1, (0, 0, 255), 2)
            #     cv2.ellipse(image, ellipse, (0, 0, 255), 2)
            #     cv2.putText(image, "REJECTED", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    if len(right) > 0 and len(right) == len(left):
        # Triangulate matched left/right pixel pairs into 3-D points.
        pts3d = self.get_points_3d(left, right)
        print("Found")
        self.pts = [(p.point.x, p.point.y, p.point.z) for p in pts3d]
        pprint.pprint(self.pts)
        # NOTE(review): text-mode "w+" with pickle.dump works on Python 2
        # but fails on Python 3 ("wb" required) — confirm target version.
        with open('needle_data/needle_points.p', "w+") as f:
            pickle.dump(self.pts, f)
        rospy.signal_shutdown("Finished.")
def process_image(self, image):
    """Find the needle and its small residual, compute a pull point along the
    residual's best-fit line, mirror both into the right camera frame,
    triangulate 3-D points, pickle them, and shut the ROS node down.

    Draws on `image` and `self.right_image` as a side effect.
    """
    left, right = [], []
    thresh = self.preprocess(image)
    # NOTE(review): 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy) — confirm cv2 version.
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
    # All potential smaller-end needle protrusions
    residuals = [
        c for c in contours
        if self.residual_lower < cv2.contourArea(c) < self.residual_upper
    ]
    not_found = True
    # for r in residuals:
    #     cv2.drawContours(image, [r], 0, (0, 255, 0), 2)
    for c in contours:
        # Get moments and area for given contour
        M = cv2.moments(c)
        area = cv2.contourArea(c)
        # Throw out all non-needle contours; not_found limits processing
        # to the first accepted needle contour.
        if not_found and (self.area_lower < area < self.area_upper):
            # Compute the centroid (center of mass) and center of the given needle
            centroid_x, centroid_y = self.compute_centroid(c, M)
            closest = np.vstack(self.center(c, centroid_x, centroid_y)).squeeze()
            cx, cy = closest[0], closest[1]
            center = (cx, cy)
            # Fit an ellipse to the contour
            ellipse, ellipse_aspect, ellipse_area = self.get_ellipse(c)
            """Contour is the big protruding part of the needle"""
            if self.ellipse_lower < ellipse_area < self.ellipse_upper:
                not_found = False
                # Report/display the large residual
                cv2.putText(image, "centroid", (centroid_x - 20, centroid_y - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(image, center, 10, (255, 0, 0), -1)
                # cv2.circle(image, (centroid_x, centroid_y), 10, (255, 255, 255), -1)
                self.report(area, centroid_x, centroid_y, cx, cy, ellipse_area,
                            'LARGE RESIDUAL')
                # cv2.ellipse(image, ellipse, (0, 0, 255), 2)
                cv2.drawContours(image, [c], 0, (180, 30, 170), 5)
                # Find the corresponding small residual and markup
                residual = self.find_residual(center, residuals)
                if residual is not None:
                    print("SMALL RESIDUAL", cv2.contourArea(residual))
                    # NOTE(review): compute_centroid is called with two args
                    # (c, M) above but one arg here — verify its signature
                    # allows the moments argument to be omitted.
                    residual_centroid = self.compute_centroid(residual)
                    cv2.putText(image, "residual", residual_centroid,
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))
                    cv2.drawContours(image, [residual], 0, (0, 255, 0), 5)
                    cv2.circle(image, residual_centroid, 10, (255, 0, 0), -1)
                    # Fit a line to the small residual
                    [vx, vy, x, y] = cv2.fitLine(residual, cv2.DIST_L2, 0, 0.01, 0.01)
                    # NOTE(review): np.asscalar was removed in NumPy 1.23 —
                    # use .item() when upgrading.
                    dx, dy = np.asscalar(vx), np.asscalar(vy)
                    # rows, cols = image.shape[:2]
                    # lefty = int((-x*vy/vx) + y)
                    # righty = int(((cols-x)*vy/vx)+y)
                    # cv2.line(image,(cols-1,righty),(0,lefty),(0,255,0),2)
                    """Finds a pull point (relative to contour center) in the
                    direction of the best fit line of the smaller residual and
                    opposite (not towards) the smaller residual
                    """
                    if self.distance(residual_centroid, center) > \
                            self.distance(residual_centroid, (cx + dx, cy + dy)):
                        dx, dy = -dx, -dy
                    # Leftover merge-conflict remnants below (kept commented):
                    # the HEAD side's 350-px pull distance is the live code.
                    # <<<<<<< HEAD:stereo/find.py
                    pull_x = int(cx + 350 * dx)
                    pull_y = int(cy + 350 * dy)
                    # =======
                    # pull_x = int(cx + 200*dx)
                    # pull_y = int(cy + 200*dy)
                    # >>>>>>> 4d032d223969dc9f8c8777bfcf2e2dc2f63469e1:stereo/stereo_find_embedded_best_fit.py
                    cv2.circle(image, (pull_x, pull_y), 10, (0, 0, 0), -1)
                    cv2.line(image, center, (pull_x, pull_y), (0, 0, 0), 2)
                    # Compute points in right camera frame (residual center,
                    # contour center, pull point)
                    left_center = np.matrix([cx, cy, 0])
                    left_pull = np.matrix([pull_x, pull_y, 0])
                    # NOTE(review): frame labels differ ("Left Frame"/"Right
                    # Frame" vs "Left"/"Right") — presumably cosmetic/logging
                    # only; confirm transform_data ignores the label strings.
                    right_center = transform.transform_data("Left Frame", "Right Frame",
                                                            left_center, self.TL_R,
                                                            verbose=False)
                    right_pull = transform.transform_data("Left", "Right",
                                                          left_pull, self.TL_R,
                                                          verbose=False)
                    right_cx = int(right_center[0, 0])
                    right_cy = int(right_center[0, 1])
                    right_pull_x = int(right_pull[0, 0])
                    right_pull_y = int(right_pull[0, 1])
                    cv2.circle(self.right_image, (right_cx, right_cy), 10,
                               (0, 0, 0), -1)
                    cv2.circle(self.right_image, (right_pull_x, right_pull_y), 10,
                               (0, 0, 0), -1)
                    cv2.line(self.right_image, (right_cx, right_cy),
                             (right_pull_x, right_pull_y), (0, 0, 0), 2)
                    # Matched order: center first, pull point second, in both
                    # the left and right lists.
                    left.append(center)
                    left.append((pull_x, pull_y))
                    right.append((right_cx, right_cy))
                    right.append((right_pull_x, right_pull_y))
        # elif 250 < area < 500:
        #     cv2.drawContours(image, [c], 0, (0, 255, 255), 2)
    if len(right) > 0 and len(right) == len(left):
        # Triangulate matched left/right pixel pairs into 3-D points.
        pts3d = self.get_points_3d(left, right)
        print("Found")
        self.pts = [(p.point.x, p.point.y, p.point.z) for p in pts3d]
        pprint.pprint(self.pts)
        # NOTE(review): text-mode "w+" with pickle.dump works on Python 2
        # but fails on Python 3 ("wb" required) — confirm target version.
        with open('needle_data/needle_points.p', "w+") as f:
            pickle.dump(self.pts, f)
        rospy.signal_shutdown("Finished.")
def process_image(self, image):
    """Find the needle center and its contour endpoint, reflect the endpoint
    through the center to get an "opposite" point, map both into the right
    camera frame, triangulate 3-D points, pickle them, and shut down ROS.

    Draws on `image` and `self.right_image` as a side effect.
    """
    left, right = [], []
    residuals = []
    thresh = self.preprocess(image)
    # NOTE(review): 3-value findContours return is the OpenCV 3.x API;
    # OpenCV 4.x returns only (contours, hierarchy) — confirm cv2 version.
    im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
    # residuals = []
    for c in contours:
        M = cv2.moments(c)
        area = cv2.contourArea(c)
        # if (self.residual_lower < area < self.residual_upper):
        #     residuals.append(c)
        if (self.area_lower < area < self.area_upper):
            # Moment centroid (cx, cy) and nearest contour point (CX, CY).
            cx, cy = self.compute_centroid(c, M)
            closest = np.vstack(self.center(c, cx, cy)).squeeze()
            CX, CY = closest[0], closest[1]
            true_center = (CX, CY)
            ellipse, ellipse_aspect, ellipse_area = self.get_ellipse(c)
            """Contour is the big protruding part of the needle"""
            if self.ellipse_lower < ellipse_area < self.ellipse_upper:
                # Contour endpoint, then the point opposite it: reflect the
                # endpoint through the center (O = C + (C - E)).
                endpoint = tuple(
                    np.vstack(self.endpoint(c, cx, cy)).squeeze())
                EX, EY = endpoint[0], endpoint[1]
                dx, dy = CX - EX, CY - EY
                OX, OY = CX + dx, CY + dy
                # Need (OX, OY), (CX, CY) in the right frame
                opp_array = np.array([OX, OY, 0])
                center_array = np.array([CX, CY, 0])
                left_data = np.matrix([opp_array, center_array])
                right_data = transform.transform_data("Left Camera", "Right Camera",
                                                      left_data, self.TL_R,
                                                      data_out=None, verbose=False)
                # Row 0 is the opposite point, row 1 the center (same order
                # as left_data above).
                right_OX, right_OY = int(right_data[0, 0]), int(right_data[0, 1])
                right_CX, right_CY = int(right_data[1, 0]), int(right_data[1, 1])
                # Matched order: center first, opposite point second, in both
                # the left and right lists.
                left.append(true_center)
                left.append((OX, OY))
                right.append((right_CX, right_CY))
                right.append((right_OX, right_OY))
                self.report(area, cx, cy, CX, CY, ellipse_area)
                cv2.putText(self.right_image, "center",
                            (right_CX - 20, right_CY - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(self.right_image, (right_OX, right_OY), 10,
                           (255, 0, 0), -1)
                cv2.circle(self.right_image, (right_CX, right_CY), 10,
                           (0, 0, 0), -1)
                cv2.line(self.right_image, (right_OX, right_OY),
                         (right_CX, right_CY), (0, 0, 0), 10)
                cv2.putText(image, "center", (cx - 20, cy - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                cv2.circle(image, true_center, 10, (0, 0, 0), -1)
                cv2.circle(image, (cx, cy), 10, (255, 255, 255), -1)
                cv2.ellipse(image, ellipse, (0, 0, 255), 2)
                cv2.drawContours(image, [c], 0, (0, 255, 255), 2)
                # cv2.circle(image, (EX, EY), 10, (0, 170, 0), -1)
                cv2.circle(image, (OX, OY), 10, (255, 0, 0), -1)
                # cv2.line(image, true_center, (EX, EY), (255, 0, 0), 10)
                cv2.line(image, true_center, (OX, OY), (0, 0, 0), 10)
            # else:
            #     cv2.drawContours(image, [c], -1, (0, 0, 255), 2)
            #     cv2.ellipse(image, ellipse, (0, 0, 255), 2)
            #     cv2.putText(image, "REJECTED", (cX - 20, cY - 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
    if len(right) > 0 and len(right) == len(left):
        # Debug: per-pair pixel distance between right and left points.
        for pair in zip(right, left):
            print(self.distance(
                np.array(pair[0]).reshape(1, 2), (pair[1])))
        # Triangulate matched left/right pixel pairs into 3-D points.
        pts3d = self.get_points_3d(left, right)
        print("Found")
        self.pts = [(p.point.x, p.point.y, p.point.z) for p in pts3d]
        pprint.pprint(self.pts)
        # NOTE(review): text-mode "w+" with pickle.dump works on Python 2
        # but fails on Python 3 ("wb" required) — confirm target version.
        with open('needle_data/needle_points.p', "w+") as f:
            pickle.dump(self.pts, f)
        rospy.signal_shutdown("Finished.")
    # NOTE(review): this chunk begins mid-function — the `except EOFError`
    # below is the tail of a pickle-loading generator (`load_all`) whose
    # `def`/`try` are above this view; indentation is a best-effort guess.
    except EOFError:
        return


def print_cache(lst, heading):
    # Pretty-print a cached structure under a labeled heading, for manual
    # inspection of the calibration data on stdout.
    print(heading)
    print('---')
    pprint.pprint(lst)


if __name__ == '__main__':
    # Load cached calibration data recorded by earlier runs.
    endoscope_chesspts = list(load_all('camera_data/endoscope_chesspts.p'))
    # camera_info = list(load_all('camera_data/camera_info.p'))
    left_chesspts = np.matrix(list(load_all('camera_data/left_chesspts'))[0])
    right_chesspts = np.matrix(list(load_all('camera_data/right_chesspts'))[0])

    # Lift the 2-D chessboard corners to 3-D by appending a zero z-column.
    # (Assumes 25 corners per board — TODO confirm against the board used.)
    z = np.zeros((25, 1))
    left_chesspts = np.hstack((left_chesspts, z))
    right_chesspts = np.hstack((right_chesspts, z))

    print_cache(endoscope_chesspts, "ENDOSCOPE CHESSPOINTS")
    # print_cache(camera_info, "CAMERA INFO")
    print_cache(left_chesspts, "LEFT CHESSPOINTS")
    print_cache(right_chesspts, "RIGHT CHESSPOINTS")

    # Solve the left-camera -> right-camera transform from corresponding
    # chessboard points, then apply it to the left points (right_chesspts is
    # presumably passed as reference output for error reporting — verify
    # against transform.transform_data's signature).
    TL_R = transform.get_transform("Left Camera", "Right Camera",
                                   left_chesspts, right_chesspts)
    L_R = transform.transform_data("Left Camera", "Right Camera",
                                   left_chesspts, TL_R, right_chesspts)
# Fixed start pose: position near the upper-left corner, gripper sideways.
pos2 = PyKDL.Vector(-0.0972128, -0.0170138, -0.106974)
sideways = PyKDL.Rotation(-0.453413, 0.428549, -0.781513,
                          -0.17203, 0.818259, 0.548505,
                          0.874541, 0.383143, -0.297286)

""" Move to arbitrary start position (near upper left corner) & release
anything gripper is holding. """
home(psm2, pos2, sideways)

""" Get PSM and endoscope calibration data (25 corresponding chess points) """
psm2_calibration_data = list(
    transform.load_all('utils/psm2_recordings.txt'))
# Both point sets are flattened onto best-fit planes before solving.
psm2_calibration_matrix = transform.fit_to_plane(
    transform.psm_data_to_matrix(psm2_calibration_data))
endoscope_calibration_matrix = transform.fit_to_plane(
    np.matrix(
        list(read_camera.load_all('camera_data/endoscope_chesspts.p'))[0]))

world = transform.generate_world()

# Solve the endoscope -> PSM2 transform from the corresponding calibration
# points, then map the endoscope points into the PSM2 frame (the PSM2 matrix
# is presumably passed as reference output for error reporting — verify
# against transform.transform_data's signature).
TE_2 = transform.get_transform("Endoscope", "PSM2",
                               endoscope_calibration_matrix,
                               psm2_calibration_matrix)
psme_2 = transform.transform_data("Endoscope", "PSM2",
                                  endoscope_calibration_matrix, TE_2,
                                  psm2_calibration_matrix)
pprint.pprint(psme_2)

""" Move to chessboard corner, descend, come up, and go to next. """
move_to(psm2, psme_2.tolist(), z_upper)
home(psm2, pos, rot)
# Fixed start pose: position near the upper-left corner, gripper sideways.
pos2 = PyKDL.Vector(-0.0972128, -0.0170138, -0.106974)
sideways = PyKDL.Rotation(-0.453413, 0.428549, -0.781513,
                          -0.17203, 0.818259, 0.548505,
                          0.874541, 0.383143, -0.297286)

""" Move to arbitrary start position (near upper left corner) & release
anything gripper is holding. """
# home(psm2, pos, rot)
home(psm2, pos2, sideways)

""" Get PSM and endoscope calibration data (25 corresponding chess points) """
psm2_calibration_data = list(
    transform.load_all('../utils/psm2_recordings.txt'))
psm2_calibration_matrix = transform.psm_data_to_matrix(
    psm2_calibration_data)
endoscope_calibration_matrix = np.matrix(
    list(read_camera.load_all('../camera_data/endoscope_chesspts.p'))[0])

""" Get the coordinates of most recently found needle centers (in endoscope frame) """
needle_points = np.matrix(
    list(read_needle.load_all('needle_data/needle_points.p'))[0])

""" Solve for the transform between endoscope to PSM2 """
TE_2 = transform.get_transform("Endoscope", "PSM2",
                               endoscope_calibration_matrix,
                               psm2_calibration_matrix)
# Map needle centers from the endoscope frame into PSM2 coordinates.
needle_to_psm2 = transform.transform_data("Endoscope", "PSM2",
                                          needle_points, TE_2)
pprint.pprint(needle_to_psm2)

""" Move to needle centers, pick them up, and release them """
pull(psm2, needle_to_psm2.tolist(), z_upper, z_lower)
home(psm2, pos2, rot)
def get_player_matches_data(api_endpoint):
    """Fetch raw player-match data from `api_endpoint` and return it
    transformed by the 'players' resource pipeline."""
    raw_response = extract.get_raw_data(api_endpoint=api_endpoint)
    return transform.transform_data(dict_response=raw_response,
                                    resource='players')
def get_competition_matches_data(api_endpoint):
    """Fetch raw competition-match data from `api_endpoint` and return it
    transformed by the 'competitions' resource pipeline."""
    raw_response = extract.get_raw_data(api_endpoint=api_endpoint)
    return transform.transform_data(dict_response=raw_response,
                                    resource='competitions')
# import required libraries
import requests
import json
from transform import transform_data
from output_to_csv import output_to_csv

# start a requests session
session = requests.Session()

# Request the GDACS disaster archive.
# Fix: a timeout prevents the script hanging indefinitely on a stalled
# connection, and raise_for_status surfaces HTTP errors as a clear exception
# instead of a confusing JSON decode failure downstream.
response = session.get('http://www.gdacs.org/xml/archive.geojson', timeout=30)
response.raise_for_status()

# convert to JSON
disaster_data = json.loads(response.text)

# transform the data
transformed_data = transform_data(disaster_data)

# output the data to CSV
output_to_csv(transformed_data)

# debug point
end = True
# import required libraries
import requests
import json
from transform import transform_data

# start a requests session
session = requests.Session()

# Request the GDACS disaster archive.
# Fix: a timeout prevents the script hanging indefinitely on a stalled
# connection, and raise_for_status surfaces HTTP errors as a clear exception
# instead of a confusing JSON decode failure downstream.
response = session.get('http://www.gdacs.org/xml/archive.geojson', timeout=30)
response.raise_for_status()

# convert to JSON
disaster_data = json.loads(response.text)

# transform the data
output = transform_data(disaster_data)

# debug point
end = True