def main():
    I, Y = load(40)
    I = I[20:40]
    X = preprocess(I, num_levels=4)
    for x, i in zip(X, I):
        plt.imshow(i)
        plt.show()
        plt.imshow(features2image2(x, level=4))
        plt.show()
    vis(X, Y)
def main():
    x_train = np.mat([[0], [1], [0], [1]])
    y_train = np.mat([[1], [0], [1], [0]])

    model = NOT()
    minimize_operation = tf.train.AdamOptimizer(0.01).minimize(model.loss)

    session = tf.Session()
    session.run(tf.global_variables_initializer())

    for epoch in range(10000):
        session.run(minimize_operation, {model.x: x_train, model.y: y_train})

    W, b, loss = session.run([model.W, model.b, model.loss], {
        model.x: x_train,
        model.y: y_train
    })
    print("W = %s, b = %s, \n loss = \n %s" % (W, b, loss))

    session.close()

    graph = vis(W, b)
    graph.plot(x_train, y_train)
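# A minimal sketch of the NOT model assumed by main() above: a single sigmoid
# unit trained with sigmoid cross-entropy, using the TF1 API the rest of the
# code relies on. Only the attribute names (x, y, W, b, loss) come from
# main(); the body is an assumption, not the original implementation.
import tensorflow as tf


class NOT:
    def __init__(self):
        self.x = tf.placeholder(tf.float32, [None, 1])
        self.y = tf.placeholder(tf.float32, [None, 1])
        self.W = tf.Variable([[0.0]])
        self.b = tf.Variable([[0.0]])
        logits = tf.matmul(self.x, self.W) + self.b  # pre-activation output
        self.f = tf.sigmoid(logits)  # predicted probability of class 1
        self.loss = tf.losses.sigmoid_cross_entropy(self.y, logits)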
def main():
    x_train = np.mat([[0, 0], [0, 1], [1, 0], [1, 1]])
    y_train = np.mat([[0], [1], [1], [0]])

    model = XOR()
    minimize_operation = tf.train.AdamOptimizer(0.01).minimize(model.loss)

    session = tf.Session()
    session.run(tf.global_variables_initializer())

    for epoch in range(10000):
        session.run(minimize_operation, {model.x: x_train, model.y: y_train})

    W1, W2, b1, b2, loss = session.run(
        [model.W1, model.W2, model.b1, model.b2, model.loss], {
            model.x: x_train,
            model.y: y_train
        })
    print("W1 = %s, W2 = %s, b1 = %s, b2 = %s, \n loss = \n %s" %
          (W1, W2, b1, b2, loss))

    session.close()

    graph = vis(W1, W2, b1, b2)
    graph.plot(x_train, y_train)
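# A minimal sketch of the XOR model assumed by main() above: one hidden layer
# of two sigmoid units feeding a sigmoid output, matching the W1/W2/b1/b2
# names read back in main(). Random (non-zero) initialization matters here,
# since XOR is not linearly separable. The body is an assumption, not the
# original implementation.
import tensorflow as tf


class XOR:
    def __init__(self):
        self.x = tf.placeholder(tf.float32, [None, 2])
        self.y = tf.placeholder(tf.float32, [None, 1])
        self.W1 = tf.Variable(tf.random_uniform([2, 2], -1, 1))
        self.b1 = tf.Variable(tf.zeros([2]))
        self.W2 = tf.Variable(tf.random_uniform([2, 1], -1, 1))
        self.b2 = tf.Variable(tf.zeros([1]))
        h = tf.sigmoid(tf.matmul(self.x, self.W1) + self.b1)  # hidden layer
        logits = tf.matmul(h, self.W2) + self.b2
        self.f = tf.sigmoid(logits)  # predicted probability of class 1
        self.loss = tf.losses.sigmoid_cross_entropy(self.y, logits)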
def compute_and_draw_line(self, p1x, p1y, p2x, p2y, p3x, p3y, p4x, p4y, p5x,
                          p5y, p6x=None, p6y=None, p7x=None, p7y=None,
                          p8x=None, p8y=None, p9x=None, p9y=None,
                          densities=None, image_size=32):
    """
    Computes and draws line segments into images, batchwise.

    Each pNx/pNy pair is a [batch_size] vector holding one coordinate of the
    N-th control point of a substroke, i.e. a [batch_size, 5, 2] batch of
    control points ([batch_size, 9, 2] when p6..p9 and densities are given)
    split into per-coordinate vectors.

    :param densities: optional per-segment densities [batch_size, 8]
    :param image_size: height and width of the output images
    :return: batch of rendered images [batch_size, image_size, image_size]
    """
    # fixed batch size of 100
    image_combined = torch.zeros(100, image_size, image_size).cuda()
    vis_instance = vis()
    if densities is not None:
        # 9 control points -> 8 line segments
        points = [
            p1x, p1y, p2x, p2y, p3x, p3y, p4x, p4y, p5x, p5y, p6x, p6y, p7x,
            p7y, p8x, p8y, p9x, p9y
        ]
        for i in range(8):
            image_combined = vis_instance.render_line(
                x1=points[2 * i],
                y1=points[2 * i + 1],
                x2=points[2 * i + 2],
                y2=points[2 * i + 3],
                image_h=image_size,
                image_w=image_size,
                image=image_combined,
                density=densities[:, i])
    else:
        # 5 control points -> 4 line segments
        points = [p1x, p1y, p2x, p2y, p3x, p3y, p4x, p4y, p5x, p5y]
        for i in range(4):
            image_combined = vis_instance.render_line(
                x1=points[2 * i],
                y1=points[2 * i + 1],
                x2=points[2 * i + 2],
                y2=points[2 * i + 3],
                image_h=image_size,
                image_w=image_size,
                image=image_combined)
    return image_combined
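# A hedged usage sketch for compute_and_draw_line (hypothetical caller):
# split a [batch_size, 5, 2] batch of substroke control points into the
# per-coordinate vectors the method expects. `renderer` stands in for
# whatever object owns compute_and_draw_line and the vis()/render_line
# helpers; it is not named in the original code.
def render_substrokes(renderer, strokes):
    # strokes: [batch_size, 5, 2] control points in pixel coordinates
    p = strokes
    return renderer.compute_and_draw_line(
        p[:, 0, 0], p[:, 0, 1], p[:, 1, 0], p[:, 1, 1], p[:, 2, 0],
        p[:, 2, 1], p[:, 3, 0], p[:, 3, 1], p[:, 4, 0], p[:, 4, 1],
        image_size=32)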
def main():
    x_arr = []
    y_arr = []
    z_arr = []
    with open(
            '/home/vebovs/Desktop/machine-learning/regression/linear_regression_3d/day_length_weight.csv'
    ) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            x = [float(row[2])]
            y = [float(row[1])]
            z = [float(row[0])]
            x_arr.append(x)
            y_arr.append(y)
            z_arr.append(z)

    x_train = np.mat(x_arr)  # weight
    y_train = np.mat(y_arr)  # length
    z_train = np.mat(z_arr)  # day

    model = lgm()
    learning_rate = 0.0000001
    minimize_operation = tf.train.GradientDescentOptimizer(
        learning_rate).minimize(model.loss)

    session = tf.Session()
    session.run(tf.global_variables_initializer())

    for epoch in range(10000):
        session.run(minimize_operation, {
            model.x: x_train,
            model.y: y_train,
            model.z: z_train
        })

    W, M, b, loss = session.run([model.W, model.M, model.b, model.loss], {
        model.x: x_train,
        model.y: y_train,
        model.z: z_train
    })
    print("W = %s, M = %s, b = %s, loss = %s" % (W, M, b, loss))

    session.close()

    graph = vis(W, M, b)
    graph.plot(x_arr, y_arr, z_arr, x_train, y_train, z_train, 'weight',
               'length', 'day')
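# A minimal sketch of the lgm model assumed by main() above: a plane
# z = x*W + y*M + b fitted by mean squared error. Only the attribute names
# (x, y, z, W, M, b, loss) come from main(); the body is an assumption, not
# the original implementation.
import tensorflow as tf


class lgm:
    def __init__(self):
        self.x = tf.placeholder(tf.float32, [None, 1])  # weight
        self.y = tf.placeholder(tf.float32, [None, 1])  # length
        self.z = tf.placeholder(tf.float32, [None, 1])  # day
        self.W = tf.Variable([[0.0]])
        self.M = tf.Variable([[0.0]])
        self.b = tf.Variable([[0.0]])
        f = tf.matmul(self.x, self.W) + tf.matmul(self.y, self.M) + self.b
        self.loss = tf.reduce_mean(tf.square(f - self.z))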
def visual(output: torch.Tensor, ratio: float, raw_img: np.ndarray,
           cls_conf=0.35) -> np.ndarray:
    if output is None:
        return raw_img
    output = output.cpu()

    bboxes = output[:, 0:4]
    # undo the preprocessing resize
    bboxes /= ratio

    cls = output[:, 6]
    scores = output[:, 4] * output[:, 5]

    vis_res = vis(raw_img, bboxes, scores, cls, cls_conf, COCO_CLASSES)
    return vis_res
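# Hedged usage sketch: from the indexing in visual() above, `output` is a
# per-image [num_dets, 7] tensor of (x1, y1, x2, y2, obj_conf, cls_conf, cls);
# `outputs`, `ratio`, and `raw_img` are hypothetical caller-side names.
#
#   annotated = visual(outputs[0], ratio, raw_img, cls_conf=0.35)
#   cv2.imwrite("annotated.jpg", annotated)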
def visual(self, output, img_info, cls_conf=0.35):
    ratio = img_info["ratio"]
    img = img_info["raw_img"]
    if output is None:
        return img
    output = output.numpy()

    # undo the preprocessing resize
    bboxes = output[:, 0:4] / ratio

    cls = output[:, 6]
    scores = output[:, 4] * output[:, 5]

    vis_res = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
    return vis_res
def main():
    x_arr = []
    y_arr = []
    with open(
            '/home/vebovs/Desktop/machine-learning/regression/non_linear_regression_2d/day_head_circumference.csv'
    ) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            x = [float(row[0])]
            y = [float(row[1])]
            x_arr.append(x)
            y_arr.append(y)

    x_train = np.mat(x_arr)  # day
    y_train = np.mat(y_arr)  # head circumference

    model = lgm()
    learning_rate = 0.00001
    minimize_operation = tf.train.AdamOptimizer(learning_rate).minimize(
        model.loss)

    session = tf.Session()
    session.run(tf.global_variables_initializer())

    for epoch in range(10000):
        session.run(minimize_operation, {model.x: x_train, model.y: y_train})

    W, b, loss = session.run([model.W, model.b, model.loss], {
        model.x: x_train,
        model.y: y_train
    })
    print("W = %s, b = %s, loss = %s" % (W, b, loss))

    session.close()

    graph = vis(W, b)
    graph.plot(x_arr, x_train, y_train, 'day', 'head circumference')
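# A minimal sketch of the non-linear lgm model assumed by main() above: a
# sigmoid curve fitted by mean squared error. Only the attribute names
# (x, y, W, b, loss) come from main(); the curve a*sigmoid(x*W + b) + c and
# its constants are assumptions chosen to keep predictions in a plausible
# head-circumference range in centimetres.
import tensorflow as tf


class lgm:
    def __init__(self):
        self.x = tf.placeholder(tf.float32, [None, 1])  # day
        self.y = tf.placeholder(tf.float32, [None, 1])  # head circumference
        self.W = tf.Variable([[0.0]])
        self.b = tf.Variable([[0.0]])
        # assumed constants: a = 20, c = 31
        f = 20 * tf.sigmoid(tf.matmul(self.x, self.W) + self.b) + 31
        self.loss = tf.reduce_mean(tf.square(f - self.y))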
def yolox(source, model_name, model_size, video_path, output, fps, frame_size):
    """
    YOLOX-based object detector: detects objects in a video or camera stream.

    :param source: if truthy, read frames from video_path instead of the camera
    :param model_name: the name of the model to use
    :param model_size: size of the model input (width, height)
    :param video_path: path to the video file
    :param output: the output file name
    :param fps: the FPS (frames per second) of the output video
    :param frame_size: the size of the frames to be saved
    """
    model_w = model_size[0]
    model_h = model_size[1]
    # click.echo(click.get_current_context().params)
    device_info = getDeviceInfo()  # type: dai.DeviceInfo
    with dai.Device(create_pipeline(source, model_name, model_w, model_h),
                    device_info) as device:
        print("Starting pipeline...")
        fps_handler = FPSHandler()
        if source:
            cap = cv2.VideoCapture(video_path)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            frame_shape = [frame_height, frame_width]
            print("CAP_PROP_FRAME_SHAPE: %s" % frame_shape)
            cap_fps = int(cap.get(cv2.CAP_PROP_FPS))
            print("CAP_PROP_FPS: %d" % cap_fps)
            yolox_det_in = device.getInputQueue("yolox_det_in")
        else:
            cam_out = device.getOutputQueue("cam_out", 1, True)
        # the detection output queue is needed in both modes
        yolox_det_nn = device.getOutputQueue("yolox_det_nn")

        def should_run():
            if source:
                return cap.isOpened()
            else:
                return True

        def get_frame():
            """
            Get the current frame from the video or the camera and return it.
            """
            if source:
                return cap.read()
            else:
                return True, cam_out.get().getCvFrame()

        if output:
            output.parent.mkdir(parents=True, exist_ok=True)
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            writer = cv2.VideoWriter(str(output), fourcc, fps, frame_size)

        while should_run():
            read_correctly, frame = get_frame()
            fps_handler.tick("Frame")

            if not read_correctly:
                break

            frame_debug = frame.copy()
            if source:
                run_nn(yolox_det_in, to_planar(frame, (model_h, model_w)),
                       model_w, model_h)
            yolox_det_data = yolox_det_nn.get()

            res = toTensorResult(yolox_det_data).get("output")
            fps_handler.tick("nn")
            predictions = demo_postprocess(res, (model_h, model_w),
                                           p6=False)[0]

            boxes = predictions[:, :4]
            scores = predictions[:, 4, None] * predictions[:, 5:]

            # convert (cx, cy, w, h) to (x1, y1, x2, y2)
            boxes_xyxy = np.ones_like(boxes)
            boxes_xyxy[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
            boxes_xyxy[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
            boxes_xyxy[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
            boxes_xyxy[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0

            # map boxes from model input space back to frame space
            input_shape = np.array([model_h, model_w])
            min_r = (input_shape / frame.shape[:2]).min()
            offset = (np.array(frame.shape[:2]) * min_r - input_shape) / 2
            offset = np.ravel([offset, offset])
            boxes_xyxy = (boxes_xyxy + offset[::-1]) / min_r

            dets = multiclass_nms(boxes_xyxy, scores, nms_thr=0.45,
                                  score_thr=0.2)
            if dets is not None:
                final_boxes = dets[:, :4]
                final_scores, final_cls_inds = dets[:, 4], dets[:, 5]
                frame_debug = vis(
                    frame_debug,
                    final_boxes,
                    final_scores,
                    final_cls_inds,
                    conf=0.5,
                    class_names=LABELS.get(model_name),
                )
            cv2.imshow("", frame_debug)
            if output:
                writer.write(cv2.resize(frame_debug, frame_size))
            key = cv2.waitKey(1)
            if key in [ord("q"), 27]:
                break
            elif key == ord("s"):
                cv2.imwrite(
                    "saved_%s.jpg"
                    % time.strftime("%Y%m%d_%H%M%S", time.localtime()),
                    frame_debug,
                )

        fps_handler.printStatus()
        if source:
            cap.release()
        if output:
            writer.release()
        cv2.destroyAllWindows()
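# Hedged usage sketch (hypothetical values): run the detector on a local video
# and write the annotated frames to out.mp4. output.parent.mkdir(...) above
# implies `output` is a pathlib.Path; "yolox_nano" stands in for whatever
# model names LABELS actually contains.
#
#   from pathlib import Path
#   yolox(source=True, model_name="yolox_nano", model_size=(416, 416),
#         video_path="demo.mp4", output=Path("out.mp4"), fps=30,
#         frame_size=(640, 360))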
u = irisdat[i]
if i <= 50:
    val.append([u, 0])
elif 50 < i <= 100:
    val.append([u, 1])
elif 100 < i <= 150:
    val.append([u, 2])

random.shuffle(val)
training = val[0:100]  # first 100 shuffled samples for training
testing = val[100:]  # remaining 50 for testing

iters = 1000
NN.train(training, iters, 0.32)
#print("Previously seen (training) example progress: ")
#NN.test(training)
#print("----------------------------------------------")
print("New and Never Before Seen (testing) example set: ")
NN.test(testing)
vis(NN.totalErr)

end_time = time.process_time()
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Time Started: ", starting_time)
print("Time Finished: ", current_time)
print("Time Elapsed: ", (end_time - begin))
print("Learning Rate: " + str(NN.alpha))
print("Iterations Trained: " + str(iters))