def parse_bev_predmap(predmap, anchors):
    # Build an (OUTPUT_Y, OUTPUT_X) grid of cell indices and append it to the prediction map.
    xmap = np.tile(
        np.array(range(cfg.BEV.OUTPUT_Y))[:, np.newaxis],
        [1, cfg.BEV.OUTPUT_X])
    ymap = np.tile(
        np.array(range(cfg.BEV.OUTPUT_X))[np.newaxis, :],
        [cfg.BEV.OUTPUT_Y, 1])
    xy_grid = np.stack((xmap, ymap), axis=-1)
    predmap = np.concatenate((predmap, xy_grid), axis=-1)

    # Keep only cells whose objectness passes the threshold.
    preds = predmap[math.sigmoid(predmap[..., 0]) > 0.6]
    objness = math.sigmoid(preds[..., 0])[..., np.newaxis]
    clsness = math.sigmoid(preds[..., 1:cfg.CONTFUSE.CLASSES_NUM + 1])
    box = preds[..., cfg.CONTFUSE.CLASSES_NUM + 1:-2].reshape(
        -1, cfg.CONTFUSE.CLASSES_NUM, cfg.BEV.BBOX_DIM)

    # Class score = class probability * objectness; pick the best class per cell.
    prob = clsness * objness
    cls_max_prob = np.max(prob, axis=-1)
    cls_idx = np.argmax(prob, axis=-1)
    box = box[np.arange(box.shape[0]), cls_idx]

    # Decode center, size and heading back to metric BEV coordinates.
    xx = preds[..., -2] - box[..., 0] * anchors[cls_idx, 3]
    yy = preds[..., -1] - box[..., 1] * anchors[cls_idx, 4]
    x = cfg.BEV.X_MAX - xx * cfg.BEV.X_RESOLUTION * cfg.BEV.STRIDE
    y = cfg.BEV.Y_MAX - yy * cfg.BEV.Y_RESOLUTION * cfg.BEV.STRIDE
    hwl = box[..., 2:5] * anchors[cls_idx][..., :3]
    theta = np.arctan2(np.sin(box[..., 5]), np.cos(box[..., 5]))

    result = np.stack([
        cls_idx, cls_max_prob, x, y,
        hwl[..., 0], hwl[..., 1], hwl[..., 2], theta
    ], axis=-1)
    return result[cls_max_prob > 0.6]
def backpropagation_algo(self, t_image, t_ans):
    # Gradient accumulators, one array per layer of biases / weights.
    update_b = [np.zeros(feature_bias.shape) for feature_bias in self.feature_bias]
    update_w = [np.zeros(feature_weight.shape) for feature_weight in self.feature_weights]

    # Forward pass: collect pre-activations (derivative) and activations (changes).
    derivative, changes, change = self.update_derivate_changes([], [t_image], t_image)

    # Output-layer error: (prediction - target) * sigmoid'(z).
    derivative_sig = sigmoid(derivative[-1])
    delta = (changes[-1] - t_ans) * (derivative_sig * (1 - derivative_sig))
    update_b[-1] = delta
    update_w[-1] = np.dot(delta, changes[-2].transpose())

    # Propagate the error backwards through the remaining layers.
    for layer_no in range(2, self.num_layers):
        derivative_result = derivative[-layer_no]
        derivative_result_sig = sigmoid(derivative_result)
        derivative_sig_prime = derivative_result_sig * (1 - derivative_result_sig)
        delta = np.dot(self.feature_weights[-layer_no + 1].transpose(),
                       delta) * derivative_sig_prime
        update_b[-layer_no] = delta
        update_w[-layer_no] = np.dot(delta, changes[-layer_no - 1].transpose())
    return (update_b, update_w)
def _sample_laddered_layer(lateral, s):
    # Add the lateral biases, then sample each unit conditioned on the
    # previously sampled units in the same layer.
    s[..., :] += lateral[:, 0]
    s[..., 0] = sample_indicator(sigmoid(s[..., 0]))
    for i in range(1, s.shape[-1]):
        j = min(i, lateral.shape[1] - 1)
        s[..., i] += s[..., i - j:i].dot(lateral[i, j:0:-1])
        s[..., i] = sample_indicator(sigmoid(s[..., i]))
    return s
def sample_generative_dist(self, size=None, all_layers=False, top_units=None):
    """ Sample the generative distribution.

    Parameters
    ----------
    size : int, optional [default None]
        The number of samples to draw. If None, a single sample is returned.

    all_layers : bool, optional [default False]
        By default, an array of input unit samples is returned. If
        `all_layers` is True, a list of sample arrays for *all* the layers,
        in top-to-bottom order, is returned.

    top_units : bit vector, optional
        By default, the top-level units are sampled from the generative
        biases. This parameter clamps the top-level units to specific values.

    Returns
    -------
    A (list of) 2D sample array(s), where the first dimension indexes the
    individual samples. See the `all_layers` parameter.
    """
    d = self.G_top if top_units is None else top_units
    if size is not None:
        d = np.tile(d, (size, 1))
    if top_units is None:
        d = sample_indicator(sigmoid(d))
    samples = _sample_factorial_network(self.G, d)
    return samples if all_layers else samples[-1]
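# A small worked sketch (added for illustration; not from the original source)
# of the top-down factorial sampling that sample_generative_dist performs.
# The layer sizes, weights, and the local _sigmoid helper below are made-up
# assumptions; the real code uses the module-level sigmoid / sample_indicator.
#
#     import numpy as np
#
#     rng = np.random.default_rng(0)
#
#     def _sigmoid(x):
#         return 1.0 / (1.0 + np.exp(-x))
#
#     G_top = rng.normal(size=4)          # generative biases of a 4-unit top layer
#     G = [rng.normal(size=(5, 8))]       # 4 units + 1 bias row -> 8 input units
#
#     # Sample the top layer from its biases, then sample the layer below
#     # conditioned on it, exactly as _sample_factorial_network does.
#     top = (rng.random((10, 4)) < _sigmoid(np.tile(G_top, (10, 1)))).astype(float)
#     bottom = (rng.random((10, 8)) <
#               _sigmoid(top.dot(G[0][:-1]) + G[0][-1])).astype(float)
#     print(bottom.shape)                 # (10, 8): ten samples of the 8 input units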
def _generative_probs_for_sample(self, samples):
    """ The generative probabilities for each unit in the network, given a
    sample of the hidden units.
    """
    probs = _probs_for_factorial_network(self.G, samples)
    probs.insert(0, sigmoid(self.G_top))
    return probs
def compute_control_images(play_data, image_wd, image_ht):
    # Split the tracking data by team and compare the summed influence maps.
    home = play_data[play_data.Team == 'home']
    away = play_data[play_data.Team == 'away']
    team_0 = compute_image_for_team(away, image_wd, image_ht)
    team_1 = compute_image_for_team(home, image_wd, image_ht)
    return sigmoid(team_0.sum(axis=0) - team_1.sum(axis=0))
def predict(self, X):
    """
    :param X: test data
    :return: predicted y labels for the given X
    """
    return np.round(sigmoid(X.dot(self.param))).astype(int)
def update_derivate_changes(self, derivative, changes, change):
    # Forward pass: record each layer's pre-activation (derivative) and
    # sigmoid activation (changes).
    for feature_bias, feature_weight in zip(self.feature_bias, self.feature_weights):
        derivative_result = np.dot(feature_weight, change) + feature_bias
        derivative.append(derivative_result)
        change = sigmoid(derivative_result)
        changes.append(change)
    return derivative, changes, change
def top_conditional_probs(self):
    """ For each unit in the top layer, compute the probability that it's on
    given that its parents are off.
    """
    top_bias = sigmoid(self.G_lateral[0][:, 0])
    probs = np.empty(top_bias.shape)
    probs[0] = top_bias[0]
    probs[1:] = np.cumprod(1 - top_bias)[:-1] * top_bias[1:]
    return probs
def _probs_for_boltzmann_layer(group_size, p):
    # Shortcut for the degenerate case.
    if group_size == 1:
        return sigmoid(p, out=p)
    p.shape = p.shape[:-1] + (-1, group_size)
    boltzmann_dist(p, axis=-1, out=p)
    p.shape = p.shape[:-2] + (-1,)
    return p
def _sample_boltzmann_layer(group_size, s):
    # Shortcut for the degenerate case.
    if group_size == 1:
        return sample_indicator(sigmoid(s, out=s), out=s)
    s.shape = s.shape[:-1] + (-1, group_size)
    boltzmann_dist(s, axis=-1, out=s)
    sample_exclusive_indicators(s, axis=-1, out=s)
    s.shape = s.shape[:-2] + (-1,)
    return s
def parse_img_predmap(predmap, anchors):
    # Broadcast the anchors over the output grid.
    anchor_shape = [
        cfg.IMAGE.OUTPUT_H, cfg.IMAGE.OUTPUT_W, anchors.shape[0], anchors.shape[1]
    ]
    anchors = np.broadcast_to(np.array(anchors), anchor_shape)

    # Build an (OUTPUT_H, OUTPUT_W, ANCHORS_NUM, 2) grid of cell indices.
    h = np.tile(
        np.array(range(cfg.IMAGE.OUTPUT_H))[:, np.newaxis],
        [1, cfg.IMAGE.OUTPUT_W])
    w = np.tile(
        np.array(range(cfg.IMAGE.OUTPUT_W))[np.newaxis, :],
        [cfg.IMAGE.OUTPUT_H, 1])
    hw_grid = np.stack((h, w), axis=-1)
    hw_shape = [cfg.IMAGE.OUTPUT_H, cfg.IMAGE.OUTPUT_W, cfg.IMAGE.ANCHORS_NUM, 2]
    hw_grid = np.tile(hw_grid, cfg.IMAGE.ANCHORS_NUM).reshape(hw_shape)

    box_shape = [
        cfg.IMAGE.OUTPUT_H, cfg.IMAGE.OUTPUT_W, cfg.IMAGE.ANCHORS_NUM,
        cfg.CONTFUSE.CLASSES_NUM + cfg.IMAGE.BBOX_DIM + 1
    ]
    predmap = predmap.reshape(box_shape)
    predmap = np.concatenate((predmap, hw_grid, anchors), axis=-1)

    # Keep only anchors whose objectness passes the threshold.
    preds = predmap[math.sigmoid(predmap[..., 0]) > 0.5]
    objness = math.sigmoid(preds[..., 0])[..., np.newaxis]
    clsness = math.sigmoid(preds[..., 1:cfg.CONTFUSE.CLASSES_NUM + 1])
    box = preds[..., cfg.CONTFUSE.CLASSES_NUM + 1:]
    prob = objness * clsness
    cls_max_prob = np.max(prob, axis=-1)
    cls_idx = np.argmax(prob, axis=-1)

    # Decode center and size back to image coordinates, then convert to
    # (left, top, right, bottom) corners.
    x = (box[:, 0] + box[:, -4]) * cfg.IMAGE.STRIDE / cfg.IMAGE.H_SCALE_RATIO
    y = (box[:, 1] + box[:, -3]) * cfg.IMAGE.STRIDE / cfg.IMAGE.W_SCALE_RATIO
    h = box[:, 2] / cfg.IMAGE.H_SCALE_RATIO * box[:, -2]
    w = box[:, 3] / cfg.IMAGE.W_SCALE_RATIO * box[:, -1]
    left = y - w / 2
    top = x - h / 2
    right = y + w / 2
    bottom = x + h / 2
    result = np.stack([cls_idx, cls_max_prob, left, top, right, bottom], axis=-1)
    return result[cls_max_prob > 0.5]
def _sleep(G, G_top, R, rate):
    # Generate a dream.
    d = sample_indicator(sigmoid(G_top))
    dreams = _sample_factorial_network(G, d)
    dreams.reverse()

    # Pass back up through the recognition network and adjust weights.
    R_probs = _probs_for_factorial_network(R, dreams)
    for R_weights, inputs, target, recognized, step \
            in izip(R, dreams, dreams[1:], R_probs, rate[::-1]):
        R_weights[:-1] += step * np.outer(inputs, target - recognized)
        R_weights[-1] += step * (target - recognized)
def _wake(sample, G, G_top, R, rate):
    # Sample data from the recognition network.
    samples = _sample_factorial_network(R, sample)
    samples.reverse()

    # Pass back down through the generation network and adjust weights.
    G_top += rate[0] * (samples[0] - sigmoid(G_top))
    G_probs = _probs_for_factorial_network(G, samples)
    for G_weights, inputs, target, generated, step \
            in izip(G, samples, samples[1:], G_probs, rate[1:]):
        G_weights[:-1] += step * np.outer(inputs, target - generated)
        G_weights[-1] += step * (target - generated)
def fit(self, X, y, epoch=4000):
    """
    :param X: training image data
    :param y: training labels
    :param epoch: number of iterations to run (stopping criterion)
    """
    self._initialize_parameters(X)
    for i in range(epoch):
        # Make a new prediction.
        y_pred = sigmoid(X.dot(self.param))
        # Minimize the loss.
        self.gradient_decent(X, y, y_pred)
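# Illustrative usage sketch (added; not part of the original source). It
# assumes the fit()/predict() methods above belong to a logistic-regression
# class, referred to here as `LogisticRegression`; that class name and the
# synthetic data below are assumptions made for this example.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X = rng.normal(size=(200, 3))
    true_w = np.array([1.5, -2.0, 0.5])
    y = (1.0 / (1.0 + np.exp(-X.dot(true_w))) > 0.5).astype(int)

    model = LogisticRegression()          # hypothetical wrapper class
    model.fit(X, y, epoch=4000)
    print("training accuracy:", (model.predict(X) == y).mean())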
def back_propergation(self, data, label):
    # Calculate input and output for all neurons.
    a, z = [data], []
    for i in range(self.layers - 1):
        z.append(np.dot(self.weights[i], a[-1]) + self.biases[i])
        a.append(math.sigmoid(z[-1]))

    # Back propagation.
    delta_w = [None for i in range(self.layers - 1)]
    delta_b = [None for i in range(self.layers - 1)]
    error = self.cost.gradient(a[-1], label) * math.sigmoid_diff(z[-1])
    for i in range(self.layers - 2, -1, -1):
        delta_b[i] = error
        delta_w[i] = np.array(np.mat(error).T * np.mat(a[i]))
        if i > 0:
            error = np.dot(self.weights[i].T, error) * math.sigmoid_diff(z[i - 1])
    return delta_w, delta_b
def _sample_factorial_network(layers, s):
    samples = [s]
    for L in layers:
        s = sample_indicator(sigmoid(s.dot(L[:-1]) + L[-1]))
        samples.append(s)
    return samples
def get_class1_prob(self, obs):
    return sigmoid(self._get_score(obs) / self.max_score)
import sys
sys.path.append("../")

import os
import cv2
from glob import glob
import numpy as np

from utils import math
from utils import vis_tools
from config.config import cfg
from data import postprocess
from data import loader

img_pred_files = glob(cfg.YOLOv2.LOG_DIR + "/pred/img_pred/*")
img_anchors = loader.load_anchors(cfg.IMG.ANCHORS)
img_dir = os.path.join(cfg.YOLOv2.DATASETS_DIR, "image_files/")

for fi in img_pred_files:
    img_pred = np.load(fi)
    img_map = img_pred.reshape([cfg.IMG.OUTPUT_H, cfg.IMG.OUTPUT_W, 6, 11])

    # Visualize the maximum objectness and class confidence over the anchors.
    vis_tools.imshow_img(np.max(math.sigmoid(img_map[..., 0]), axis=-1))
    vis_tools.imshow_img(
        np.max(math.sigmoid(img_map[..., 1:cfg.YOLOv2.CLASSES_NUM + 1])[..., 0],
               axis=-1))

    # Decode the prediction map, run NMS, and draw the boxes on the source image.
    img_bboxes = postprocess.parse_img_predmap(img_pred, img_anchors)
    img_bboxes = postprocess.img_nms(img_bboxes, cfg.IMG.IOU_THRESHOLDS)
    img_file = img_dir + fi[-14:-8] + ".png"
    img = cv2.imread(img_file)
    vis_tools.imshow_img_bbox(img, np.array(img_bboxes))
def compute_ctrl_prob(play_images):
    # The first 11 influence images belong to one team, the rest to the other.
    return sigmoid(play_images[:11].sum(axis=0) - play_images[11:].sum(axis=0))
def _detect_objects(*, orig_image_width: int, orig_image_height: int,
                    yolo_predicted: np.ndarray, anchor_start_idx: int,
                    prob_treshold: float, nms_iou_tresh=0.5):
    box_candidates = []
    box_scores = []
    box_classes = []
    num_of_grid_cols, num_of_grid_rows = yolo_predicted.shape[1], yolo_predicted.shape[2]

    for col_idx, cell_grid in enumerate(yolo_predicted[0]):
        for row_idx, cell in enumerate(cell_grid):
            for anchor_idx, box in enumerate(cell):
                # Objectness and per-class confidences.
                prob_obj = sigmoid(box[4])
                class_probs = list(map(lambda x: sigmoid(x), box[5:]))
                prob_chosen_class = prob_obj * np.array(class_probs)
                detected_classes_idx = np.where(prob_chosen_class > prob_treshold)[0]
                if len(detected_classes_idx) > 0:
                    # Decode the box center (relative to the grid) and size
                    # (relative to the anchor), then map to image corners.
                    box_center_x = (row_idx + sigmoid(box[0])) / num_of_grid_rows
                    box_center_y = (col_idx + sigmoid(box[1])) / num_of_grid_cols
                    width_feat = box[2]
                    height_feat = box[3]
                    grid_cell_width = (np.exp(width_feat) *
                                       ANCHORS[anchor_start_idx][anchor_idx][0]) / MODEL_WIDTH
                    grid_cell_height = (np.exp(height_feat) *
                                        ANCHORS[anchor_start_idx][anchor_idx][1]) / MODEL_HEIGHT
                    box_left_x, box_left_y, box_right_x, box_right_y = get_corrected_boxes(
                        box_width=grid_cell_width,
                        box_height=grid_cell_height,
                        box_x=box_center_x,
                        box_y=box_center_y,
                        orig_image_shape=(orig_image_width, orig_image_height),
                        model_image_shape=(MODEL_WIDTH, MODEL_HEIGHT))
                    for i in detected_classes_idx:
                        box_candidates.append(
                            [box_left_x, box_left_y, box_right_x, box_right_y])
                        box_classes.append(i)
                        box_scores.append(prob_chosen_class[i])

    # Non-max suppression over all candidate boxes.
    chosen_box_indices = non_max_suppression(box_candidates, box_scores,
                                             box_classes, nms_iou_tresh)
    picked_boxes = [box_candidates[i] for i in chosen_box_indices]
    picked_classes = [box_classes[i] for i in chosen_box_indices]
    picked_scores = [box_scores[i] for i in chosen_box_indices]
    return picked_boxes, picked_classes, picked_scores
def _probs_for_factorial_network(layers, samples):
    return [sigmoid(s.dot(L[:-1]) + L[-1])
            for L, s in izip(layers, samples)]
def _probs_for_laddered_layer(lateral, s, p):
    p[..., :] += lateral[:, 0]
    for i in range(1, s.shape[-1]):
        j = min(i, lateral.shape[1] - 1)
        p[..., i] += s[..., i - j:i].dot(lateral[i, j:0:-1])
    return sigmoid(p, out=p)
def predict(self, data):
    # Forward pass through every layer with a sigmoid activation.
    for i in range(self.layers - 1):
        data = math.sigmoid(np.dot(self.weights[i], data) + self.biases[i])
    return data
# Per-player influence: build a 2D Gaussian around the player's position,
# evaluate it over the image grid, then squash it with a sigmoid.
Sx = play_data.iloc[x].S * np.cos(theta) * play_data.iloc[x].A
Sy = play_data.iloc[x].S * np.sin(theta) * play_data.iloc[x].A
S = np.array([[Sx, 0], [0, Sy]])
Sigma = R @ S @ S @ np.linalg.inv(R)
player_x_pos = int(round(play_data.iloc[x].X))
player_y_pos = int(round(play_data.iloc[x].Y))
mvn_for_player = mvn(mean=[player_x_pos, player_y_pos], cov=Sigma)
for i, (a, b) in enumerate(itertools.product(range(image_ht), range(image_wd))):
    a = image_ht - a - 1
    player_influence_images[x, a, b] = mvn_for_player.pdf([b, a])
player_influence_images[x] = sigmoid(player_influence_images[x])
# player_influence_images[x] /= player_influence_images[x].max()

# Control probability: compare the two teams' summed influence maps.
team_1 = player_influence_images[:11, :, :].sum(axis=0)
team_2 = player_influence_images[11:, :, :].sum(axis=0)
prob_of_ctrl = sigmoid(team_1 - team_2)

fig, ax = plt.subplots(3)
ax[0].imshow(prob_of_ctrl, origin='lower')
ax[1].imshow(team_1, origin='lower')
ax[2].imshow(team_2, origin='lower')
fig.savefig("./data/sigmoid_player_influences.png", dpi=300)
def feed_forward(self, image_test):
    # Propagate the input through each layer with a sigmoid activation.
    for bias, weight in zip(self.feature_bias, self.feature_weights):
        image_test = sigmoid(np.dot(weight, image_test) + bias)
    return image_test