def train():
    feature_mapper = FeatureMapper()
    score = Score()
    segmentation = Segmentation()

    # The feature extractor is frozen: its weights are loaded from the
    # immutable directory and never passed to the optimizer below.
    feature_mapper.load_weights("./immutable_weights/feature_mapper")
    # score.load_weights("./weights/score")
    # segmentation.load_weights("./weights/segmentation")

    opt = Adam(learning_rate=5e-5)

    with open("../data/data_classification_train.json") as json_file:
        data = json.load(json_file)

    data_index = 0
    while str(data_index) in data:
        img = get_img("../pictures/pictures_classification_train/{}.png".format(data_index))
        true_masks = get_true_mask(data[str(data_index)])
        features = feature_mapper(img)

        def get_loss():
            segmentation_prediction = segmentation(features)
            score_prediction = score(features)
            show_evaluation(segmentation_prediction, true_masks, data_index)
            return calculate_loss(segmentation_prediction, score_prediction, true_masks)

        opt.minimize(get_loss, [score.trainable_weights, segmentation.trainable_weights])

        if data_index % 100 == 99:
            score.save_weights("./weights/score")
            segmentation.save_weights("./weights/segmentation")
        data_index += 1
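# For reference: calling opt.minimize with a *callable* loss is the TF2 eager
# pattern; under the hood it records the forward pass on a GradientTape. A
# minimal equivalent sketch (the toy usage stands in for get_loss and the
# model weights above):
import tensorflow as tf

def minimize_step(opt, loss_fn, variables):
    """One explicit optimizer step, equivalent to opt.minimize(loss_fn, variables)."""
    with tf.GradientTape() as tape:
        loss = loss_fn()
    grads = tape.gradient(loss, variables)
    opt.apply_gradients(zip(grads, variables))
    return loss

# Toy usage:
# v = tf.Variable(3.0)
# minimize_step(tf.keras.optimizers.Adam(learning_rate=5e-5), lambda: tf.square(v), [v])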
def show_mask(data_index):
    img_path = "../pictures/pictures_detect_local_evaluate_100/{}.png".format(data_index)
    img = get_img(img_path)
    boxes, probs, classification_logits, regression_values = get_prediction(img)

    bounding_boxes = []
    for i in range(len(boxes)):
        # Keep only confident detections.
        if probs[i] > .9:
            x1, y1, x2, y2 = get_final_box(boxes[i], regression_values[i], limit_border=False)
            label = np.argmax(classification_logits[i])
            # Boxes are predicted in feature-map coordinates; scale them back
            # to pixel coordinates for display.
            bounding_boxes.append({
                "x1": x1 * real_image_width // feature_size,
                "y1": y1 * real_image_height // feature_size,
                "x2": x2 * real_image_width // feature_size,
                "y2": y2 * real_image_height // feature_size,
                "label": label,
            })

    mask = get_prediction_mask(img)
    show(data_index, img_path, bounding_boxes, mask)
def train():
    feature_mapper = FeatureMapper()
    rpn = Rpn()
    roi_pooling = RoiPooling()
    classifier = Classifier()
    regr = Regr()

    feature_mapper.load_weights("./weights/feature_mapper")
    rpn.load_weights("./weights/rpn")
    classifier.load_weights("./weights/classifier")
    regr.load_weights("./weights/regr")

    opt = Adam(learning_rate=5e-5)

    with open("../data/data_detect_local_evaluate_10000.json") as json_file:
        data = json.load(json_file)

    data_index = 0
    while str(data_index) in data:
        raw_data = data[str(data_index)]
        target, bounding_box_target = get_localization_data(raw_data)
        img = get_img("../pictures/pictures_detect_local_evaluate_10000/{}.png".format(data_index))

        def get_loss():
            features = feature_mapper(img)
            rpn_map = rpn(features)
            boxes, probs = get_boxes(rpn_map)
            feature_areas = roi_pooling(features, boxes)
            classification_logits = classifier(feature_areas)
            regression_values = regr(feature_areas)
            labels_boxes = get_labels_boxes(boxes, target)

            localization_loss = get_localization_loss(rpn_map, target)
            regression_loss = get_regression_loss(regression_values, boxes, bounding_box_target, probs)
            classification_loss = get_classification_loss(classification_logits, labels_boxes, probs)

            # Log per-step precision with and without the regression refinement.
            no_regr_boxes_precision = get_boxes_precision(boxes, np.zeros(regression_values.shape), target)
            final_boxes_precision = get_boxes_precision(boxes, regression_values.numpy(), target)
            save_data(
                data_index,
                raw_data,
                boxes.tolist(),
                [a.numpy().tolist() for a in classification_logits],
                labels_boxes,
                no_regr_boxes_precision,
                final_boxes_precision,
                probs.tolist(),
            )

            return localization_loss + classification_loss + regression_loss

        opt.minimize(
            get_loss,
            [feature_mapper.trainable_weights, rpn.trainable_weights, classifier.trainable_weights, regr.trainable_weights],
        )

        data_index += 1
        if data_index % 100 == 99:
            feature_mapper.save_weights("./weights/feature_mapper")
            rpn.save_weights("./weights/rpn")
            classifier.save_weights("./weights/classifier")
            regr.save_weights("./weights/regr")
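# get_boxes is not shown in this file; a plausible minimal sketch is to prune
# overlapping RPN proposals with non-max suppression. The anchor/score layout
# below is an illustrative assumption, not the repo's actual implementation:
import tensorflow as tf

def get_boxes_sketch(anchors, scores, max_boxes=32, iou_threshold=0.5, score_threshold=0.5):
    """anchors: [N, 4] boxes as (y1, x1, y2, x2); scores: [N] objectness probabilities."""
    selected = tf.image.non_max_suppression(
        anchors, scores,
        max_output_size=max_boxes,
        iou_threshold=iou_threshold,
        score_threshold=score_threshold)
    return tf.gather(anchors, selected), tf.gather(scores, selected)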
def train():
    feature_mapper = FeatureMapper()
    rpn = Rpn()
    roi_pooling = RoiPooling()
    regr = Regr()
    segmentation = Segmentation()

    feature_mapper.load_weights("./weights/feature_mapper")
    rpn.load_weights("./weights/rpn")
    regr.load_weights("./weights/regr")
    segmentation.load_weights("./weights/segmentation")

    opt = Adam(learning_rate=5e-5)

    with open("../data/data_detect_local_evaluate_10000.json") as json_file:
        data = json.load(json_file)

    data_index = 0
    while str(data_index) in data:
        raw_data = data[str(data_index)]
        true_mask = get_true_mask(raw_data)
        img = get_img("../pictures/pictures_detect_local_evaluate_10000/{}.png".format(data_index))

        # The detection stages run outside the loss closure: only the
        # segmentation head is optimized below.
        features = feature_mapper(img)
        rpn_map = rpn(features)
        boxes, probs = get_boxes(rpn_map)
        feature_areas = roi_pooling(features, boxes)
        regression_values = regr(feature_areas)
        regr_boxes = [
            get_final_box(boxes[i], regression_values[i].numpy())
            for i in range(len(boxes))
            if probs[i] > .9
        ]

        if len(regr_boxes) > 0:
            regr_feature_areas = roi_pooling(features, regr_boxes)
            box_true_masks = get_box_true_mask(regr_boxes, true_mask)

            def get_loss():
                predicted_masks = segmentation(regr_feature_areas)
                return get_segmentation_loss(predicted_masks, box_true_masks)

            opt.minimize(get_loss, [segmentation.trainable_weights])

        data_index += 1
        if data_index % 100 == 99:
            print("{} - Weights saved".format(data_index))
            segmentation.save_weights("./weights/segmentation")
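# get_segmentation_loss is defined elsewhere in the repo; a minimal sketch,
# assuming predicted_masks are raw logits and box_true_masks are binary masks
# of the same shape (both are assumptions, not confirmed by this file):
import tensorflow as tf

def get_segmentation_loss_sketch(predicted_masks, box_true_masks):
    """Mean per-pixel sigmoid cross-entropy over all ROI masks."""
    per_pixel = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.cast(box_true_masks, tf.float32),
        logits=predicted_masks)
    return tf.reduce_mean(per_pixel)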
def show_prediction(data_index):
    with open("../data/data_detect_local_evaluate_100.json") as json_file:
        data = json.load(json_file)
    # JSON object keys are strings, so the index must be stringified here
    # (matching the lookups in the training scripts).
    if str(data_index) not in data:
        print("Index {} out of range".format(data_index))
        return

    yolo = Yolo()
    yolo.load_weights("./weights/yolo")

    img_path = "../pictures/pictures_detect_local_evaluate_100/{}.png".format(data_index)
    img = get_img(img_path)
    preds = yolo(img)
    boxes = get_boxes(preds)
    show(data_index, img_path, boxes)
def train():
    yolo = Yolo()
    yolo.load_weights("./weights/yolo")
    opt = Adam(learning_rate=5e-5)

    with open("../data/data_detect_local_train.json") as json_file:
        data = json.load(json_file)

    data_index = 0
    while str(data_index) in data:
        img = get_img("../pictures/pictures_detect_local_train/{}.png".format(data_index))
        true_labels, true_boxes, true_preds = get_localization_data(data[str(data_index)])

        def get_loss():
            preds = yolo(img)
            return calculate_loss(preds, true_labels, true_boxes, true_preds)

        opt.minimize(get_loss, [yolo.trainable_weights])

        if data_index % 100 == 99:
            yolo.save_weights("./weights/yolo")
        data_index += 1
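# Illustrative only: a heavily simplified YOLO-style loss under assumed
# shapes -- preds as a [cells, 5 + num_classes] tensor (objectness logit,
# 4 box values, class logits), true_preds as a {0, 1} objectness target per
# cell, true_labels as integer class ids. The repo's calculate_loss almost
# certainly weights and masks these terms differently.
import tensorflow as tf

def calculate_loss_sketch(preds, true_labels, true_boxes, true_preds):
    obj_logits = preds[:, 0]
    box_preds = preds[:, 1:5]
    class_logits = preds[:, 5:]
    obj_mask = tf.cast(true_preds, tf.float32)

    objectness_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=obj_mask, logits=obj_logits))
    # Box and class terms only count for cells that actually contain an object.
    box_loss = tf.reduce_sum(
        obj_mask[:, None] * tf.square(box_preds - true_boxes)
    ) / (tf.reduce_sum(obj_mask) + 1e-6)
    class_loss = tf.reduce_sum(
        obj_mask * tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=true_labels, logits=class_logits)
    ) / (tf.reduce_sum(obj_mask) + 1e-6)
    return objectness_loss + box_loss + class_loss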
    elif click_count == 5:
        # First-prize button
        new_prize_btn(root, frelx=5, frely=23,
                      cmd=lambda: self.btn_click(click_count + 1), bg=btn_bg)


main_app = Main()

root = Tk()
root.title("---融易通 2018年会 抽奖系统---")  # "Rongyitong 2018 annual meeting lottery system"
root.resizable(width=False, height=False)  # Fix the window size

background_img = img.get_img('img/bg.png')
w = background_img.width()
h = background_img.height()
root.geometry('%dx%d+0+0' % (w, h))

# Background image
Label(root, image=background_img).place(relx=0.5, rely=0.5, relwidth=1, relheight=1, anchor=CENTER)

btn_bg = img.get_img('img/prize_btn.png')

# Initialize the UI
main_app.init_view()
def evaluate():
    with open("../data/data_detect_local_evaluate_100.json") as json_file:
        data = json.load(json_file)

    data_index = 0
    total_match = []
    total_missing_blobs = 0
    total_extra_blobs = 0
    while str(data_index) in data:
        img = get_img("../pictures/pictures_detect_local_evaluate_100/{}.png".format(data_index))
        boxes, probs, classification_logits, regression_values = get_prediction(img)

        raw_data = data[str(data_index)]
        all_blobs = raw_data["army"] + raw_data["enemy"]

        # For every confident prediction, score how well it matches each
        # ground-truth blob: the score starts at 1 for a living blob, decays
        # with center distance, and is scaled by the classifier's probability
        # for the blob's expected label.
        estimates = []
        for i in range(len(boxes)):
            if probs[i] > .9:
                box = get_final_box(boxes[i], regression_values[i], limit_border=False)
                classification_probs = tf.nn.softmax(classification_logits[i]).numpy()
                match = []
                for j, t_blob in enumerate(all_blobs):
                    c_x = (box[0] + box[2]) / (2 * feature_size)
                    c_y = (box[1] + box[3]) / (2 * feature_size)
                    t_x = t_blob["x"]
                    t_y = t_blob["y"]
                    m = int(t_blob["alive"])
                    distance = math.sqrt((c_x - t_x)**2 + (c_y - t_y)**2)
                    m = m / (1 + distance)
                    label = 1 + statuses[t_blob["status"]] + 3 * int(j >= 3)
                    m = m * classification_probs[label]
                    match.append(m)
                estimates.append(match)

        # Greedy one-to-one assignment: repeatedly take the best remaining
        # (prediction, blob) pair, then zero out that blob's column.
        # 6 = 3 army + 3 enemy blobs per picture (matches the label offset above).
        output = []
        while len(estimates) != 0:
            if np.max(estimates) < .2:
                break
            best_match = np.argmax(estimates)
            predicted_blob = best_match // 6
            target_blob = best_match % 6
            output.append(estimates[predicted_blob][target_blob])
            estimates.pop(predicted_blob)
            for match in estimates:
                match[target_blob] = 0

        alive_blobs = len([1 for b in all_blobs if b["alive"]])
        blobs_count_difference = alive_blobs - len(output)
        if len(output) == 0:
            picture_match = 0.
        else:
            picture_match = sum(output) / len(output)

        print("\n>>>>>> {}".format(data_index))
        print("{} blobs matching".format(len(output)))
        print("{}% average match".format(int(picture_match * 100)))
        print("{} blobs missed".format(max(0, blobs_count_difference)))
        print("{} extra predictions".format(max(0, -blobs_count_difference)))

        total_match.append(picture_match)
        total_missing_blobs += max(0, blobs_count_difference)
        total_extra_blobs += max(0, -blobs_count_difference)
        data_index += 1

    print("\n\n>>>>>> Summary for {} pictures".format(data_index))
    print("Average match: {}%".format(int(sum(total_match) / len(total_match) * 100)))
    print("Total missed blobs: {}".format(total_missing_blobs))
    print("Total extra predictions: {}".format(total_extra_blobs))
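# The per-pair matching score above, factored out as a self-contained
# restatement: it equals int(alive) at zero distance with classifier
# probability 1, and decays with both center distance and confidence.
def match_score(alive, distance, label_prob):
    return int(alive) / (1 + distance) * label_prob

# e.g. a living blob whose center is 1 unit away, with p(label) = 0.8:
assert abs(match_score(True, 1.0, 0.8) - 0.4) < 1e-9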
from ui import get_ui, get_broarcaster
from img import get_img

get_img("abc", 0, True)