rpn_accuracy_rpn_monitor = [] print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.') # data generator X, Y, img_data = next(data_gen_train) # load one batch of data loss_rpn = model_rpn.train_on_batch(X, Y) write_log(callback, ['rpn_cls_loss', 'rpn_reg_loss'], loss_rpn, train_step) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, 'tf', use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue # sample positive/negative ROIs neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0)
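A minimal sketch of the negative/positive split performed above, assuming (as in keras-frcnn's calc_iou) that Y1 has shape (1, num_rois, num_classes + 1) and that its last column is the one-hot 'bg' flag:

import numpy as np

Y1 = np.zeros((1, 4, 3))
Y1[0, 0, -1] = 1  # ROI 0 overlaps no object, labelled background
Y1[0, 2, -1] = 1  # ROI 2 likewise
neg_samples = np.where(Y1[0, :, -1] == 1)  # (array([0, 2]),)
pos_samples = np.where(Y1[0, :, -1] == 0)  # (array([1, 3]),)
# np.where returns a tuple, hence the neg_samples[0] unwrapping seen later in the training loops.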
def build_and_train(hype_space, save_best_weights=False): train_path = '/home/comp/e4252392/retraindata4frcnn.txt' config_output_filename = '/home/comp/e4252392/hyperopt/hyperopt_config.pickle' num_epochs = 20 # for retraining the best model only diagnose_path = '/home/comp/e4252392/hyperopt/models/hyperopt_loss_ap_plt.npy' real_model_path = '/home/comp/e4252392/hyperopt/models/hyperopt_model_plt_' print("Hyperspace:") print(hype_space) C = config.Config() C.num_rois = int(hype_space['num_rois']) # hyperopt samples floats, so cast to int # C.anchor_box_scales = hype_space['anchor_box_scales'] # C.base_net_weights = '/home/comp/e4252392/second_res_more_epoch.h5' C.base_net_weights = 'model_frcnn.hdf5' #data all_imgs, classes_count, class_mapping = get_data(train_path) if 'bg' not in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping print('Training images per class:') pprint.pprint(classes_count) print('Num classes (including bg) = {}'.format(len(classes_count))) with open(config_output_filename, 'wb') as config_f: pickle.dump(C, config_f) print( 'Config has been written to {}, and can be loaded when testing to ensure correct results' .format(config_output_filename)) random.shuffle(all_imgs) num_imgs = len(all_imgs) train_imgs = [s for s in all_imgs] print('Num train samples {}'.format(len(train_imgs))) data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='train') #data # build_model if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) else: input_shape_img = (None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) shared_layers = nn.nn_base(int(hype_space['kernel_size']), img_input, trainable=True) num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(int(hype_space['kernel_size']), shared_layers, num_anchors) classifier = nn.classifier(int(hype_space['kernel_size']), shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) model_all = Model([img_input, roi_input], rpn[:2] + classifier) try: print('loading weights from {}'.format(C.base_net_weights)) model_rpn.load_weights(C.base_net_weights, by_name=True) model_classifier.load_weights(C.base_net_weights, by_name=True) except: print( 'Could not load pretrained model weights. 
Weights can be found in the keras application folder \ https://github.com/fchollet/keras/tree/master/keras/applications') # optimizer = Adam(lr=1e-5) # optimizer_classifier = Adam(lr=1e-5) optimizer = Adam(lr=hype_space['optimizer_lr'], decay=hype_space['optimizer_decay']) optimizer_classifier = Adam(lr=hype_space['optimizer_lr'], decay=hype_space['optimizer_decay']) model_rpn.compile(optimizer=optimizer, loss=[ thelosses.rpn_loss_cls(num_anchors), thelosses.rpn_loss_regr(num_anchors) ]) model_classifier.compile( optimizer=optimizer_classifier, loss=[ thelosses.class_loss_cls, thelosses.class_loss_regr(len(classes_count) - 1) ], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'}) sgd = SGD(lr=hype_space['sgd_lr'], decay=hype_space['sgd_decay']) model_all.compile(optimizer=sgd, loss='mae') # build_model #build_and_train epoch_length = 10 iter_num = 0 losses = np.zeros((epoch_length, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] start_time = time.time() best_loss = np.Inf print('Starting training') loss_array = [] ap_array = [] epoch_array = [] epoch_array.append(0) result = {} model_name = '' for epoch_num in range(num_epochs): progbar = generic_utils.Progbar(epoch_length) print('Epoch {}/{}'.format(epoch_num + 1, num_epochs)) while True: try: if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose: mean_overlapping_bboxes = float( sum(rpn_accuracy_rpn_monitor)) / len( rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print( 'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations' .format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print( 'RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.' 
) # train X, Y, img_data = next(data_gen_train) loss_rpn = model_rpn.train_on_batch(X, Y) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300) X2, Y1, Y2, IouS = roi_helpers.calc_iou( R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append(len(pos_samples)) if C.num_rois > 1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice( pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) loss_class = model_classifier.train_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]) # train losses[iter_num, 0] = loss_rpn[1] losses[iter_num, 1] = loss_rpn[2] losses[iter_num, 2] = loss_class[1] losses[iter_num, 3] = loss_class[2] losses[iter_num, 4] = loss_class[3] iter_num += 1 progbar.update( iter_num, [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])), ('detector_cls', np.mean(losses[:iter_num, 2])), ('detector_regr', np.mean(losses[:iter_num, 3]))]) if iter_num == epoch_length: loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum( rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose: print( 'Mean number of bounding boxes from RPN overlapping ground truth boxes: {}' .format(mean_overlapping_bboxes)) print( 'Classifier accuracy for bounding boxes from RPN: {}' .format(class_acc)) print('Loss RPN classifier: {}'.format(loss_rpn_cls)) print('Loss RPN regression: {}'.format(loss_rpn_regr)) print('Loss Detector classifier: {}'.format( loss_class_cls)) print('Loss Detector regression: {}'.format( loss_class_regr)) print('Elapsed time: {}'.format(time.time() - start_time)) # result curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr iter_num = 0 start_time = time.time() if curr_loss < best_loss: if C.verbose: print( 'Total loss decreased from {} to {}, saving weights' .format(best_loss, curr_loss)) best_loss = curr_loss if save_best_weights: # build a per-epoch checkpoint path; keep real_model_path as the unmodified prefix best_model_path = real_model_path + str( epoch_num + 1) + '.hdf5' model_all.save_weights(best_model_path, overwrite=True) print("Best weights so far saved to " + best_model_path + ". 
best_loss = " + str(best_loss)) epoch_array.append(epoch_num + 1) loss_array.append([ loss_rpn_cls, loss_rpn_regr, loss_class_cls, loss_class_regr, best_loss ]) album_ap, logo_ap, mAP = measure_map.measure_map( config_output_filename, real_model_path) ap_array.append([album_ap, logo_ap, mAP]) np.save(diagnose_path, [epoch_array, loss_array, ap_array]) else: album_ap = 'not applicable' logo_ap = 'not applicable' mAP = 'not applicable' model_name = "model_{}_{}".format( str(best_loss), str(uuid.uuid4())[:5]) result = { 'loss': best_loss, 'loss_rpn_cls': loss_rpn_cls, 'loss_rpn_regr': loss_rpn_regr, 'loss_class_cls': loss_class_cls, 'loss_class_regr': loss_class_regr, 'album_ap': album_ap, 'logo_ap': logo_ap, 'mAP': mAP, 'model_name': model_name, 'space': hype_space, 'status': STATUS_OK } print("RESULT UPDATED.") print("Model name: {}".format(model_name)) # result break except Exception as e: print('Exception: {}'.format(e)) continue print('Training complete, exiting.') print("BEST MODEL: {}".format(model_name)) print("FINAL RESULT:") print_json(result) save_json_result(model_name, result) try: K.clear_session() del model_all, model_rpn, model_classifier except Exception as err: try: K.clear_session() except: pass err_str = str(err) print(err_str) traceback_str = str(traceback.format_exc()) print(traceback_str) return { 'status': STATUS_FAIL, 'err': err_str, 'traceback': traceback_str } print("\n\n") return model_name, result
def test_view_func_NN(model_classifier, model_rpn, model_inner, C): test_cls = 'aeroplane' input_train_file = 'pickle_data/train_data_Wflip_all.pickle' ## read the training data from pickle file or from annotations test_pickle = 'pickle_data/test_data_{}.pickle'.format(test_cls) if os.path.exists(test_pickle): with open(test_pickle) as f: all_imgs, classes_count, _ = pickle.load(f) class_mapping = C.class_mapping inv_class_mapping = {v: k for k, v in class_mapping.items()} backend = K.image_dim_ordering() gt_cls_num = class_mapping[test_cls] print('work on class {}'.format(test_cls)) base_path = os.getcwd() # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False count = 0 good_img = 0 not_good = 0 def format_img_size(img, C): """ formats the image size based on config """ img_min_side = float(C.im_size) (height, width, _) = img.shape if width <= height: ratio = img_min_side / width new_height = int(ratio * height) new_width = int(img_min_side) else: ratio = img_min_side / height new_width = int(ratio * width) new_height = int(img_min_side) img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC) return img, ratio def format_img_channels(img, C): """ formats the image channels based on config """ img = img[:, :, (2, 1, 0)] img = img.astype(np.float32) img[:, :, 0] -= C.img_channel_mean[0] img[:, :, 1] -= C.img_channel_mean[1] img[:, :, 2] -= C.img_channel_mean[2] img /= C.img_scaling_factor img = np.transpose(img, (2, 0, 1)) img = np.expand_dims(img, axis=0) return img def format_img(img, C): """ formats an image for model prediction based on config """ img, ratio = format_img_size(img, C) img = format_img_channels(img, C) return img, ratio def display_image(img): img1 = img[:, :, (2, 1, 0)] im = Image.fromarray(img1.astype('uint8'), 'RGB') im.show() # Method to transform the coordinates of the bounding box to its original size def get_real_coordinates(ratio, x1, y1, x2, y2): real_x1 = int(round(x1 // ratio)) real_y1 = int(round(y1 // ratio)) real_x2 = int(round(x2 // ratio)) real_y2 = int(round(y2 // ratio)) return (real_x1, real_y1, real_x2, real_y2) vnum_test = 24 azimuth_vec = np.concatenate( ([0], np.linspace((360. / (vnum_test * 2)), 360. - (360. 
/ (vnum_test * 2)), vnum_test)), axis=0) def find_interval(azimuth, azimuth_vec): for i in range(len(azimuth_vec)): if azimuth < azimuth_vec[i]: break ind = i if azimuth > azimuth_vec[-1]: ind = 1 return ind class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} # print(class_mapping) class_to_color = { class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping } C.num_rois = 32 obj_num = 0 bbox_threshold_orig = 0.6 th_bbox = 0.4 ## get GT for all az for single cls feature_az = [] sorted_path = input_train_file tmp_ind = sorted_path.index('.pickle') sorted_path = sorted_path[:tmp_ind] + "_sorted_Angles" + sorted_path[ tmp_ind:] if os.path.exists(sorted_path): print("loading sorted data") with open(sorted_path) as f: trip_data = pickle.load(f) im_file = [] ind = [] for ii in range(360): for jj in range(3): try: im_file.append(trip_data[test_cls][ii][jj]) ind.append(ii) except: if jj == 0: print('no azimuth {}'.format(ii)) data_gen_train = data_generators.get_anchor_gt(im_file, [], C, K.image_dim_ordering(), mode='test') azimuth_dict = [] inner_NN = [] azimuths = [] for tt in range(len(ind)): try: if tt % 100 == 0: print('worked on {}/{}'.format(tt, len(ind))) # print ('im num {}'.format(good_img)) X, Y, img_data = next(data_gen_train) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300) X2, Y1, Y2, Y_view = roi_helpers.calc_iou_new( R, img_data, C, C.class_mapping) pos_samples = np.where(Y1[0, :, -1] == 0) sel_samples = pos_samples[0].tolist() R = X2[0, sel_samples, :] for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: # pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr, P_view] = model_classifier.predict([X, ROIs]) iner_f = model_inner.predict([X, ROIs]) # oo = model_classifier_only.predict([F, ROIs]) for ii in range(len(sel_samples)): if np.max(P_cls[0, ii, :]) < bbox_threshold_orig or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue ## get class from the net # cls_num = np.argmax(P_cls[0, ii, :]) ## use gt class cls_num = gt_cls_num cls_name = inv_class_mapping[cls_num] cls_view = P_view[0, ii, 360 * cls_num:360 * (cls_num + 1)] # azimuths[cls_name].append(np.argmax(cls_view, axis=0)) inner_NN.append(iner_f[0, ii, :]) azimuth_dict.append(img_data['bboxes'][0]['azimuth']) except: print('failed on az {}'.format(img_data['bboxes'][0]['azimuth'])) ## calculating some mean feature map for every az with open('pickle_data/{}_NN.pickle'.format(C.weight_name), 'w') as f: pickle.dump([inner_NN, azimuth_dict], f) print('saved PICKLE') with open('pickle_data/{}_NN.pickle'.format(C.weight_name)) as f: inner_NN, azimuth_dict = pickle.load(f) neigh = KNeighborsClassifier(n_neighbors=1) neigh.fit(inner_NN, azimuth_dict) jj = 0 for im_file in all_imgs: jj += 1 if jj % 50 == 0: print(jj) filepath = im_file['filepath'] img = cv2.imread(filepath) img_gt = np.copy(img) if img is None: not_good += 1 continue else: good_img += 1 # print ('im num {}'.format(good_img)) X, ratio = format_img(img, C) if backend == 'tf': X = 
np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN Y1, Y2 = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] width, height = int(im_file["width"]), int(im_file["height"]) resized_width, resized_height = data_generators.get_new_img_size( width, height, C.im_size) # [_,_, F] = model_rpn.predict(X) ROIs = [] ## pass on all the labels in the image, some of them are not equal to test_cls for bbox_gt in im_file['bboxes']: no_bbox_flag = 1 bbox_threshold = bbox_threshold_orig if not bbox_gt['class'] == test_cls: continue if bbox_gt[ 'class'] == test_cls and bbox_threshold == bbox_threshold_orig: obj_num += 1 while no_bbox_flag and bbox_threshold > th_bbox: cls_gt = bbox_gt['class'] az_gt = bbox_gt['azimuth'] el_gt = bbox_gt['elevation'] t_gt = bbox_gt['tilt'] if len(ROIs) == 0: # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} azimuths = {} inner_res = {} # print ('obj num {}'.format(obj_num)) for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype( ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr, P_view] = model_classifier.predict([X, ROIs]) inner_out = model_inner.predict([X, ROIs]) # oo = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[ 0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue ## get class from the net # cls_num = np.argmax(P_cls[0, ii, :]) ## use gt class cls_num = gt_cls_num cls_name = inv_class_mapping[cls_num] cls_view = P_view[0, ii, 360 * cls_num:360 * (cls_num + 1)] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] azimuths[cls_name] = [] inner_res[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([ C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h) ]) probs[cls_name].append(np.max(P_cls[0, ii, :])) azimuths[cls_name].append( np.argmax(cls_view, axis=0)) inner_res[cls_name].append(inner_out[0, ii, :]) # cv2.rectangle(img_gt, (bbox_gt['x1'], bbox_gt['y1']), (bbox_gt['x2'], bbox_gt['y2']), (int(class_to_color[test_cls][0]), int(class_to_color[test_cls][1]), int(class_to_color[test_cls][2])), 2) for key in bboxes: # if 1: if key == test_cls and bbox_gt['class'] == test_cls: bbox = np.array(bboxes[key]) prob = np.array(probs[key]) azimuth = np.array(azimuths[key]) inner_result = np.array(inner_res[key]) # img = draw_bbox(img,bbox, prob, azimuth, ratio) azimuth = neigh.predict(inner_result) ## get the azimuth from bbox that have more than 'overlap_thresh' overlap with gt_bbox az = [] overlap_thresh = 0.5 try: while np.size(az) == 0 and overlap_thresh > 0: _, prob_bbox, az = roi_helpers.overlap_with_gt( bbox, prob, azimuth, bbox_gt, ratio=ratio, overlap_thresh=overlap_thresh, max_boxes=300, use_az=True) overlap_thresh -= 0.1 if 
overlap_thresh == 0: print("No good Bbox was found") counts = np.bincount(az) except: az = [] counts = [] try: az_fin = np.argmax(counts) true_bin = find_interval(az_gt, azimuth_vec) prob_bin = find_interval(az_fin, azimuth_vec) no_bbox_flag = 0 if true_bin == prob_bin: count += 1 break except: no_bbox_flag = 1 bbox_threshold -= 0.1 ## azimuth calculations ## display bbox_threshold -= 0.1 succ = float(count) / float(obj_num) * 100. print( 'for class {}: true count is {} out of {} objects from {} images ({:.1f}% success)'. format(test_cls, count, obj_num, good_img, succ)) return succ
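A small worked example of the azimuth binning used above. With vnum_test = 24, azimuth_vec is [0, 7.5, 22.5, ..., 352.5], so find_interval assigns angles to 15-degree bins centred on multiples of 15, and anything above 352.5 wraps back into bin 1 (the bin around 0); a prediction counts as correct when it lands in the same bin as the ground truth:

import numpy as np

vnum_test = 24
azimuth_vec = np.concatenate(
    ([0], np.linspace(360. / (vnum_test * 2),
                      360. - 360. / (vnum_test * 2), vnum_test)), axis=0)

def find_interval(azimuth, azimuth_vec):
    for i in range(len(azimuth_vec)):
        if azimuth < azimuth_vec[i]:
            break
    ind = i
    if azimuth > azimuth_vec[-1]:
        ind = 1  # wrap angles past 352.5 back into the bin centred on 0
    return ind

assert find_interval(10., azimuth_vec) == find_interval(14., azimuth_vec)  # same 15-degree bin
assert find_interval(355., azimuth_vec) == 1  # wraps around 0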
def Test_frcnn(test_path, config_filename, num_rois=32, network="vgg", terminal_flag=False): """ Test the object detection network test_path --str: Full path to the folder containing the test images (No default) config_filename --str: Full path to the config_file.pickle, generated while training (No default) num_rois --int: number of ROIs to process at once (Default 32) network --str: The base network to use (One of 'vgg', 'resnet50', 'mobilenet') (Default 'vgg') terminal_flag --bool: set to True when running from the terminal; leave False when calling this as a function (Default False) OUTPUT: When called from the terminal, the annotated images are written to disk (images are in BGR format) When called as a function, returns the images, dets as 2 lists (images are in RGB format) """ config_output_filename = config_filename with open(config_output_filename, 'rb') as f_in: C = pickle.load(f_in) if network == 'resnet50': import keras_frcnn.resnet as nn elif network == 'vgg': import keras_frcnn.vgg as nn elif network == "mobilenet": import keras_frcnn.mobilenet as nn C.model_path = 'epoch-176.hdf5' # note: overrides the weights path stored in the pickled config # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False img_path = test_path def format_img_size(img, C): # utility function 1 """ formats the image size based on config """ img_min_side = float(C.im_size) (height, width, _) = img.shape if width <= height: ratio = img_min_side / width new_height = int(ratio * height) new_width = int(img_min_side) else: ratio = img_min_side / height new_width = int(ratio * width) new_height = int(img_min_side) img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC) return img, ratio def format_img_channels(img, C): #utility function 2 """ formats the image channels based on config """ img = img[:, :, (2, 1, 0)] img = img.astype(np.float32) img[:, :, 0] -= C.img_channel_mean[0] img[:, :, 1] -= C.img_channel_mean[1] img[:, :, 2] -= C.img_channel_mean[2] img /= C.img_scaling_factor img = np.transpose(img, (2, 0, 1)) img = np.expand_dims(img, axis=0) return img def format_img(img, C): # utility function 3 """ formats an image for model prediction based on config """ img, ratio = format_img_size(img, C) img = format_img_channels(img, C) return img, ratio # Method to transform the coordinates of the bounding box to its original size def get_real_coordinates(ratio, x1, y1, x2, y2): #utility function 4 real_x1 = int(round(x1 // ratio)) real_y1 = int(round(y1 // ratio)) real_x2 = int(round(x2 // ratio)) real_y2 = int(round(y2 // ratio)) return (real_x1, real_y1, real_x2, real_y2) class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print(class_mapping) class_to_color = { class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping } C.num_rois = int(num_rois) if C.network == 'resnet50': num_features = 1024 elif C.network == 'vgg': num_features = 512 elif C.network == 'mobilenet': num_features = 512 if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (num_features, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, num_features) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base 
layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) print('Loading weights from {}'.format(C.model_path)) model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') bbox_threshold = 0.8 list_of_all_images = [] list_of_all_dets = [] for idx, img_name in enumerate(sorted(os.listdir(img_path))): if not img_name.lower().endswith( ('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')): continue print(img_name) st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X, ratio = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([ C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h) ]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2) cv2.rectangle( img, (real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 2) textLabel = '{}: {}'.format(key, int(100 * new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (real_x1, real_y1 - 0) cv2.rectangle( img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2) cv2.rectangle( img, 
(textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1) cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) if terminal_flag: print('Elapsed time = {}'.format(time.time() - st)) print(all_dets) if len(all_dets) > 0: cv2.imwrite(img_name + '_new.png', img) cv2.waitKey(0) else: list_of_all_images.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) list_of_all_dets.append(all_dets) if not terminal_flag: return (list_of_all_images, list_of_all_dets)
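A hedged usage sketch of Test_frcnn (paths and detections below are hypothetical): with terminal_flag left False, the function returns the annotated RGB images and their detections instead of writing '<name>_new.png' files.

images, detections = Test_frcnn(test_path='./test_images/',
                                config_filename='./config.pickle',
                                num_rois=32,
                                network='vgg')
for dets in detections:
    print(dets)  # e.g. [('cat', 93.1)] -- (class, percent confidence) pairs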
print( 'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations' .format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print( 'RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.' ) X, Y, img_data = next(data_gen_train) loss_rpn = model_rpn.train_on_batch(X, Y) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_data_format(), use_regr=True, overlap_thresh=0.4, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0)
st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X, ratio = format_img(img, C) if K.image_data_format() == 'channels_last': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_data_format(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0:
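A minimal numpy sketch of the in-place conversion performed on R in the fragment above: subtracting x1 and y1 from the last two columns turns corner format (x1, y1, x2, y2) into (x, y, w, h), which is what the RoI pooling path expects.

import numpy as np

R = np.array([[10., 20., 50., 80.]])  # one proposal as (x1, y1, x2, y2)
R[:, 2] -= R[:, 0]
R[:, 3] -= R[:, 1]
print(R)  # [[10. 20. 40. 60.]] i.e. (x, y, w, h)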
def work(input, output, textlabel, piclabel, primpiclabel): textlabel.append("Detecting now:") test_path = input + '/' output_path = output + '/' #test_path = "../big_test_input/" sys.setrecursionlimit(40000) parser = OptionParser() parser.add_option("-p", "--path", dest="test_path", help="Path to test data.", default=test_path) parser.add_option( "-n", "--num_rois", type="int", dest="num_rois", help="Number of ROIs per iteration. Higher means more memory use.", default=32) parser.add_option( "--config_filename", dest="config_filename", help= "Location to read the metadata related to the training (generated when training).", default="./config.pickle") parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='resnet50') (options, args) = parser.parse_args() if not options.test_path: # if filename is not given parser.error( 'Error: path to test data must be specified. Pass --path to command line' ) config_output_filename = options.config_filename with open(config_output_filename, 'rb') as f_in: C = pickle.load(f_in) if C.network == 'resnet50': import keras_frcnn.resnet as nn elif C.network == 'vgg': import keras_frcnn.vgg as nn # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False img_path = options.test_path class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print(class_mapping) class_to_color = { class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping } C.num_rois = int(options.num_rois) if C.network == 'resnet50': num_features = 1024 elif C.network == 'vgg': num_features = 512 if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (num_features, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, num_features) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) print('Loading weights from {}'.format(C.model_path)) textlabel.append('Loading weights from {}'.format(C.model_path)) ######################## these two lines should be re-enabled later ################# #model_rpn.load_weights(C.model_path, by_name=True) #model_classifier.load_weights(C.model_path, by_name=True) ######################################################### model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') all_imgs = [] classes = {} bbox_threshold = 0.8 visualise = True for idx, img_name in enumerate(sorted(os.listdir(img_path))): if not img_name.lower().endswith( ('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')): continue print(img_name) ################## update the window ################ textlabel.append(img_name) oldpath = test_path + 
img_name beforeimage = QtGui.QPixmap(oldpath) primpiclabel.setPixmap(beforeimage) primpiclabel.setScaledContents(True) ########################################## st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X, ratio = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([ C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h) ]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2) color = [0, 0, 0] if key == "airbase": color = [0, 0, 255] if key == "harbour": color = [21, 159, 235] if key == "island": color = [59, 197, 184] cv2.rectangle(img, (real_x1, real_y1), (real_x2, real_y2), color, 2) #textLabel = '{}: {}'.format(key,int(100*new_probs[jk])) #all_dets.append((key,100*new_probs[jk])) #textlabel.append('{}: {}'.format(key,100*new_probs[jk])) #(retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1) #textOrg = (real_x1, real_y1-0) #cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 2) #cv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (255, 255, 255), -1) #cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 0, 0), 1) print('Elapsed time = {}'.format(time.time() - st)) textlabel.append('Elapsed time = {}'.format(time.time() - st)) print(all_dets) #cv2.imshow('img', img) #cv2.waitKey(0) cv2.imwrite(output_path + '{}.png'.format(idx), img) afterimage = QtGui.QPixmap(output_path + '{}.png'.format(idx)) piclabel.setPixmap(afterimage) piclabel.setScaledContents(True)
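The per-class colour selection in work() can be read as a dictionary lookup with a default; a small equivalent sketch (colours are BGR triples, as OpenCV expects, taken from the if-chain above):

CLASS_COLORS = {'airbase': [0, 0, 255], 'harbour': [21, 159, 235], 'island': [59, 197, 184]}

def class_color(key):
    return CLASS_COLORS.get(key, [0, 0, 0])  # black for unknown classes

print(class_color('harbour'))  # [21, 159, 235]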
def testing(test_path, num_rois=32, config_filename="config.pickle", network="resnet50", weights_file='./model_frcnn.hdf5', output_folder='./results_imgs/'): ''' test_path = Path to testing data num_rois = Number of ROIs per iteration. Higher means more memory use config_filename = Location to read the metadata related to the training (generated when training) network = Base network to use. Supports vgg or resnet50 weights_file = Path to the .hdf5 weights file output_folder = Select the output folder ''' if not test_path: # if no path is given raise ValueError( 'Error: path to test data must be specified.') config_output_filename = config_filename with open(config_output_filename, 'rb') as f_in: C = pickle.load(f_in) if C.network == 'resnet50': import keras_frcnn.resnet as nn elif C.network == 'vgg': import keras_frcnn.vgg as nn C.model_path = weights_file # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False img_path = test_path def format_img_size(img, C): """ formats the image size based on config """ img_min_side = float(C.im_size) (height, width, _) = img.shape if width <= height: ratio = img_min_side / width new_height = int(ratio * height) new_width = int(img_min_side) else: ratio = img_min_side / height new_width = int(ratio * width) new_height = int(img_min_side) img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC) return img, ratio def format_img_channels(img, C): """ formats the image channels based on config """ img = img[:, :, (2, 1, 0)] img = img.astype(np.float32) img[:, :, 0] -= C.img_channel_mean[0] img[:, :, 1] -= C.img_channel_mean[1] img[:, :, 2] -= C.img_channel_mean[2] img /= C.img_scaling_factor img = np.transpose(img, (2, 0, 1)) img = np.expand_dims(img, axis=0) return img def format_img(img, C): """ formats an image for model prediction based on config """ img, ratio = format_img_size(img, C) img = format_img_channels(img, C) return img, ratio # Method to transform the coordinates of the bounding box to its original size def get_real_coordinates(ratio, x1, y1, x2, y2): real_x1 = int(round(x1 // ratio)) real_y1 = int(round(y1 // ratio)) real_x2 = int(round(x2 // ratio)) real_y2 = int(round(y2 // ratio)) return (real_x1, real_y1, real_x2, real_y2) class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print(class_mapping) class_to_color = { class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping } C.num_rois = int(num_rois) if C.network == 'resnet50': num_features = 1024 elif C.network == 'vgg': num_features = 512 if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (num_features, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, num_features) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = 
Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) print('Loading weights from {}'.format(C.model_path)) model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') all_imgs = [] classes = {} bbox_threshold = 0.8 visualise = True for idx, img_name in enumerate(sorted(os.listdir(img_path))): if not img_name.lower().endswith( ('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')): continue print(img_name) st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X, ratio = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([ C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h) ]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2) cv2.rectangle( img, (real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 2) textLabel = '{}: {}'.format(key, int(100 * new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (real_x1, real_y1 - 0) cv2.rectangle( img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2) cv2.rectangle( img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1) cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) print('Elapsed time = {}'.format(time.time() - st)) print(all_dets) #cv2.imshow('img', img) 
#cv2.waitKey(0) output_file = output_folder + '{}.png' cv2.imwrite(output_file.format(idx), img)
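A hedged usage sketch of testing() (paths hypothetical): it draws labelled boxes on every image found under test_path and writes numbered PNGs into output_folder.

testing(test_path='./test_images/',
        num_rois=32,
        config_filename='config.pickle',
        network='resnet50',
        weights_file='./model_frcnn.hdf5',
        output_folder='./results_imgs/')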
def train_mscoco(): # =========================== model configuration and loading ====================================== # config for data augmentation cfg = config.Config() cfg.use_horizontal_flips = True cfg.use_vertical_flips = True cfg.rot_90 = True cfg.num_rois = 32 # weights for the first four conv blocks of resnet cfg.base_net_weights = nn.get_weight_path() # where to save the model weights cfg.model_path = './model/mscoco_frcnn.hdf5' #all_images, class_mapping = get_data() # load the training images train_imgs, class_mapping = get_data('train') cfg.class_mapping = class_mapping print('Num classes (including bg) = {}'.format(len(class_mapping))) # save the complete configuration with open(cfg.config_save_file, 'wb') as config_f: pickle.dump(cfg, config_f) print( 'Config has been written to {}, and can be loaded when testing to ensure correct results' .format(cfg.config_save_file)) # shuffle the images randomly random.shuffle(train_imgs) print('Num train samples {}'.format(len(train_imgs))) data_gen_train = data_generators.get_anchor_gt(train_imgs, class_mapping, cfg, nn.get_img_output_length, K.image_dim_ordering(), mode='train') # ============================================================================== # =============================== model definition ====================================== # the Keras backend is tensorflow input_shape_img = (None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) # define the base resnet50 network shared_layers = nn.nn_base(img_input, trainable=False) # define the RPN, built on the base layers num_anchors = len(cfg.anchor_box_scales) * len(cfg.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(shared_layers, roi_input, cfg.num_rois, nb_classes=len(class_mapping), trainable=True) # Model(input=..., output=...) model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) # this is a model that holds both the RPN and the classifier, used to load/save weights for the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) # ============================================================================== # =========================== load ImageNet weights into the base model ============================= try: print('loading base model weights from {}'.format( cfg.base_net_weights)) model_rpn.load_weights(cfg.base_net_weights, by_name=True) model_classifier.load_weights(cfg.base_net_weights, by_name=True) except Exception as e: print('Loading ImageNet weights into the base model failed: ', e) print('Could not load pretrained model weights on ImageNet.') # ============================================================================== # =============================== model optimization ======================================== # create the optimizer objects before calling model.compile(), then pass them in optimizer = Adam(lr=1e-5) optimizer_classifier = Adam(lr=1e-5) model_rpn.compile(optimizer=optimizer, loss=[ losses_fn.rpn_loss_cls(num_anchors), losses_fn.rpn_loss_regr(num_anchors) ]) model_classifier.compile( optimizer=optimizer_classifier, loss=[ losses_fn.class_loss_cls, losses_fn.class_loss_regr(len(class_mapping) - 1) ], metrics={'dense_class_{}'.format(len(class_mapping)): 'accuracy'}) model_all.compile(optimizer='sgd', loss='mae') # ============================================================================== # ================================ training and output settings ================================== epoch_length = len(train_imgs) num_epochs = int(cfg.num_epochs) iter_num = 0 losses = np.zeros((epoch_length, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] start_time = time.time() best_loss = np.Inf logger = Logger(os.path.join('.', 'log.txt')) # ==============================================================================
print('Starting training') for epoch_num in range(num_epochs): progbar = generic_utils.Progbar(epoch_length) logger.write('Epoch {}/{}'.format(epoch_num + 1, num_epochs)) while True: try: if len(rpn_accuracy_rpn_monitor) == epoch_length and cfg.verbose: mean_overlapping_bboxes = float( sum(rpn_accuracy_rpn_monitor)) / len( rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print( 'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations' .format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print( 'RPN is not producing bounding boxes that overlap' ' the ground truth boxes. Check RPN settings or keep training.' ) # image, the ground-truth cls/regr targets, and the box metadata X, Y, img_data = next(data_gen_train) # train the RPN loss_rpn = model_rpn.train_on_batch(X, Y) # feed the regions proposed by the RPN into the RoI stage # x_class, x_regr, base_layers P_rpn = model_rpn.predict_on_batch(X) result = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], cfg, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format # regions, cls targets, regr targets, IoUs X2, Y1, Y2, IouS = roi_helpers.calc_iou( result, img_data, cfg, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append(len(pos_samples)) if cfg.num_rois > 1: if len(pos_samples) < cfg.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice( pos_samples, cfg.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice( neg_samples, cfg.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice( neg_samples, cfg.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois = 1, we pick a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) # train the classifier loss_class = model_classifier.train_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]) losses[iter_num, 0] = loss_rpn[1] losses[iter_num, 1] = loss_rpn[2] losses[iter_num, 2] = loss_class[1] losses[iter_num, 3] = loss_class[2] losses[iter_num, 4] = loss_class[3] iter_num += 1 progbar.update( iter_num, [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])), ('detector_cls', np.mean(losses[:iter_num, 2])), ('detector_regr', np.mean(losses[:iter_num, 3]))]) if iter_num == epoch_length: loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum( rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if cfg.verbose: logger.write( 'Mean number of bounding boxes from RPN overlapping ground truth boxes: {}' .format(mean_overlapping_bboxes)) logger.write( 'Classifier accuracy for bounding boxes 
from RPN: {}' .format(class_acc)) logger.write( 'Loss RPN classifier: {}'.format(loss_rpn_cls)) logger.write( 'Loss RPN regression: {}'.format(loss_rpn_regr)) logger.write('Loss Detector classifier: {}'.format( loss_class_cls)) logger.write('Loss Detector regression: {}'.format( loss_class_regr)) logger.write('Elapsed time: {}'.format(time.time() - start_time)) curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr iter_num = 0 start_time = time.time() if curr_loss < best_loss: if cfg.verbose: logger.write( 'Total loss decreased from {} to {}, saving weights' .format(best_loss, curr_loss)) best_loss = curr_loss model_all.save_weights(cfg.model_path) break except Exception as e: print('Exception: {}'.format(e)) # save model model_all.save_weights(cfg.model_path) continue print('Training complete, exiting.')
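A minimal sketch of the balanced RoI sampling train_mscoco uses per batch: up to num_rois // 2 positives, topped up with negatives, falling back to sampling with replacement when too few distinct negatives exist (counts below are hypothetical):

import numpy as np

num_rois = 32
pos_samples = np.arange(5)       # 5 positive RoIs
neg_samples = np.arange(5, 15)   # 10 negative RoIs

selected_pos = pos_samples.tolist()  # fewer than num_rois // 2, so take them all
try:
    selected_neg = np.random.choice(neg_samples, num_rois - len(selected_pos),
                                    replace=False).tolist()
except ValueError:  # not enough distinct negatives, reuse some
    selected_neg = np.random.choice(neg_samples, num_rois - len(selected_pos),
                                    replace=True).tolist()
sel_samples = selected_pos + selected_neg
assert len(sel_samples) == num_rois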
def train_model(dataset_directory: str, model_name: str, delete_and_recreate_dataset_directory: bool, configuration_name: str, output_weight_path: str, configuration_filename: str, number_of_epochs: int, early_stopping: int, learning_rate_reduction_patience: int, learning_rate_reduction_factor: float, non_max_suppression_overlap_threshold: float, non_max_suppression_max_boxes: int, input_weight_path: str = None, use_fast_data_generators: bool = False): if not dataset_directory: # if no path is given raise ValueError('Error: path to training data must be specified.') muscima_pp_raw_dataset_directory = os.path.join(dataset_directory, "muscima_pp_raw") muscima_image_directory = os.path.join(dataset_directory, "cvcmuscima_staff_removal") muscima_cropped_directory = os.path.join(dataset_directory, "muscima_pp_cropped_images") network = NetworkFactory.get_network_by_name(model_name) try: all_images, classes_count, class_mapping = get_data( muscima_cropped_directory) data_loaded = True except: print( "Could not load dataset. Automatically downloading and recreating dataset." ) data_loaded = False delete_and_recreate_dataset_directory = True if delete_and_recreate_dataset_directory: print("Deleting dataset directory {0}".format(dataset_directory)) if os.path.exists(dataset_directory): shutil.rmtree(dataset_directory) downloader = MuscimaPlusPlusDatasetDownloader( muscima_pp_raw_dataset_directory) downloader.download_and_extract_dataset() downloader = CvcMuscimaDatasetDownloader( muscima_image_directory, CvcMuscimaDataset.StaffRemoval) downloader.download_and_extract_dataset() delete_unused_images(muscima_image_directory) inverter = ImageInverter() # We would like to work with black-on-white images instead of white-on-black images inverter.invert_images(muscima_image_directory, "*.png") shutil.copy("Staff-Vertical-Positions.txt", dataset_directory) cut_images( muscima_image_directory, os.path.join(dataset_directory, "Staff-Vertical-Positions.txt"), muscima_cropped_directory, muscima_pp_raw_dataset_directory) # pass the settings from the command line, and persist them in the config object C = ConfigurationFactory.get_configuration_by_name(configuration_name) C.model_path = output_weight_path start_time = time.time() if not data_loaded: all_images, classes_count, class_mapping = get_data( muscima_cropped_directory) if 'bg' not in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping # inv_map = {v: k for k, v in class_mapping.items()} print('Training images per class:') pprint.pprint(classes_count) print('Num classes (including bg) = {}'.format(len(classes_count))) print( 'Hyperparameters: {0} RoIs generated per run with {1} boxes remaining from non-max suppression and using ' 'non-max suppression threshold of {2:.2f}'.format( C.num_rois, non_max_suppression_max_boxes, non_max_suppression_overlap_threshold)) config_output_filename = configuration_filename with open(config_output_filename, 'wb') as config_f: pickle.dump(C, config_f) print( 'Config has been written to {}, and can be loaded when testing to ensure correct results' .format(config_output_filename)) random.seed(1) random.shuffle(all_images) # num_imgs = len(all_images) train_imgs = [s for s in all_images if s['imageset'] == 'train'] val_imgs = [s for s in all_images if s['imageset'] == 'val'] print('Num train samples {}'.format(len(train_imgs))) print('Num val samples {}'.format(len(val_imgs))) if not use_fast_data_generators: print("Using standard data_generator") data_gen_train = 
data_generators.get_anchor_gt( train_imgs, classes_count, C, network.get_img_output_length, mode='train') data_gen_val = data_generators.get_anchor_gt( val_imgs, classes_count, C, network.get_img_output_length, mode='val') else: print("Using fast data_generator") data_gen_train = data_generators_fast.get_anchor_gt( train_imgs, classes_count, C, network.get_img_output_length, mode='train') data_gen_val = data_generators_fast.get_anchor_gt( val_imgs, classes_count, C, network.get_img_output_length, mode='val') input_shape_img = (None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = network.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = network.rpn(shared_layers, num_anchors) classifier = network.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model(img_input, rpn[:2]) model_classifier = Model([img_input, roi_input], classifier) # this is a model that holds both the RPN and the classifier, used to load/save weights for the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) start_of_training = datetime.date.today() tensorboard_callback = TensorBoard(log_dir="./logs/{0}_{1}/".format( start_of_training, configuration_name)) tensorboard_callback.set_model(model_all) try: print('Loading weights from {0}'.format(input_weight_path)) model_rpn.load_weights(input_weight_path, by_name=True) model_classifier.load_weights(input_weight_path, by_name=True) except: print( 'Could not load pretrained model weights. Weights can be found in the keras application folder \ https://github.com/fchollet/keras/tree/master/keras/applications') optimizer = Adadelta() optimizer_classifier = Adadelta() model_rpn.compile(optimizer=optimizer, loss=[ faster_rcnn_losses.rpn_loss_cls(num_anchors), faster_rcnn_losses.rpn_loss_regr(num_anchors) ]) model_classifier.compile( optimizer=optimizer_classifier, loss=[ faster_rcnn_losses.class_loss_cls, faster_rcnn_losses.class_loss_regr(len(classes_count) - 1) ], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'}) model_all.compile(optimizer=Adadelta(), loss='mae') epoch_length = 1000 validation_epoch_length = len(val_imgs) validation_interval = 1 losses = np.zeros((epoch_length, 5)) losses_val = np.zeros((validation_epoch_length, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] best_loss_training = np.inf best_loss_epoch = 0 best_total_loss_validation = np.Inf best_loss_rpn_cls = np.inf best_loss_rpn_regr = np.inf best_loss_class_cls = np.inf best_loss_class_regr = np.inf best_class_acc = 0.0 model_classifier.summary() print(C.summary()) print('Starting training') train_names = [ 'train_loss_rpn_cls', 'train_loss_rpn_reg', 'train_loss_class_cls', 'train_loss_class_reg', 'train_total_loss', 'train_acc' ] val_names = [ 'val_loss_rpn_cls', 'val_loss_rpn_reg', 'val_loss_class_cls', 'val_loss_class_reg', 'val_total_loss', 'val_acc' ] epochs_without_improvement = 0 for epoch_num in range(number_of_epochs): progbar = generic_utils.Progbar(epoch_length) print('Epoch {}/{}'.format(epoch_num + 1, number_of_epochs)) for iter_num in range(epoch_length): try: if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose: mean_overlapping_bboxes = float( sum(rpn_accuracy_rpn_monitor)) / len( rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print( '\nAverage number 
of overlapping bounding boxes from RPN = {} for {} previous iterations' .format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print( 'RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.' ) X, Y, img_data = next(data_gen_train) loss_rpn = model_rpn.train_on_batch(X, Y) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi( P_rpn[0], P_rpn[1], C, use_regr=True, overlap_thresh=non_max_suppression_overlap_threshold, max_boxes=non_max_suppression_max_boxes) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou( R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append((len(pos_samples))) if C.num_rois > 1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice( pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois = 1, we pick a random pos or neg sample if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) loss_class = model_classifier.train_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]) losses[iter_num, 0] = loss_rpn[1] losses[iter_num, 1] = loss_rpn[2] losses[iter_num, 2] = loss_class[1] losses[iter_num, 3] = loss_class[2] losses[iter_num, 4] = loss_class[3] progbar.update( iter_num + 1, [('rpn_cls', np.mean(losses[:iter_num + 1, 0])), ('rpn_regr', np.mean(losses[:iter_num + 1, 1])), ('detector_cls', np.mean(losses[:iter_num + 1, 2])), ('detector_regr', np.mean(losses[:iter_num + 1, 3]))]) except Exception as e: print('Exception during training: {}'.format(e)) continue # Calculate losses after the specified number of iterations loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float( sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose: print('[INFO TRAINING]') print( 'Mean number of bounding boxes from RPN overlapping ground truth boxes: {}' .format(mean_overlapping_bboxes)) print('Classifier accuracy for bounding boxes from RPN: {}'.format( class_acc)) print('Loss RPN classifier: {}'.format(loss_rpn_cls)) print('Loss RPN regression: {}'.format(loss_rpn_regr)) print('Loss Detector classifier: {}'.format(loss_class_cls)) print('Loss Detector regression: {}'.format(loss_class_regr)) print('Elapsed time: {}'.format(time.time() - start_time)) print("Best loss for training: {0:.3f}".format(best_loss_training)) curr_total_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr val_start_time = time.time() write_log(tensorboard_callback, 
train_names, [ loss_rpn_cls, loss_rpn_regr, loss_class_cls, loss_class_regr, curr_total_loss, class_acc ], epoch_num) if curr_total_loss < best_loss_training: model_path = C.model_path[:-5] + "_training.hdf5" if C.verbose: print( 'Total training loss decreased from {0:.3f} to {1:.3f}, saving weights to {2}' .format(best_loss_training, curr_total_loss, model_path)) best_loss_training = curr_total_loss model_all.save_weights(model_path) ############# # VALIDATION ############# if (epoch_num + 1) % validation_interval != 0: continue progbar = generic_utils.Progbar(validation_epoch_length) for iter_num in range(validation_epoch_length): try: X, Y, img_data = next(data_gen_val) loss_rpn = model_rpn.test_on_batch(X, Y) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi( P_rpn[0], P_rpn[1], C, use_regr=True, overlap_thresh=non_max_suppression_overlap_threshold, max_boxes=non_max_suppression_max_boxes) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou( R, img_data, C, class_mapping) neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append(len(pos_samples)) if C.num_rois > 1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice( pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois = 1, we pick a random pos or neg sample if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) loss_class = model_classifier.test_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]) losses_val[iter_num, 0] = loss_rpn[1] losses_val[iter_num, 1] = loss_rpn[2] losses_val[iter_num, 2] = loss_class[1] losses_val[iter_num, 3] = loss_class[2] losses_val[iter_num, 4] = loss_class[3] progbar.update( iter_num + 1, [('rpn_cls', np.mean(losses_val[:iter_num + 1, 0])), ('rpn_regr', np.mean(losses_val[:iter_num + 1, 1])), ('detector_cls', np.mean(losses_val[:iter_num + 1, 2])), ('detector_regr', np.mean(losses_val[:iter_num + 1, 3]))]) except Exception as e: print('Exception during validation: {}'.format(e)) continue # Compute aggregated losses loss_rpn_cls = np.mean(losses_val[:, 0]) loss_rpn_regr = np.mean(losses_val[:, 1]) loss_class_cls = np.mean(losses_val[:, 2]) loss_class_regr = np.mean(losses_val[:, 3]) class_acc = np.mean(losses_val[:, 4]) mean_overlapping_bboxes = float( sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] curr_total_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr write_log(tensorboard_callback, val_names, [ loss_rpn_cls, loss_rpn_regr, loss_class_cls, loss_class_regr, curr_total_loss, class_acc ], epoch_num) if C.verbose: print('[INFO VALIDATION]') print( 'Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'
.format(mean_overlapping_bboxes)) print('Classifier accuracy for bounding boxes from RPN: {}'.format( class_acc)) print('Loss RPN classifier: {}'.format(loss_rpn_cls)) print('Loss RPN regression: {}'.format(loss_rpn_regr)) print('Loss Detector classifier: {}'.format(loss_class_cls)) print('Loss Detector regression: {}'.format(loss_class_regr)) print( "Current validation loss: {0:.3f}, Best validation loss: {1:.3f} at epoch: {2}" .format(curr_total_loss, best_total_loss_validation, best_loss_epoch)) print('Elapsed time: {}'.format(time.time() - val_start_time)) if curr_total_loss < best_total_loss_validation: if C.verbose: print( 'Total validation loss decreased from {0:.3f} to {1:.3f}, saving weights to {2}' .format(best_total_loss_validation, curr_total_loss, C.model_path)) best_total_loss_validation = curr_total_loss best_loss_rpn_cls = loss_rpn_cls best_loss_rpn_regr = loss_rpn_regr best_loss_class_cls = loss_class_cls best_loss_class_regr = loss_class_regr best_class_acc = class_acc best_loss_epoch = epoch_num model_all.save_weights(C.model_path) epochs_without_improvement = 0 else: epochs_without_improvement += validation_interval if epochs_without_improvement > early_stopping: print( "Early stopping training after {0} epochs without improvement on validation set" .format(epochs_without_improvement)) break if epochs_without_improvement > learning_rate_reduction_patience: current_learning_rate = K.get_value(model_classifier.optimizer.lr) new_learning_rate = current_learning_rate * learning_rate_reduction_factor print( "Not improved validation accuracy for {0} epochs. Reducing learning rate from {1} to {2}" .format(learning_rate_reduction_patience, current_learning_rate, new_learning_rate)) K.set_value(model_classifier.optimizer.lr, new_learning_rate) K.set_value(model_rpn.optimizer.lr, new_learning_rate) K.set_value(model_all.optimizer.lr, new_learning_rate) end_time = time.time() execution_time_in_seconds = round(end_time - start_time) print("Execution time: {0:.1f}s".format(end_time - start_time)) notification_message = "Training on {0} dataset with model {1} and configuration {2} finished. " \ "Val. accuracy: {3:0.5f}%".format("muscima_pp", model_name, configuration_name, best_class_acc * 100) TelegramNotifier.send_message_via_telegram(notification_message) today = "{0:02d}.{1:02d}.{2}".format(start_of_training.day, start_of_training.month, start_of_training.year) GoogleSpreadsheetReporter.append_result_to_spreadsheet( dataset_size=len(all_images), model_name=model_name, configuration_name=configuration_name, data_augmentation="", early_stopping=early_stopping, reduction_patience=learning_rate_reduction_patience, learning_rate_reduction_factor=learning_rate_reduction_factor, optimizer="Adadelta", initial_learning_rate=1.0, non_max_suppression_overlap_threshold= non_max_suppression_overlap_threshold, non_max_suppression_max_boxes=non_max_suppression_max_boxes, validation_accuracy=best_class_acc, validation_total_loss=best_total_loss_validation, best_loss_rpn_cls=best_loss_rpn_cls, best_loss_rpn_regr=best_loss_rpn_regr, best_loss_class_cls=best_loss_class_cls, best_loss_class_regr=best_loss_class_regr, date=today, datasets="muscima_pp", execution_time_in_seconds=execution_time_in_seconds)
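# A minimal standalone sketch of the manual reduce-LR-on-plateau behaviour implemented
# above with K.get_value/K.set_value. `patience` and `factor` are illustrative parameters,
# not values taken from the original configuration; `model` is any compiled Keras model.
from keras import backend as K

def maybe_reduce_lr(model, epochs_without_improvement, patience, factor=0.5):
    """Shrink the optimizer's learning rate once validation has stalled."""
    if epochs_without_improvement > patience:
        current_lr = float(K.get_value(model.optimizer.lr))
        K.set_value(model.optimizer.lr, current_lr * factor)
    return float(K.get_value(model.optimizer.lr))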
'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations' .format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print( 'RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.' ) X, Y, img_data = next(data_gen_train) loss_rpn = model_rpn.train_on_batch(X, Y) P_rpn = model_rpn.predict_on_batch(X) # could try out a different overlap_thresh here R = roi_helpers.rpn_to_roi( P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.8, max_boxes=300) # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0)
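# The positive/negative ROI sampling that recurs in every training loop in this file,
# pulled out as a self-contained sketch (names are illustrative, not from the original).
# The last channel of Y1 is 1 for background ROIs and 0 for object ROIs.
import numpy as np

def sample_rois(Y1, num_rois, rng=np.random):
    """Take up to num_rois//2 positives, fill the rest with negatives
    (sampling with replacement when too few negatives exist)."""
    neg = np.where(Y1[0, :, -1] == 1)[0]
    pos = np.where(Y1[0, :, -1] == 0)[0]
    if len(pos) < num_rois // 2:
        sel_pos = pos.tolist()
    else:
        sel_pos = rng.choice(pos, num_rois // 2, replace=False).tolist()
    n_neg = num_rois - len(sel_pos)
    if len(neg) == 0:
        return sel_pos
    sel_neg = rng.choice(neg, n_neg, replace=len(neg) < n_neg).tolist()
    return sel_pos + sel_neg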
def main(): cleanup() sys.setrecursionlimit(40000) config_output_filename = 'keras_frcnn/config.pickle' with open(config_output_filename, 'rb') as f_in: C = pickle.load(f_in) # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print(class_mapping) class_to_color = { class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping } C.num_rois = num_rois if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (1024, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, 1024) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') all_imgs = [] classes = {} bbox_threshold = 0.8 visualise = True print("Converting video to images..") convert_to_images() print("annotating...") list_files = sorted(get_file_names(img_path), key=lambda var: [ int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var) ]) for img_name in list_files: if not img_name.lower().endswith( ('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')): continue print(img_name) st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X = format_img(img, C) img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy() img_scaled[:, :, 0] += 123.68 img_scaled[:, :, 1] += 116.779 img_scaled[:, :, 2] += 103.939 img_scaled = img_scaled.astype(np.uint8) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append( [16 * x, 16 * y, 16 * (x + w), 16 * (y + h)]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] all_objects = [] people_count = 0 for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] cv2.rectangle(img_scaled, (x1, y1), (x2, y2), class_to_color[key], 2) textLabel = '{}: {}'.format(key, int(100 * new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) if key == "person": people_count = people_count + 1 all_objects.append((key, 1)) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (x1, y1 - 0) cv2.rectangle( img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2) cv2.rectangle( img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1) cv2.putText(img_scaled, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) print('Elapsed time = {}'.format(time.time() - st)) height, width, channels = img_scaled.shape cv2.rectangle(img_scaled, (0, 0), (width, 30), (0, 0, 0), -1) cv2.putText(img_scaled, "Obj count: " + str(list(accumulate(all_objects))), (5, 19), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 1) cv2.imwrite(os.path.join(output_path, img_name), img_scaled) print("Nr of people in this room: %d" % people_count) now = datetime.datetime.now() with open('people-count.log', 'a+') as f: f.write("Nr of people in this room: %d\n" % people_count) print(all_dets)
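# The hard-coded factor 16 above is the backbone's feature-map stride; later snippets
# read it from C.rpn_stride instead. A small sketch of that mapping, with the stride
# made an explicit (assumed) parameter:
def feature_box_to_image(x, y, w, h, stride=16):
    """Map an (x, y, w, h) ROI in feature-map cells to image-pixel corners."""
    return (stride * x, stride * y, stride * (x + w), stride * (y + h))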
def predict_image(file_path): global graph print(file_path) st = time.time() img = cv2.imread(file_path) image_name = os.path.split(file_path)[-1] X = format_img(img, C) img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy() img_scaled[:, :, 0] += 123.68 img_scaled[:, :, 1] += 116.779 img_scaled[:, :, 2] += 103.939 img_scaled = img_scaled.astype(np.uint8) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) with graph.as_default(): # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append( [16 * x, 16 * y, 16 * (x + w), 16 * (y + h)]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] cv2.rectangle(img_scaled, (x1, y1), (x2, y2), class_to_color[key], 2) textLabel = '{}: {}'.format(key, int(100 * new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (x1, y1 - 0) cv2.rectangle( img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2) cv2.rectangle( img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1) cv2.putText(img_scaled, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) print('Elapsed time = {}'.format(time.time() - st)) cv2.imwrite('./static/uploadImage/{}process.jpg'.format(image_name), img_scaled) print(all_dets)
print(img_name) st = time.time() filepath = os.path.join(img_path,img_name) img = cv2.imread(filepath) X, ratio = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0]//C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois*jk:C.num_rois*(jk+1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0]//C.num_rois:
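# The '# pad R' branch that follows this point in every script pads the final, partial
# ROI batch up to num_rois by repeating the first ROI. A self-contained sketch of that
# step (array shapes as in the snippets above):
import numpy as np

def pad_rois(ROIs, num_rois):
    """Pad a (1, n, 4) ROI batch with n < num_rois up to (1, num_rois, 4)."""
    curr_shape = ROIs.shape
    padded = np.zeros((curr_shape[0], num_rois, curr_shape[2]), dtype=ROIs.dtype)
    padded[:, :curr_shape[1], :] = ROIs
    padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]  # repeat the first ROI as filler
    return padded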
def map_main(config_output_filename, img_path, weights_filename, num_rois=32, parser='pascal_voc'): if parser == 'pascal_voc': from keras_frcnn.pascal_voc_parser import get_data elif parser == 'simple': from keras_frcnn.simple_parser import get_data else: raise ValueError( "Command line option parser must be one of 'pascal_voc' or 'simple'" ) with open(config_output_filename, 'rb') as f_in: C = pickle.load(f_in) C.model_path = weights_filename class_mapping = C.class_mapping # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print(class_mapping) class_to_color = { class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping } C.num_rois = num_rois input_shape_img = (None, None, 3) input_shape_features = (None, None, 512) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') all_imgs, _, _ = get_data(img_path) test_imgs = [s for s in all_imgs if s['imageset'] == 'test'] T = {} P = {} final_map = None for idx, img_data in enumerate(test_imgs): print('{}/{}'.format(idx, len(test_imgs))) st = time.time() filepath = img_data['filepath'] img = cv2.imread(filepath) X, fx, fy = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {}
else: pass st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X, ratio = format_img(img, C) X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, overlap_thresh=0.7, max_boxes=300) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break
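# Every prediction path performs the same in-place corner-to-size conversion right
# after rpn_to_roi. The same two lines as a helper (a sketch, not part of roi_helpers):
def xyxy_to_xywh(R):
    """Convert boxes from (x1, y1, x2, y2) to (x, y, w, h) in place."""
    R[:, 2] -= R[:, 0]
    R[:, 3] -= R[:, 1]
    return R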
def uploadtest(request): if request.FILES: str_info = '' pic = request.FILES.get('pic') save_path = '%s/test/%s' % (settings.MEDIA_ROOT, pic.name) with open(save_path, 'wb') as f: for content in pic.chunks(): f.write(content) print('--> Image uploaded successfully...') str_info += '--> Image uploaded successfully...\n' # the detection procedure follows: sys.setrecursionlimit(40000) config_output_filename = os.path.join(settings.CONFIG_BISHE, 'bishe/config.pickle') # print(config_output_filename) print('--> Running detection...') str_info += '--> Running detection...\n' with open(config_output_filename, 'rb') as f_in: C = pickle.load(f_in) print('--> Found the config file...') C.model_path = os.path.join(settings.CONFIG_BISHE, C.model_path) print('--> Found the model info...') str_info += '--> Found the model info...\n' print('--> Model path: ' + C.model_path) str_info += '--> Model path: ' + C.model_path + '\n' if C.network == 'resnet50': import keras_frcnn.resnet as nn elif C.network == 'vgg': import keras_frcnn.vgg as nn # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False C.num_rois = 10 def format_img_size(img, C): """ formats the image size based on config """ img_min_side = float(C.im_size) (height, width, _) = img.shape if width <= height: ratio = img_min_side / width new_height = int(ratio * height) new_width = int(img_min_side) else: ratio = img_min_side / height new_width = int(ratio * width) new_height = int(img_min_side) img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC) return img, ratio def format_img_channels(img, C): """ formats the image channels based on config """ img = img[:, :, (2, 1, 0)] img = img.astype(np.float32) img[:, :, 0] -= C.img_channel_mean[0] img[:, :, 1] -= C.img_channel_mean[1] img[:, :, 2] -= C.img_channel_mean[2] img /= C.img_scaling_factor img = np.transpose(img, (2, 0, 1)) img = np.expand_dims(img, axis=0) return img def format_img(img, C): """ formats an image for model prediction based on config """ img, ratio = format_img_size(img, C) img = format_img_channels(img, C) return img, ratio # Method to transform the coordinates of the bounding box to its original size def get_real_coordinates(ratio, x1, y1, x2, y2): real_x1 = int(round(x1 // ratio)) real_y1 = int(round(y1 // ratio)) real_x2 = int(round(x2 // ratio)) real_y2 = int(round(y2 // ratio)) return (real_x1, real_y1, real_x2, real_y2) class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} print('--> Breast cancer lesion classes: ' + str(class_mapping)) str_info += '--> Breast cancer lesion classes: ' + str(class_mapping) + '\n' class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping} if C.network == 'resnet50': num_features = 1024 elif C.network == 'vgg': num_features = 512 if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (num_features, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, num_features) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) print('--> Loading weights from {}...'.format(C.model_path)) str_info += '--> Loading weights from {}...'.format(C.model_path) + '\n' model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') all_imgs = [] classes = {} bbox_threshold = 0.8 st = time.time() filepath = save_path img = cv2.imread(filepath) X, ratio = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: # pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append( [C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h)]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2) cv2.rectangle(img, (real_x1, real_y1), (real_x2, real_y2), ( int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 2) textLabel = '{}: {}'.format(key, int(100 * new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (real_x1, real_y1 - 0) cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2) cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1) cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) timeused = (time.time() - st) print('--> Detection finished, time taken: {}...'.format(timeused)) str_info += '--> Detection finished, time taken: {}'.format(timeused) + '\n' dets_str = str(all_dets) print('--> Detection results: ' + dets_str) str_info += '--> Detection results: ' + dets_str + '\n' result_path = '%s/result/%s' % (settings.MEDIA_ROOT, pic.name) cv2.imwrite(result_path, img) print(str_info) # pass the image paths back in preparation for saving to the database test_pic = '/static/media/test/%s' % (pic.name) result_pic = '/static/media/result/%s' % (pic.name) user_id = UserInfo.objects.get(username=request.session.get('username')).id return JsonResponse( {'res': 1, 'result_pic': result_pic, 'test_pic': test_pic, 'user_id': user_id, 'str_info': str_info}) return JsonResponse({'res': 0})
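# Note on get_real_coordinates above: it uses floor division ('//') inside round(),
# so the fractional part is discarded before rounding ever happens. A sketch of the
# same mapping with true division, which presumably is what was intended (the two
# differ only at sub-pixel level):
def get_real_coordinates_true_div(ratio, x1, y1, x2, y2):
    """Rescale box corners from the resized image back to the original image."""
    return (int(round(x1 / ratio)), int(round(y1 / ratio)),
            int(round(x2 / ratio)), int(round(y2 / ratio)))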
def predict_single_image(img_path, model_rpn, model_classifier_only, cfg, class_mapping): st = time.time() img = cv2.imread(img_path) if img is None: print('reading image failed.') exit(0) X, ratio = format_img(img, cfg) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) # the result contains all boxes in (x1, y1, x2, y2) format result = roi_helpers.rpn_to_roi(Y1, Y2, cfg, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) result[:, 2] -= result[:, 0] result[:, 3] -= result[:, 1] bbox_threshold = 0.8 # apply the spatial pyramid pooling to the proposed regions boxes = dict() for jk in range(result.shape[0] // cfg.num_rois + 1): rois = np.expand_dims(result[cfg.num_rois * jk:cfg.num_rois * (jk + 1), :], axis=0) if rois.shape[1] == 0: break if jk == result.shape[0] // cfg.num_rois: # pad R curr_shape = rois.shape target_shape = (curr_shape[0], cfg.num_rois, curr_shape[2]) rois_padded = np.zeros(target_shape).astype(rois.dtype) rois_padded[:, :curr_shape[1], :] = rois rois_padded[0, curr_shape[1]:, :] = rois[0, 0, :] rois = rois_padded [p_cls, p_regr] = model_classifier_only.predict([F, rois]) for ii in range(p_cls.shape[1]): if np.max(p_cls[0, ii, :]) < bbox_threshold: # if np.max(p_cls[0, ii, :]) < bbox_threshold or np.argmax(p_cls[0, ii, :]) == (p_cls.shape[2] - 1): # if the predicted class was "bg", skip appending it to boxes. continue cls_num = np.argmax(p_cls[0, ii, :]) if cls_num not in boxes.keys(): boxes[cls_num] = [] (x, y, w, h) = rois[0, ii, :] try: (tx, ty, tw, th) = p_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= cfg.classifier_regr_std[0] ty /= cfg.classifier_regr_std[1] tw /= cfg.classifier_regr_std[2] th /= cfg.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th) except Exception as e: print(e) pass boxes[cls_num].append([ cfg.rpn_stride * x, cfg.rpn_stride * y, cfg.rpn_stride * (x + w), cfg.rpn_stride * (y + h), np.max(p_cls[0, ii, :]) ]) # add some nms to reduce many boxes for cls_num, box in boxes.items(): boxes_nms = roi_helpers.non_max_suppression_fast(box, overlap_thresh=0.5) boxes[cls_num] = boxes_nms print(class_mapping[cls_num] + ":") for b in boxes_nms: b[0], b[1], b[2], b[3] = get_real_coordinates( ratio, b[0], b[1], b[2], b[3]) print('{} prob: {}'.format(b[0:4], b[-1])) img = draw_boxes_and_label_on_image_cv2(img, class_mapping, boxes) print('Elapsed time = {}'.format(time.time() - st)) # cv2.imshow('image', img) result_path = './results_images/{}.png'.format( os.path.basename(img_path).split('.')[0]) print('result saved into ', result_path) cv2.imwrite(result_path, img) cv2.waitKey(0)
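# roi_helpers.apply_regr, used throughout these scripts, applies the standard Faster
# R-CNN box decoding. A sketch of that parameterisation (assumed to match the
# keras_frcnn helper, written here under a distinct name):
import math

def apply_regr_sketch(x, y, w, h, tx, ty, tw, th):
    """Shift the box centre by (tx*w, ty*h) and scale its size by exp(tw), exp(th)."""
    cx, cy = x + w / 2.0, y + h / 2.0
    cx1, cy1 = tx * w + cx, ty * h + cy
    w1, h1 = math.exp(tw) * w, math.exp(th) * h
    return (int(round(cx1 - w1 / 2.0)), int(round(cy1 - h1 / 2.0)),
            int(round(w1)), int(round(h1)))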
def predict(self, img_path): all_imgs = [] classes = {} bbox_threshold = 0.8 st = time.time() img = cv2.imread(img_path) X, ratio = format_img(img, self.C) print(K.image_data_format()) if K.image_data_format() == 'channels_last': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = self.model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, self.C, K.image_data_format(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // self.C.num_rois + 1): ROIs = np.expand_dims(R[self.C.num_rois * jk:self.C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // self.C.num_rois: # pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], self.C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = self.model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = self.class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= self.C.classifier_regr_std[0] ty /= self.C.classifier_regr_std[1] tw /= self.C.classifier_regr_std[2] th /= self.C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([ self.C.rpn_stride * x, self.C.rpn_stride * y, self.C.rpn_stride * (x + w), self.C.rpn_stride * (y + h) ]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] candidates = [] # list that will hold every candidate object region for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.6) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2) real_points = TwoPoint(real_x1 + 10, real_y1 + 10, real_x2 - 10, real_y2 - 10) candidates.append(real_points) cv2.rectangle(img, (real_x1 + 10, real_y1 + 10), (real_x2 - 10, real_y2 - 10), (255, 0, 0), 2) # cv2.rectangle(img,(x1, y1), (x2, y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 3) textLabel = '{}: {}'.format(key, int(100 * new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (real_x1, real_y1 - 0) cv2.rectangle(img, (textOrg[0], textOrg[1]), (textOrg[0] + retval[0], textOrg[1] - retval[1]), (0, 0, 0), 2) cv2.rectangle( img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1) cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) min_x1 = 10 min_y1 = 10 max_x2 = 0 max_y2 = 0 for c in candidates: # clamp bbox coordinates that fall outside the allowed image range if c.x1 <= 0: c.x1 = 10 if c.y1 <= 0: c.y1 = 10 if c.x2 >= IMAGE_MAX_X: c.x2 = IMAGE_MAX_X - 10 if c.y2 >= IMAGE_MAX_Y: c.y2 = IMAGE_MAX_Y - 10 if min_x1 > c.x1: # merge all the object regions into a single region min_x1 = c.x1 if min_y1 > c.y1: min_y1 = c.y1 if max_x2 < c.x2: max_x2 = c.x2 if max_y2 < c.y2: max_y2 = c.y2 # take each object's min/max points and merge them into one enclosing box cv2.rectangle(img, (min_x1, min_y1), (max_x2, max_y2), (255, 0, 255), 2) print('Elapsed time = {}'.format(time.time() - st)) print(all_dets) cv2.imshow('img', img) cv2.waitKey(0)
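# The min/max loop above computes the union (enclosing box) of all candidates, but
# seeding min_x1/min_y1 with the constant 10 means the union can never start right of
# x=10 or below y=10. A sketch seeded from the boxes themselves (TwoPoint-style
# objects with x1/y1/x2/y2 attributes assumed, as in the snippet above):
def union_box(candidates):
    """Smallest box enclosing every candidate box."""
    return (min(c.x1 for c in candidates), min(c.y1 for c in candidates),
            max(c.x2 for c in candidates), max(c.y2 for c in candidates))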
def run_prediction(config_filename, model_path, test_path, out_path, network='resnet50', num_rois=32): with open(config_filename, 'rb') as f_in: C = pickle.load(f_in) if C.network == 'resnet50': import keras_frcnn.resnet as nn elif C.network == 'vgg': import keras_frcnn.vgg as nn # turn off any data augmentation at test time C.use_horizontal_flips = False C.use_vertical_flips = False C.rot_90 = False img_path = test_path class_mapping = C.class_mapping if 'bg' not in class_mapping: class_mapping['bg'] = len(class_mapping) class_mapping = {v: k for k, v in class_mapping.items()} #print(class_mapping) class_to_color = { class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping } C.num_rois = int(num_rois) if C.network == 'resnet50': num_features = 1024 elif C.network == 'vgg': num_features = 512 if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) input_shape_features = (num_features, None, None) else: input_shape_img = (None, None, 3) input_shape_features = (None, None, num_features) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(C.num_rois, 4)) feature_map_input = Input(shape=input_shape_features) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=True) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn_layers = nn.rpn(shared_layers, num_anchors) classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True) model_rpn = Model(img_input, rpn_layers) model_classifier_only = Model([feature_map_input, roi_input], classifier) model_classifier = Model([feature_map_input, roi_input], classifier) C.model_path = model_path #print('Loading weights from {}'.format(C.model_path)) model_rpn.load_weights(C.model_path, by_name=True) model_classifier.load_weights(C.model_path, by_name=True) model_rpn.compile(optimizer='sgd', loss='mse') model_classifier.compile(optimizer='sgd', loss='mse') all_imgs = [] classes = {} bbox_threshold = 0.9 # default is 0.8 visualise = True preresults = [] # add this for idx, img_name in enumerate(sorted(os.listdir(img_path))): if not img_name.lower().endswith( ('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')): continue #print(img_name) #st = time.time() filepath = os.path.join(img_path, img_name) img = cv2.imread(filepath) X, ratio = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # default is 0.7 # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded [P_cls, P_regr] = model_classifier_only.predict([F, ROIs]) for ii in range(P_cls.shape[1]): if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name 
not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] cls_num = np.argmax(P_cls[0, ii, :]) try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= C.classifier_regr_std[0] ty /= C.classifier_regr_std[1] tw /= C.classifier_regr_std[2] th /= C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: pass bboxes[cls_name].append([ C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h) ]) probs[cls_name].append(np.max(P_cls[0, ii, :])) all_dets = [] bbox_results = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.4) # default is 0.5 for jk in range(new_boxes.shape[0]): img_use = img.copy() (x1, y1, x2, y2) = new_boxes[jk, :] (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2) #cv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),2) cv2.rectangle(img_use, (real_x1, real_y1), (real_x2, real_y2), (0, 0, 255), 4) #textLabel = '{}: {}'.format(key,int(100*new_probs[jk])) all_dets.append((key, 100 * new_probs[jk])) bbox_results.append( (key, (real_x1, real_y1, real_x2, real_y2))) #(retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1) #textOrg = (real_x1, real_y1-0) #cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (0, 0, 0), 2) #cv2.rectangle(img, (textOrg[0] - 5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] + 5, textOrg[1]-retval[1] - 5), (255, 255, 255), -1) #cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1) if not os.path.exists(out_path): os.makedirs(out_path) page_id = int(img_name.split('.')[0].split('_')[-1]) + 1 page_name = 'page_' + str(page_id) cv2.imwrite( os.path.join( out_path, '{}.jpg'.format(page_name + '_id_' + str(jk + 1))), img_use) #format(idx) #print('Elapsed time = {}'.format(time.time() - st)) #print(all_dets) #print(bbox_results) preresults.append([img_name, bbox_results]) ## add this # cv2.imshow('img', img) # cv2.waitKey(0) #outpath = '/data/projects/table_parser/results/result_scanned/' #if not os.path.exists(out_path): #os.makedirs(out_path) #cv2.imwrite(out_path + '{}.jpg'.format(img_name.split('.')[0]),img) #format(idx) preresults = pd.DataFrame(preresults) preresults.to_csv(os.path.join(out_path, 'preresults.txt'), header=None, index=None)
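# roi_helpers.non_max_suppression_fast is called with several thresholds in this file
# (0.4, 0.5, 0.6). A compact sketch of greedy IoU-based NMS for reference — not the
# library's exact implementation:
import numpy as np

def nms_sketch(boxes, probs, overlap_thresh=0.5):
    """Keep the highest-scoring box, drop boxes overlapping it too much, repeat."""
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1) * (y2 - y1)
    order = np.argsort(probs)  # ascending; the best box is last
    keep = []
    while len(order) > 0:
        i = order[-1]
        keep.append(i)
        rest = order[:-1]
        xx1 = np.maximum(x1[i], x1[rest])
        yy1 = np.maximum(y1[i], y1[rest])
        xx2 = np.minimum(x2[i], x2[rest])
        yy2 = np.minimum(y2[i], y2[rest])
        inter = np.maximum(0, xx2 - xx1) * np.maximum(0, yy2 - yy1)
        iou = inter / (area[i] + area[rest] - inter + 1e-9)
        order = rest[iou <= overlap_thresh]
    return boxes[keep], probs[keep]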
def detect_known_objects(self, img): #img = self.image_resize(img, height=int(img.shape[0]/3.0)) #img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV) #img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0]) #img=cv2.cvtColor(img_yuv,cv2.COLOR_YUV2BGR) X, ratio = self.format_img(img, self.C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = self.model_rpn.predict(X) #print Y1, Y2, F a = datetime.datetime.now() R = roi_helpers.rpn_to_roi(Y1, Y2, self.C, K.image_dim_ordering(), overlap_thresh=0.7) b = datetime.datetime.now() delta = b - a #print("roi_helpers.rpn_to_roi took:", int(delta.total_seconds() * 1000)) # milliseconds #print R #for i in R: # cv2.rectangle(img,(i[0],i[1]),(i[2],i[3]),(0,255,0),3) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // self.C.num_rois + 1): ROIs = np.expand_dims(R[self.C.num_rois * jk:self.C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0: break if jk == R.shape[0] // self.C.num_rois: #pad R curr_shape = ROIs.shape target_shape = (curr_shape[0], self.C.num_rois, curr_shape[2]) ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype) ROIs_padded[:, :curr_shape[1], :] = ROIs ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :] ROIs = ROIs_padded #print("ROIs shape", np.array(ROIs).shape) #print("F", np.array(F).shape) a = datetime.datetime.now() [P_cls, P_regr, P_clust] = self.model_classifier_only.predict([F, ROIs]) b = datetime.datetime.now() delta = b - a #print("prediction of roi took:", int(delta.total_seconds() * 1000)) # milliseconds #print P_cls, P_regr #print P_cls.shape, P_regr.shape for ii in range(P_cls.shape[1]): #print P_cls[0,ii,:] if np.max(P_cls[0, ii, :]) < self.bbox_threshold or np.argmax( P_cls[0, ii, :]) == (P_cls.shape[2] - 1): continue cls_name = self.class_mapping[np.argmax(P_cls[0, ii, :])] if cls_name not in bboxes: bboxes[cls_name] = [] probs[cls_name] = [] (x, y, w, h) = ROIs[0, ii, :] #print x, y, w, h cls_num = np.argmax(P_cls[0, ii, :]) #print "something", cls_num try: (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)] tx /= self.C.classifier_regr_std[0] ty /= self.C.classifier_regr_std[1] tw /= self.C.classifier_regr_std[2] th /= self.C.classifier_regr_std[3] x, y, w, h = roi_helpers.apply_regr( x, y, w, h, tx, ty, tw, th) except: print("exception") pass bboxes[cls_name].append([ self.C.rpn_stride * x, self.C.rpn_stride * y, self.C.rpn_stride * (x + w), self.C.rpn_stride * (y + h) ]) probs[cls_name].append(np.max(P_cls[0, ii, :])) detected_objects = [] for key in bboxes: bbox = np.array(bboxes[key]) new_boxes, new_probs = roi_helpers.non_max_suppression_fast( bbox, np.array(probs[key]), overlap_thresh=0.5) for jk in range(new_boxes.shape[0]): (x1, y1, x2, y2) = new_boxes[jk, :] (real_x1, real_y1, real_x2, real_y2) = self.get_real_coordinates(ratio, x1, y1, x2, y2) #print "drawing detected rect at:", (real_x1, real_y1), (real_x2, real_y2) #cv2.rectangle(img,(real_x1, real_y1), (real_x2, real_y2), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])),5) #textLabel = '{}: {}'.format(key,int(100*new_probs[jk])) #all_dets.append((key,100*new_probs[jk])) #(retval,baseLine) = cv2.getTextSize(textLabel,cv2.FONT_HERSHEY_COMPLEX,1,1) #textOrg = (real_x1-20, real_y1-20) #cv2.rectangle(img, (textOrg[0] - 5, textOrg[1]+baseLine - 5),
(textOrg[0]+retval[0] +5, textOrg[1]-retval[1] +5), (0, 0, 0), 2) #cv2.rectangle(img, (textOrg[0] -5,textOrg[1]+baseLine - 5), (textOrg[0]+retval[0] +5, textOrg[1]-retval[1] +5), (255, 255, 255), -1) #cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 0.3, (0, 0, 0), 1) height, width, channels = img.shape #FOV horizontal = 62 degrees (from 90 on right to 33 on left) angle_between_robot_centre_and_detected_object = self.angle_between( (real_x1 + self.distance([real_x1, real_y1], [real_x2, real_y1]) / 2.0, (real_y1 + self.distance([real_x1, real_y1], [real_x1, real_y2]) / 2.0)), (width / 2.0, 0)) - 52.0 angle_between_robot_centre_and_detected_object = -angle_between_robot_centre_and_detected_object angle_between_robot_centre_and_detected_object = calc_angle([ int((real_x1 + real_x2) / 2.0), int((real_y1 + real_y2) / 2.0) ]) angle, distance = calculate_angle_and_distance(img, real_x1, real_x2, real_y1, real_y2, obj_width=16) angle_between_robot_centre_and_detected_object = angle focal_length_mm = 1.0 average_real_object_height_mm = 1.0 image_height_px = height object_height_px = self.distance([real_x1, real_y1], [real_x1, real_y2]) sensor_height_mm = 314.2 distance_between_robot_centre_and_detected_object = (15.0 / ( (min(self.distance([real_x1, real_y1], [real_x2, real_y1]), self.distance([real_x1, real_y1], [real_x1, real_y2])) / max(self.distance([real_x1, real_y1], [real_x2, real_y1]), self.distance([real_x1, real_y1], [real_x1, real_y2])) )) * 123) / self.distance([real_x1, real_y1], [real_x2, real_y1]) distance_between_robot_centre_and_detected_object = distance_between_robot_centre_and_detected_object * 5.0 distance_between_robot_centre_and_detected_object = distance detected_objects.append( (key, "", real_x1, real_y1, real_x2, real_y2, distance_between_robot_centre_and_detected_object, angle_between_robot_centre_and_detected_object)) print("detected objects", len(detected_objects)) temporary_memory = [] for image_item in detected_objects: seen_item_centroid = (image_item[2] + self.distance( (image_item[2], image_item[3]), (image_item[4], image_item[3])) / 2.0, image_item[4] + self.distance( (image_item[2], image_item[3]), (image_item[2], image_item[5])) / 2.0) tracking_uuid = None #print ("items in memory", len(self.SHORT_TERM_MEMORY)) for memory_item in self.SHORT_TERM_MEMORY: memory_centroid = (memory_item[2] + self.distance( (memory_item[2], memory_item[3]), (memory_item[4], memory_item[3])) / 2.0, memory_item[4] + self.distance( (memory_item[2], memory_item[3]), (memory_item[2], memory_item[5])) / 2.0) #print ("distance", self.distance(seen_item_centroid, memory_centroid)) if self.distance(seen_item_centroid, memory_centroid) < self.distance( [image_item[2], image_item[3]], [image_item[4], image_item[5] ]) and image_item[0] == memory_item[0]: tracking_uuid = memory_item[1] continue if tracking_uuid != None: temporary_memory.append( (self.KNOWN_OBJECTS[int(image_item[0])], tracking_uuid, image_item[2], image_item[3], image_item[4], image_item[5], image_item[6], image_item[7])) else: temporary_memory.append( (self.KNOWN_OBJECTS[int(image_item[0])], uuid.uuid1(), image_item[2], image_item[3], image_item[4], image_item[5], image_item[6], image_item[7])) #print ("temp memory items", len(temporary_memory)) if self.show_image: for item in temporary_memory: cv2.rectangle(img, (item[2], item[3]), (item[4], item[5]), (0, 0, 0), 5) textLabel = '{}: {}'.format(item[0], item[1]) (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1) textOrg = (image_item[2] 
- 20, image_item[3] - 20) cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] + 5), (0, 0, 0), 2) cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5), (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] + 5), (255, 255, 255), -1) cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 0.3, (0, 0, 0), 1) self.SHORT_TERM_MEMORY = temporary_memory if self.show_image: cv2.imshow('image', img) cv2.waitKey(3000) #time.sleep(1) #cv2.destroyAllWindows() return self.SHORT_TERM_MEMORY
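# The SHORT_TERM_MEMORY matching above associates a detection with a remembered track
# when their centroids lie close together and the classes agree. A trimmed sketch of
# that association rule; items are tuples whose slot 0 is the class, slot 1 the track
# uuid, and slots 2..5 the box corners, as above. `max_dist` is an illustrative
# stand-in for the box-diagonal test used in the original.
import math
import uuid

def associate(detection, memory, max_dist):
    """Return the uuid of a nearby same-class memory item, else mint a new one."""
    cls = detection[0]
    x1, y1, x2, y2 = detection[2:6]
    cx, cy = (x1 + x2) / 2.0, (y1 + y2) / 2.0
    for m in memory:
        mx, my = (m[2] + m[4]) / 2.0, (m[3] + m[5]) / 2.0
        if m[0] == cls and math.hypot(cx - mx, cy - my) < max_dist:
            return m[1]
    return uuid.uuid1()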
def train_fasterrcnn(): # config for data argument cfg = config.Config() cfg.balanced_classes = True cfg.use_horizontal_flips = True cfg.use_vertical_flips = True cfg.rot_90 = True cfg.num_rois = 50 # for the Xingtu Cup optical remote-sensing aircraft detection task this should be 50+ cfg.anchor_box_scales = [10, 30, 50, 80, 100] #[41,70,120,20,90] cfg.anchor_box_ratios = [[1, 1.2], [1, 1], [1.2, 1]] #[[1,1.4],[1,0.84],[1,1.17],[1,0.64],[1,1]] #cfg.rpn_stride = 8 cfg.im_size = 512 cfg.num_epochs = 100 cfg.epoch_length = 150 #1462 cfg.base_net_weights = os.path.join('./model/', nn.get_weight_path()) # TODO: the only file that should need changing to train on other data cfg.model_path = './model/kitti_frcnn_last.hdf5' cfg.simple_label_file = 'DOTA2018_OpticalAircraft_bboxes.txt' #'kitti_simple_label.txt'#'E:/Xingtubei/official_datas/OpticalAircraft/laptop_Chreoc_OpticalAircraft_bboxes.txt' # '/media/liuhuaqing/Elements/Xingtubei/official_datas/OpticalAircraft/Chreoc_OpticalAircraft_bboxes.txt'#'F:/Xingtubei/official_datas/OpticalAircraft/Chreoc_OpticalAircraft_bboxes.txt' # 'kitti_simple_label.txt' all_images, classes_count, class_mapping = get_data(cfg.simple_label_file) # load the dataset; cv2.imread() cannot handle non-ASCII (e.g. Chinese) characters in paths if 'bg' not in classes_count: # 'bg' stands for background classes_count['bg'] = 0 # 0 means the training data contains no 'background' class class_mapping['bg'] = len(class_mapping) cfg.class_mapping = class_mapping with open(cfg.config_save_file, 'wb') as config_f: pickle.dump(cfg, config_f) print( 'Config has been written to {}, and can be loaded when testing to ensure correct results' .format(cfg.config_save_file)) inv_map = {v: k for k, v in class_mapping.items()} # inverse of class_mapping print('Training images per class:') pprint.pprint(classes_count) print('Num classes (including bg) = {}'.format(len(classes_count))) random.shuffle(all_images) num_imgs = len(all_images) train_imgs = [s for s in all_images if s['imageset'] == 'trainval'] # training set: a list whose elements are dicts val_imgs = [s for s in all_images if s['imageset'] == 'test'] # validation set: a list whose elements are dicts print('Num train samples {}'.format(len(train_imgs))) print('Num val samples {}'.format(len(val_imgs))) data_gen_train = data_generators.get_anchor_gt( train_imgs, classes_count, cfg, nn.get_img_output_length, K.image_dim_ordering(), mode='train') # augment the data, then generate the training inputs Faster R-CNN needs (images, RPN targets, etc.) data_gen_val = data_generators.get_anchor_gt( val_imgs, classes_count, cfg, nn.get_img_output_length, K.image_dim_ordering(), mode='val') # augment the data, then generate the validation inputs Faster R-CNN needs # choose the input tensor layout for the Keras backend actually in use, since the two backends order dimensions differently if K.image_dim_ordering() == 'th': input_shape_img = (3, None, None) # Theano backend else: input_shape_img = (None, None, 3) # TensorFlow backend img_input = Input(shape=input_shape_img) # the input image roi_input = Input(shape=(None, 4)) # the hand-labelled ROI coordinates; the 4 values are x1, y1, x2, y2 # define the base network (resnet here, can be VGG, Inception, etc) shared_layer, shared_layers_stage3, shared_layers_stage4 = nn.nn_base( img_input, trainable=True) # the shared layers at the bottom of the network, here a ResNet, defined in nn # define the RPN, built on the base layers num_anchors = len(cfg.anchor_box_scales) * len(cfg.anchor_box_ratios) rpn_stage3 = nn.rpn(shared_layers_stage3, num_anchors) print(rpn_stage3[1].shape) rpn_stage4 = nn.rpn(shared_layers_stage4, num_anchors) # [x_class, x_regr, base_layers] print(rpn_stage4[1].shape) # x_class has shape (?, shared_layer_w/2, shared_layer_h/2, n_scales*n_ratios); x_regr has shape (?, shared_layer_w/2, shared_layer_h/2, 4*n_scales*n_ratios) rpn = nn.rpn(shared_layer, num_anchors) print(rpn[1].shape) # the two RPN branches are merged here classifier = nn.classifier(shared_layer, roi_input, cfg.num_rois, nb_classes=len(classes_count), trainable=True) model_rpn = Model( inputs=img_input, outputs=rpn[:2]) # the RPN is defined in keras_frcnn/resnet; the two entries of rpn[:2] are its classification and regression outputs model_classifier = Model( inputs=[img_input, roi_input], outputs=classifier) # a Keras functional Model: in general, anything with inputs and outputs # this is a model that holds both the RPN and the classifier, used to load/save weights for the models model_all = Model(inputs=[img_input, roi_input], outputs=rpn[:2] + classifier) # rpn[:2] + classifier concatenates the RPN output list with the classifier output list try: # try to load previously trained weights print('loading weights from {}'.format(cfg.model_path)) model_rpn.load_weights(cfg.model_path, by_name=True) model_classifier.load_weights(cfg.model_path, by_name=True) except Exception as e: print(e) print( 'Could not load pretrained model weights. Weights can be found in the keras application folder ' 'https://github.com/fchollet/keras/tree/master/keras/applications') optimizer = Adam(lr=1e-5) # an Adam optimizer with learning rate lr optimizer_classifier = Adam(lr=1e-5) # an Adam optimizer with learning rate lr # num_anchors equals 9 model_rpn.compile(optimizer=optimizer, loss=[ losses_fn.rpn_loss_cls(num_anchors), losses_fn.rpn_loss_regr(num_anchors) ]) model_classifier.compile( optimizer=optimizer_classifier, loss=[ losses_fn.class_loss_cls, losses_fn.class_loss_regr(len(classes_count) - 1) ], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'}) model_all.compile(optimizer='sgd', loss='mae') # mae = mean absolute error epoch_length = cfg.epoch_length # the number of iterations per epoch (also the amount of training data); every epoch_length iterations the weights are considered for saving and iter_num is reset to 0 num_epochs = int(cfg.num_epochs) iter_num = 0 # initial iteration count losses = np.zeros((epoch_length, 5)) # loss array recording the losses within each epoch rpn_accuracy_rpn_monitor = [] # records how the RPN's accuracy evolves during training rpn_accuracy_for_epoch = [] # records the RPN's accuracy for each training epoch start_time = time.time() # training start time best_loss = np.Inf # smallest loss seen so far class_mapping_inv = {v: k for k, v in class_mapping.items()} # dict mapping class index to class name print('Starting training') vis = True for epoch_num in range(num_epochs): progbar = generic_utils.Progbar(epoch_length) # a progress bar print('Epoch {}/{}'.format(epoch_num + 1, num_epochs)) # current epoch / total epochs while True: # this loop ends at the break below, once epoch_length iterations are done try: if len(rpn_accuracy_rpn_monitor) == epoch_length and cfg.verbose: # every epoch_length iterations, report the mean RPN accuracy mean_overlapping_bboxes = float( sum(rpn_accuracy_rpn_monitor)) / len( rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print( 'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations' .format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print( 'RPN is not producing bounding boxes that overlap' ' the ground truth boxes. Check RPN settings or keep training.' ) # X is the raw image, e.g. of shape (1, 600, 1987, 3) for KITTI. # Y is the label, a list with two elements: # the first element holds the classes, shape (1, share_layer_h, share_layer_w, 2*n_scales*n_ratios); in the first half a 1 (0) marks an anchor that is (is not) a positive-or-negative sample, in the second half a 1 (0) marks one that is (is not) a positive sample # the second element holds the bbox targets, shape (1, share_layer_h, share_layer_w, 8*n_scales*n_ratios); the first four values flag positive samples, the last four are the bbox itself (why is the first half assigned a repeat?) # img_data is a dict holding the file name, size, hand-labelled ROIs, classes, etc. X, Y, img_data = next(data_gen_train) #Y_1=Y[0] #Y_1=Y_1[0,:,:,:] loss_rpn = model_rpn.train_on_batch(X, Y) # why does Y's shape differ from P_rpn's? why does loss_rpn have three entries, what do they mean and where are they defined? P_rpn = model_rpn.predict_on_batch(X) # P_rpn shapes: (1, 124, 38, 9) and (1, 124, 38, 36) result = roi_helpers.rpn_to_roi( P_rpn[0], P_rpn[1], cfg, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300) # result has shape 300*4 # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format # X2 has shape 100*4, Y1 has shape 1*100*8 (8 = total number of target classes in the training set), IouS has length 100 X2, Y1, Y2, IouS = roi_helpers.calc_iou( result, img_data, cfg, class_mapping) # Y2 has shape 1*1*56, 56 = 28*2 (28 = 4*7); the first 28 values are coords, the last 28 are labels (1 for the matching class) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue neg_samples = np.where(Y1[0, :, -1] == 1) # Y1 holds the classification targets; a 1 in the last element means background pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append(len(pos_samples)) if cfg.num_rois > 1: if len(pos_samples) < cfg.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice( pos_samples, cfg.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice( neg_samples, cfg.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice( neg_samples, cfg.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois = 1, we pick a random pos or neg sample if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) loss_class = model_classifier.train_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]) # feed the ROIs produced by the RPN to the classifier losses[iter_num, 0] = loss_rpn[1] losses[iter_num, 1] = loss_rpn[2] losses[iter_num, 2] = loss_class[1] losses[iter_num, 3] = loss_class[2] losses[iter_num, 4] = loss_class[3] iter_num += 1 progbar.update( iter_num, [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])), ('detector_cls', np.mean(losses[:iter_num, 2])), ('detector_regr', np.mean(losses[:iter_num, 3]))]) if iter_num == epoch_length: # every epoch_length iterations, consider saving the weights, then reset iter_num to 0 loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) mean_overlapping_bboxes = float(sum( rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if cfg.verbose: print( 'Mean number of bounding boxes from RPN overlapping ground truth boxes: {}' .format(mean_overlapping_bboxes)) print( 'Classifier accuracy for bounding boxes from RPN: {}' .format(class_acc)) print('Loss RPN classifier: {}'.format(loss_rpn_cls)) print('Loss RPN regression: {}'.format(loss_rpn_regr)) print('Loss Detector classifier: {}'.format( loss_class_cls)) print('Loss Detector regression: {}'.format( loss_class_regr)) print('Elapsed time: {}'.format(time.time() - start_time)) curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr iter_num = 0 start_time = time.time() if curr_loss < best_loss: if cfg.verbose: print( 'Total loss decreased from {} to {}, saving weights' .format(best_loss, curr_loss)) best_loss = curr_loss model_all.save_weights(cfg.model_path) break except Exception as e:
print('Exception: {}'.format(e)) # save model model_all.save_weights(cfg.model_path) continue print('Training complete, exiting.')
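# The loop above interleaves four steps per iteration. A condensed sketch of that
# alternating step, for orientation only: it assumes the same keras_frcnn helpers
# and compiled models set up in train_fasterrcnn(), and it replaces the pos/neg
# balancing of the full loop with a simple uniform draw over the sampled ROIs.
import numpy as np
from keras_frcnn import roi_helpers

def train_step(model_rpn, model_classifier, data_gen_train, cfg, class_mapping, dim_ordering='tf'):
    X, Y, img_data = next(data_gen_train)          # 1. fetch one image plus RPN targets
    loss_rpn = model_rpn.train_on_batch(X, Y)      # 2. update the RPN
    P_rpn = model_rpn.predict_on_batch(X)          # 3. turn RPN output into proposals
    R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], cfg, dim_ordering,
                               use_regr=True, overlap_thresh=0.7, max_boxes=300)
    X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, cfg, class_mapping)
    if X2 is None:                                 # RPN produced no usable proposal
        return loss_rpn, None
    # simplified ROI sampling (the real loop balances positives and negatives)
    sel = np.random.choice(X2.shape[1], min(cfg.num_rois, X2.shape[1]), replace=False)
    loss_class = model_classifier.train_on_batch(  # 4. update the detector head
        [X, X2[:, sel, :]], [Y1[:, sel, :], Y2[:, sel, :]])
    return loss_rpn, loss_class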
def test_view_func(C, model_rpn, model_classifier):
    base_dir = os.getcwd()
    test_cls_all = ['aeroplane']
    class_mapping = C.class_mapping
    inv_class_mapping = {v: k for k, v in class_mapping.items()}
    backend = K.image_dim_ordering()
    filename = '/home/gilad/bar/real7.p'
    video_filename = "/home/gilad/ssd/keras-frcnn-master/a.mp4"
    write_flag = False
    class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
    # turn off any data augmentation at test time
    save_flag = False
    visualise = False
    count = 0
    good_img = 0
    not_good = 0
    mAP = 0

    def format_img_size(img, C):
        """ formats the image size based on config """
        img_min_side = float(C.im_size)
        (height, width, _) = img.shape
        if width <= height:
            ratio = img_min_side / width
            new_height = int(ratio * height)
            new_width = int(img_min_side)
        else:
            ratio = img_min_side / height
            new_width = int(ratio * width)
            new_height = int(img_min_side)
        img = cv2.resize(img, (new_width, new_height), interpolation=cv2.INTER_CUBIC)
        return img, ratio

    def format_img_channels(img, C):
        """ formats the image channels based on config """
        img = img[:, :, (2, 1, 0)]
        img = img.astype(np.float32)
        img[:, :, 0] -= C.img_channel_mean[0]
        img[:, :, 1] -= C.img_channel_mean[1]
        img[:, :, 2] -= C.img_channel_mean[2]
        img /= C.img_scaling_factor
        img = np.transpose(img, (2, 0, 1))
        img = np.expand_dims(img, axis=0)
        return img

    def draw_bbox(img, bbox, prob, azimuth, ratio):
        # new_boxes, new_probs, new_az = roi_helpers.non_max_suppression_fast(bbox, prob, azimuth, overlap_thresh=0.3, use_az=True)
        new_boxes = bbox
        new_az = azimuth
        new_probs = prob
        for jk in range(new_boxes.shape[0]):
            (x1, y1, x2, y2) = new_boxes[jk, :]
            (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)
            cv2.rectangle(img, (real_x1, real_y1), (real_x2, real_y2),
                          (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 2)
            # cv2.rectangle(img, (bbox_gt['x1'], bbox_gt['y1']), (bbox_gt['x2'], bbox_gt['y2']), (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 2)
            # textLabel = '{}: {}, azimuth : {}'.format(key, int(100 * new_probs[jk]), new_az[jk])
            textLabel = 'azimuth : {}'.format(new_az[jk])
            all_dets.append((key, 100 * new_probs[jk]))
            (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)
            textOrg = (real_x1, real_y1 + 15)
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2)
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1)
            cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)
        return img

    def format_img(img, C):
        """ formats an image for model prediction based on config """
        img, ratio = format_img_size(img, C)
        img = format_img_channels(img, C)
        return img, ratio

    def display_image(img):
        img1 = img[:, :, (2, 1, 0)]
        # img1 = img
        im = Image.fromarray(img1.astype('uint8'), 'RGB')
        im.show()

    # Method to transform the coordinates of the bounding box to its original size
    def get_real_coordinates(ratio, x1, y1, x2, y2):
        real_x1 = int(round(x1 // ratio))
        real_y1 = int(round(y1 // ratio))
        real_x2 = int(round(x2 // ratio))
        real_y2 = int(round(y2 // ratio))
        return (real_x1, real_y1, real_x2, real_y2)

    vnum_test = 24
    azimuth_vec = np.concatenate(
        ([0], np.linspace((360. / (vnum_test * 2)), 360. - (360. / (vnum_test * 2)), vnum_test)),
        axis=0)

    def find_interval(azimuth, azimuth_vec):
        for i in range(len(azimuth_vec)):
            if azimuth < azimuth_vec[i]:
                break
        ind = i
        if azimuth > azimuth_vec[-1]:
            ind = 1
        return ind

    # print(rep)
    obj_num = 0
    bbox_threshold_orig = 0.6
    th_bbox = 0.3

    #### open images from folder
    # for idx, img_name in enumerate(sorted(os.listdir(img_path))):
    #     if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
    #         continue
    #     print(img_name)
    #     filepath = os.path.join(img_path, img_name)
    #     img = cv2.imread(filepath)

    #### open images from file
    ## read the training data from pickle file or from annotations
    # class_mapping = C.class_mapping
    for test_cls in test_cls_all:
        good_img = 0
        not_good = 0
        count = 0
        obj_num = 0
        gt_cls_num = class_mapping[test_cls]
        print('work on class {}'.format(test_cls))
        test_pickle = os.path.join(base_dir, 'pickle_data/test_data_{}.pickle'.format(test_cls))
        if os.path.exists(test_pickle):
            with open(test_pickle, 'rb') as f:
                all_imgs, classes_count, _ = pickle.load(f)
        for im_file in all_imgs:
            filepath = im_file['filepath']
            img = cv2.imread(filepath)
            img_gt = np.copy(img)
            if img is None:
                not_good += 1
                continue
            else:
                good_img += 1
                # print('im num {}'.format(good_img))
                if good_img % 50 == 0:
                    print("worked on {} images".format(good_img))
            X, ratio = format_img(img, C)
            if backend == 'tf':
                X = np.transpose(X, (0, 2, 3, 1))
            # get the feature maps and output from the RPN
            [Y1, Y2] = model_rpn.predict(X)
            R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7)
            # convert from (x1,y1,x2,y2) to (x,y,w,h)
            R[:, 2] -= R[:, 0]
            R[:, 3] -= R[:, 1]
            width, height = int(im_file["width"]), int(im_file["height"])
            resized_width, resized_height = data_generators.get_new_img_size(width, height, C.im_size)
            # [_, _, F] = model_rpn.predict(X)
            ROIs = []
            ## pass over all the labels in the image; some of them are not equal to test_cls
            for bbox_gt in im_file['bboxes']:
                if not bbox_gt['class'] == test_cls:
                    continue
                no_bbox_flag = 1
                bbox_threshold = bbox_threshold_orig
                while no_bbox_flag and bbox_threshold > th_bbox:
                    cls_gt = bbox_gt['class']
                    az_gt = bbox_gt['azimuth']
                    el_gt = bbox_gt['elevation']
                    t_gt = bbox_gt['tilt']
                    if bbox_gt['class'] == test_cls and bbox_threshold == bbox_threshold_orig:
                        obj_num += 1
                    if len(ROIs) == 0:
                        # apply the spatial pyramid pooling to the proposed regions
                        bboxes = {}
                        probs = {}
                        azimuths = {}
                        # print('obj num {}'.format(obj_num))
                        for jk in range(R.shape[0] // C.num_rois + 1):
                            ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)
                            if ROIs.shape[1] == 0:
                                break
                            if jk == R.shape[0] // C.num_rois:
                                # pad R
                                curr_shape = ROIs.shape
                                target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
                                ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
                                ROIs_padded[:, :curr_shape[1], :] = ROIs
                                ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
                                ROIs = ROIs_padded
                            [P_cls, P_regr, P_view] = model_classifier.predict([X, ROIs])
                            for ii in range(P_cls.shape[1]):
                                if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
                                    continue
                                ## get class from the net
                                # cls_num = np.argmax(P_cls[0, ii, :])
                                ## use gt class
                                cls_num = gt_cls_num
                                cls_name = inv_class_mapping[cls_num]
                                cls_view = P_view[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                                # cls_name_gt = cls_name
                                # img = draw_bbox(img, bbox, prob, azimuth, ratio)
                                # if cls_name == cls_name_gt:
                                # print(np.argmax(cls_view, axis=0))
                                if cls_name not in bboxes:
                                    bboxes[cls_name] = []
                                    probs[cls_name] = []
                                    azimuths[cls_name] = []
                                (x, y, w, h) = ROIs[0, ii, :]
                                try:
                                    (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                                    tx /= C.classifier_regr_std[0]
                                    ty /= C.classifier_regr_std[1]
                                    tw /= C.classifier_regr_std[2]
                                    th /= C.classifier_regr_std[3]
                                    x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
                                except:
                                    pass
                                bboxes[cls_name].append([C.rpn_stride * x, C.rpn_stride * y,
                                                         C.rpn_stride * (x + w), C.rpn_stride * (y + h)])
                                probs[cls_name].append(np.max(P_cls[0, ii, :]))
                                azimuths[cls_name].append(Quat(cls_view).ra)
                    all_dets = []
                    if len(bboxes) == 0:
                        bbox_threshold -= 0.1
                    for key in bboxes:
                        # if 1:
                        if key == test_cls and bbox_gt['class'] == test_cls:
                            bbox = np.array(bboxes[key])
                            prob = np.array(probs[key])
                            azimuth = np.array(azimuths[key])
                            ## get the azimuth from bboxes that overlap the gt_bbox by more than 'overlap_thresh'
                            az = []
                            overlap_thresh = 0.5
                            try:
                                while np.size(az) == 0 and overlap_thresh > 0.3:
                                    _, prob_bbox, az = roi_helpers.overlap_with_gt(
                                        bbox, prob, azimuth, bbox_gt, ratio=ratio,
                                        overlap_thresh=overlap_thresh, max_boxes=300, use_az=True)
                                    if np.size(az) != 0 and overlap_thresh == 0.5:
                                        mAP += 1
                                    overlap_thresh -= 0.1
                                if overlap_thresh == 0:
                                    print("No good Bbox was found")
                                counts = np.bincount(az)
                            except:
                                az = []
                                counts = []
                            try:
                                az_fin = np.argmax(counts)
                                true_bin = find_interval(az_gt, azimuth_vec)
                                prob_bin = find_interval(az_fin, azimuth_vec)
                                no_bbox_flag = 0
                                if true_bin == prob_bin:
                                    count += 1
                                break
                            except:
                                # print('here')
                                no_bbox_flag = 1
                                bbox_threshold -= 0.1
                    ## azimuth calculations
                    ## display
                    # if visualise:
                    #     display_image(img)
                    #     # cv2.imshow('img', img)
                    #     # cv2.waitKey(0)
                    # if save_flag:
                    #     cv2.imwrite('./results_imgs/{}'.format(img_name), img)
                    #     # img = img[:, :, (2, 1, 0)]
                    #     # cv2.imwrite('./results_imgs/video/{}.png'.format(num), img)
                    #     # print('save')
                    bbox_threshold -= 0.1
            # if visualise:
            #     display_image(img)
        succ = float(count) / float(obj_num) * 100.
        string = 'for class {} - true count is {} out of {} from {} images. {} success'.format(
            test_cls, count, obj_num, good_img, succ)
        print(string)
        mAP = float(mAP) / float(obj_num) * 100.
        print("MAP is {}".format(mAP))
        # if write_flag:
        #     f = open('{}_results.txt'.format(weight_name), 'a')
        #     f.write(string + '\n')
        #     f.close()
    return succ, mAP
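# The azimuth scoring above reduces to: build vnum_test bins around the circle, then
# check whether the predicted azimuth lands in the same bin as the ground truth.
# A self-contained sketch of that binning, mirroring azimuth_vec and find_interval above:
import numpy as np

def make_azimuth_bins(vnum_test=24):
    half_bin = 360. / (vnum_test * 2)
    return np.concatenate(([0], np.linspace(half_bin, 360. - half_bin, vnum_test)), axis=0)

def find_interval(azimuth, azimuth_vec):
    # index of the first bin edge above the azimuth; the last sector wraps around to bin 1
    for i in range(len(azimuth_vec)):
        if azimuth < azimuth_vec[i]:
            break
    ind = i
    if azimuth > azimuth_vec[-1]:
        ind = 1
    return ind

bins = make_azimuth_bins()
# 3 deg and 358 deg both fall in the wrap-around sector around 0 deg, so they match
print(find_interval(3.0, bins) == find_interval(358.0, bins))  # True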
def upload_file():
    print("request is ", request.files)
    st = time.time()
    content_length = request.content_length
    print(f"Content_length : {content_length}")
    print("data type is ", type(request))
    print("data type of request files ", type(request.files))
    data_dict = request.form.to_dict()
    # print(type(data_dict))
    # print(data_dict['file'])
    # print('data from frontend', data_dict)
    data = (data_dict['file'].split(',')[1])
    l, b = (data_dict['imgDimensions'].split(','))
    l = int(l)
    b = int(b)
    print('width of image', l)
    print('type of l ', type(l))
    print('height of image', b)
    # print(data)
    # print(len(data_dict))
    # print(data)
    imgdata = base64.b64decode(data)
    print("imagedata type is", type(imgdata))
    img2 = Image.open(io.BytesIO(imgdata))
    print(type(img2))
    # img2.show()
    # img = cv2.imread(img2)
    # print('opencv type', type(img))
    # print(type(img))
    a = np.array(img2.getdata()).astype(np.float64)
    # print('datatype of w ', w.dtype)
    # b = np.ones(172800, 3)
    # a = np.concatenate((w, b), axis=None)
    print('type of data to model ', type(a))
    print('shape of data from frontend', a.shape)
    # r, c = a.shape
    # print('Value of r', r)
    """
    if a.shape == (480000, 3):
        data = a.reshape(600, 800, 3)
    else:
        data = a.reshape(480, 640, 3)
    """
    data = a.reshape(b, l, 3)
    st = time.time()
    parser = OptionParser()
    parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
                      help="Number of ROIs per iteration. Higher means more memory use.", default=64)
    parser.add_option("--config_filename", dest="config_filename",
                      help="Location to read the metadata related to the training (generated when training).",
                      default="config.pickle")
    parser.add_option("--network", dest="network",
                      help="Base network to use. Supports vgg or resnet50.", default='resnet50')
    (options, args) = parser.parse_args()
    config_output_filename = options.config_filename
    with open(config_output_filename, 'rb') as f_in:
        C = pickle.load(f_in)
    if C.network == 'resnet50':
        import keras_frcnn.resnet as nn
    elif C.network == 'vgg':
        import keras_frcnn.vgg as nn
    # turn off any data augmentation at test time
    C.use_horizontal_flips = False
    C.use_vertical_flips = False
    C.rot_90 = False
    class_mapping = C.class_mapping
    if 'bg' not in class_mapping:
        class_mapping['bg'] = len(class_mapping)
    class_mapping = {v: k for k, v in class_mapping.items()}
    print(class_mapping)
    class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
    C.num_rois = int(options.num_rois)
    if C.network == 'resnet50':
        num_features = 1024
    elif C.network == 'vgg':
        num_features = 512
    if K.image_dim_ordering() == 'th':
        input_shape_img = (3, None, None)
        input_shape_features = (num_features, None, None)
    else:
        input_shape_img = (None, None, 3)
        input_shape_features = (None, None, num_features)
    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(C.num_rois, 4))
    feature_map_input = Input(shape=input_shape_features)
    # define the base network (resnet here, can be VGG, Inception, etc)
    shared_layers = nn.nn_base(img_input, trainable=True)
    # define the RPN, built on the base layers
    num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
    rpn_layers = nn.rpn(shared_layers, num_anchors)
    classifier = nn.classifier(feature_map_input, roi_input, C.num_rois,
                               nb_classes=len(class_mapping), trainable=True)
    model_rpn = Model(img_input, rpn_layers)
    model_classifier_only = Model([feature_map_input, roi_input], classifier)
    model_classifier = Model([feature_map_input, roi_input], classifier)
    print('Loading weights from {}'.format(C.model_path))
    model_rpn.load_weights(C.model_path, by_name=True)
    model_classifier.load_weights(C.model_path, by_name=True)
    model_rpn.compile(optimizer='sgd', loss='mse')
    model_classifier.compile(optimizer='sgd', loss='mse')
    all_imgs = []
    classes = {}
    bbox_threshold = 0.6
    visualise = True
    # if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
    #     continue
    # print(img_name)
    # filepath = os.path.join(img_path, img_name)
    img = data
    # cv2.imshow('img', img)
    # cv2.waitKey(0)
    X, ratio = format_img(img, C)
    if K.image_dim_ordering() == 'tf':
        X = np.transpose(X, (0, 2, 3, 1))
    # get the feature maps and output from the RPN
    [Y1, Y2, F] = model_rpn.predict(X)
    R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.6)
    # convert from (x1,y1,x2,y2) to (x,y,w,h)
    R[:, 2] -= R[:, 0]
    R[:, 3] -= R[:, 1]
    # apply the spatial pyramid pooling to the proposed regions
    bboxes = {}
    probs = {}
    for jk in range(R.shape[0] // C.num_rois + 1):
        ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)
        if ROIs.shape[1] == 0:
            break
        if jk == R.shape[0] // C.num_rois:
            # pad R
            curr_shape = ROIs.shape
            target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
            ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
            ROIs_padded[:, :curr_shape[1], :] = ROIs
            ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
            ROIs = ROIs_padded
        [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])
        for ii in range(P_cls.shape[1]):
            if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
                continue
            cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
            if cls_name not in bboxes:
                bboxes[cls_name] = []
                probs[cls_name] = []
            (x, y, w, h) = ROIs[0, ii, :]
            cls_num = np.argmax(P_cls[0, ii, :])
            try:
                (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                tx /= C.classifier_regr_std[0]
                ty /= C.classifier_regr_std[1]
                tw /= C.classifier_regr_std[2]
                th /= C.classifier_regr_std[3]
                x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
            except:
                pass
            bboxes[cls_name].append([C.rpn_stride * x, C.rpn_stride * y,
                                     C.rpn_stride * (x + w), C.rpn_stride * (y + h)])
            probs[cls_name].append(np.max(P_cls[0, ii, :]))
    all_dets = []
    for key in bboxes:
        bbox = np.array(bboxes[key])
        new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.6)
        for jk in range(new_boxes.shape[0]):
            (x1, y1, x2, y2) = new_boxes[jk, :]
            (real_x1, real_y1, real_x2, real_y2) = get_real_coordinates(ratio, x1, y1, x2, y2)
            cv2.rectangle(img, (real_x1, real_y1), (real_x2, real_y2),
                          (int(class_to_color[key][0]), int(class_to_color[key][1]), int(class_to_color[key][2])), 2)
            textLabel = '{}: {}'.format(key, int(100 * new_probs[jk]))
            all_dets.append((key, 100 * new_probs[jk]))
            (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)
            textOrg = (real_x1, real_y1 - 0)
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2)
            cv2.rectangle(img, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                          (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1)
            cv2.putText(img, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)
    print('Elapsed time = {}'.format(time.time() - st))
    print('number of windows detected', len(all_dets))
    print(all_dets)
    r = len(all_dets)
    img3 = normalize(img)
    # plt.imshow(img)
    # cv2.imshow('img3', img3)
    # cv2.waitKey(0)
    K.clear_session()
    # data = process(data)
    # print('after reshape', data.shape)
    im2 = Image.fromarray(img.astype("uint8"), "RGB")
    print("im2 data type is ", type(im2))
    # to_frontend = (" ".join(str(x) for x in data))
    db = data.tobytes()
    print('type of data to database :', type(db))
    todb = insertBLOB('Image007', db)
    print('final data shape fed to model : ', data.shape)
    # ImageFile
    # img = db.b64encode()  # note: bytes has no b64encode method; base64.b64encode(db) would be needed
    # with open("t.png", "rb") as imageFile:
    #     str = base64.b64encode(imageFile.read())
    # cv2.imshow('image', cv2.cvtColor(data, cv2.COLOR_BGR2GRAY))
    # cv2.waitKey()
    # str = base64.b64encode(data)
    # return jsonify(to_frontend, r)
    # img = Image.open('C:\Window Counter_Project\Flickr\Window_101 (131).jpg')
    # img.load()
    # data = np.asarray(img, dtype="int32")
    # im = Image.fromarray(data.astype("uint8"))
    # im.show()  # uncomment to look at the image
    rawBytes = io.BytesIO()
    print(rawBytes)
    im2.save(rawBytes, "jpeg")
    # im2.show()
    print('type of im2 is ', type(im2))
    rawBytes.seek(0)  # return to the start of the file
    response_obj = {
        'count': r,
        'image': "data:image/jpeg;base64," + base64.b64encode(rawBytes.read()).decode('ascii')
    }
    # print("response is", type(response_obj))
    return jsonify(Data=response_obj)
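# upload_file() above decodes a base64 data URL into a numpy image and later re-encodes
# the annotated image for the JSON response. A minimal sketch of that round trip,
# assuming only Pillow and the standard library:
import base64
import io
import numpy as np
from PIL import Image

def data_url_to_array(data_url):
    # strip the "data:image/...;base64," prefix, decode, load as an RGB array
    b64_payload = data_url.split(',')[1]
    img = Image.open(io.BytesIO(base64.b64decode(b64_payload)))
    return np.array(img.convert('RGB'))

def array_to_data_url(arr):
    # encode an HxWx3 uint8 array back into a JPEG data URL
    buf = io.BytesIO()
    Image.fromarray(arr.astype('uint8'), 'RGB').save(buf, 'jpeg')
    buf.seek(0)
    return 'data:image/jpeg;base64,' + base64.b64encode(buf.read()).decode('ascii')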
def train_net():
    # config for data argument
    cfg = config.Config()
    cfg.use_horizontal_flips = False
    cfg.use_vertical_flips = False
    cfg.rot_90 = False
    cfg.num_rois = 32  # the default set in config is 4
    cfg.base_net_weights = os.path.join('./model/', nn.get_weight_path())
    # TODO: the only file that needs to change to train on other data
    cfg.model_path = 'samples.hdf5'
    cfg.simple_label_file = 'annotations_train.txt'  # label file generated for the training set

    all_images, classes_count, class_mapping = get_data(cfg.simple_label_file)

    if 'bg' not in classes_count:
        classes_count['bg'] = 0
        class_mapping['bg'] = len(class_mapping)

    cfg.class_mapping = class_mapping
    with open(cfg.config_save_file, 'wb') as config_f:
        pickle.dump(cfg, config_f)
        print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(
            cfg.config_save_file))

    inv_map = {v: k for k, v in class_mapping.items()}

    print('Training images per class:')
    pprint.pprint(classes_count)
    print('Num classes (including bg) = {}'.format(len(classes_count)))
    random.shuffle(all_images)
    num_imgs = len(all_images)
    train_imgs = [s for s in all_images if s['imageset'] == 'trainval']
    val_imgs = [s for s in all_images if s['imageset'] == 'test']

    print('Num train samples {}'.format(len(train_imgs)))
    print('Num val samples {}'.format(len(val_imgs)))

    data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, cfg, nn.get_img_output_length,
                                                   K.image_dim_ordering(), mode='train')
    data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, cfg, nn.get_img_output_length,
                                                 K.image_dim_ordering(), mode='val')

    if K.image_dim_ordering() == 'th':
        input_shape_img = (3, None, None)
    else:
        input_shape_img = (None, None, 3)

    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(None, 4))

    # define the base network (resnet here, can be VGG, Inception, etc)
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(cfg.anchor_box_scales) * len(cfg.anchor_box_ratios)
    rpn = nn.rpn(shared_layers, num_anchors)

    # what is the classifier?
    # classes_count {}: number of samples per class, e.g. {'cow': 4, 'dog': 10, ...}
    # C.num_rois: how many regions of interest are processed at once, 32 by default
    # roi_input = Input(shape=(None, 4)): the boxes
    # classifier carries the two faster r-cnn heads [out_class, out_reg]
    # shared_layers is the output feature map of the base net
    classifier = nn.classifier(shared_layers, roi_input, cfg.num_rois, nb_classes=len(classes_count), trainable=True)

    # define model_rpn
    model_rpn = Model(img_input, rpn[:2])
    model_classifier = Model([img_input, roi_input], classifier)

    # this is a model that holds both the RPN and the classifier, used to load/save weights for the models
    model_all = Model([img_input, roi_input], rpn[:2] + classifier)

    try:
        print('loading weights from {}'.format(cfg.base_net_weights))
        model_rpn.load_weights(cfg.model_path, by_name=True)
        model_classifier.load_weights(cfg.model_path, by_name=True)
    except Exception as e:
        print(e)
        print('Could not load pretrained model weights. Weights can be found in the keras application folder '
              'https://github.com/fchollet/keras/tree/master/keras/applications')

    optimizer = Adam(lr=1e-5)
    optimizer_classifier = Adam(lr=1e-5)
    model_rpn.compile(optimizer=optimizer,
                      loss=[losses_fn.rpn_loss_cls(num_anchors), losses_fn.rpn_loss_regr(num_anchors)])
    model_classifier.compile(optimizer=optimizer_classifier,
                             loss=[losses_fn.class_loss_cls, losses_fn.class_loss_regr(len(classes_count) - 1)],
                             metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
    model_all.compile(optimizer='sgd', loss='mae')

    epoch_length = 10
    num_epochs = int(cfg.num_epochs)
    iter_num = 0

    losses = np.zeros((epoch_length, 5))
    rpn_accuracy_rpn_monitor = []
    rpn_accuracy_for_epoch = []
    start_time = time.time()

    best_loss = np.Inf

    class_mapping_inv = {v: k for k, v in class_mapping.items()}
    print('Starting training')
    vis = True

    for epoch_num in range(num_epochs):
        progbar = generic_utils.Progbar(epoch_length)
        print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))
        while True:
            try:
                # monitors the average number of positive boxes per epoch
                if len(rpn_accuracy_rpn_monitor) == epoch_length and cfg.verbose:
                    mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(rpn_accuracy_rpn_monitor)
                    rpn_accuracy_rpn_monitor = []
                    print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(
                        mean_overlapping_bboxes, epoch_length))
                    if mean_overlapping_bboxes == 0:
                        # if no positive sample is ever hit, something is wrong with the RPN
                        print('RPN is not producing bounding boxes that overlap'
                              ' the ground truth boxes. Check RPN settings or keep training.')

                # fetch data from the generator and train the RPN; X is the image,
                # Y the per-anchor classes and regression targets (only anchors that
                # meet the sampling criteria take part in training).
                # next(data_gen_train) is an iterator returning
                #   np.copy(x_img), [np.copy(y_rpn_cls), np.copy(y_rpn_regr)], img_data_aug
                # (assuming no horizontal flips etc., x_img == img_data_aug);
                # y_rpn_cls and y_rpn_regr feed the two RPN loss functions.
                X, Y, img_data = next(data_gen_train)

                # the classifier and the RPN are trained alternately
                loss_rpn = model_rpn.train_on_batch(X, Y)

                P_rpn = model_rpn.predict_on_batch(X)

                # result holds the region proposals.
                # once we have region proposals, the other key idea is ROI pooling,
                # which converts feature maps of varying shape to a fixed shape
                # before the fully connected layers make the final prediction.
                # rpn_to_roi takes the per-image predictions and returns R = [boxes, probs]
                # ---------------------
                result = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], cfg, K.image_dim_ordering(), use_regr=True,
                                                overlap_thresh=0.7, max_boxes=300)
                # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
                # Y1: which class each proposal belongs to
                # Y2: the regression targets for that class
                # X2: the proposal boxes themselves
                """
                calc_iou() matches the few remaining regions to the ground-truth bbox
                they overlap most, yielding the data and labels for model_classifier.
                X2 keeps all background and matched boxes; Y1 is the one-hot class
                encoding; Y2 holds the class labels plus the coordinates to regress;
                IouS is for debugging.
                """
                X2, Y1, Y2, IouS = roi_helpers.calc_iou(result, img_data, cfg, class_mapping)

                if X2 is None:
                    # no valid proposals: skip this iteration
                    rpn_accuracy_rpn_monitor.append(0)
                    rpn_accuracy_for_epoch.append(0)
                    continue

                # one-hot encoding: a 1 in the last position marks background
                neg_samples = np.where(Y1[0, :, -1] == 1)
                pos_samples = np.where(Y1[0, :, -1] == 0)

                if len(neg_samples) > 0:
                    neg_samples = neg_samples[0]  # flatten to a 1-D array
                else:
                    neg_samples = []

                if len(pos_samples) > 0:
                    pos_samples = pos_samples[0]
                else:
                    pos_samples = []

                rpn_accuracy_rpn_monitor.append(len(pos_samples))
                rpn_accuracy_for_epoch.append((len(pos_samples)))

                if cfg.num_rois > 1:
                    # select num_rois boxes to feed the classifier in one step.
                    # when num_rois > 1, aim for half positives and half negatives;
                    # when num_rois == 1, pick a single positive or negative at random.
                    if len(pos_samples) < cfg.num_rois // 2:
                        # pick positive samples
                        selected_pos_samples = pos_samples.tolist()
                    else:
                        selected_pos_samples = np.random.choice(pos_samples, cfg.num_rois // 2,
                                                                replace=False).tolist()
                    try:
                        # pick negative samples
                        selected_neg_samples = np.random.choice(neg_samples,
                                                                cfg.num_rois - len(selected_pos_samples),
                                                                replace=False).tolist()
                    except:
                        selected_neg_samples = np.random.choice(neg_samples,
                                                                cfg.num_rois - len(selected_pos_samples),
                                                                replace=True).tolist()
                    sel_samples = selected_pos_samples + selected_neg_samples
                else:
                    # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                    selected_pos_samples = pos_samples.tolist()
                    selected_neg_samples = neg_samples.tolist()
                    if np.random.randint(0, 2):
                        sel_samples = random.choice(neg_samples)
                    else:
                        sel_samples = random.choice(pos_samples)

                # train the classifier network on the selected positions
                loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]],
                                                             [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])

                losses[iter_num, 0] = loss_rpn[1]  # mean rpn_cls loss
                losses[iter_num, 1] = loss_rpn[2]  # mean rpn_regr loss
                losses[iter_num, 2] = loss_class[1]  # mean detector_cls loss
                losses[iter_num, 3] = loss_class[2]  # mean detector_regr loss
                losses[iter_num, 4] = loss_class[3]  # index 4 is the accuracy

                iter_num += 1

                # update the progress bar
                progbar.update(iter_num,
                               [('rpn_cls', np.mean(losses[:iter_num, 0])),
                                ('rpn_regr', np.mean(losses[:iter_num, 1])),
                                ('detector_cls', np.mean(losses[:iter_num, 2])),
                                ('detector_regr', np.mean(losses[:iter_num, 3]))])

                if iter_num == epoch_length:
                    loss_rpn_cls = np.mean(losses[:, 0])  # losses holds every iteration's losses
                    loss_rpn_regr = np.mean(losses[:, 1])
                    loss_class_cls = np.mean(losses[:, 2])
                    loss_class_regr = np.mean(losses[:, 3])
                    class_acc = np.mean(losses[:, 4])

                    mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
                    rpn_accuracy_for_epoch = []

                    if cfg.verbose:
                        # print the mean losses over the last epoch_length iterations
                        print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(
                            mean_overlapping_bboxes))
                        print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                        print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                        print('Loss RPN regression: {}'.format(loss_rpn_regr))
                        print('Loss Detector classifier: {}'.format(loss_class_cls))
                        print('Loss Detector regression: {}'.format(loss_class_regr))
                        print('Elapsed time: {}'.format(time.time() - start_time))

                    curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                    iter_num = 0
                    start_time = time.time()

                    if curr_loss < best_loss:
                        # at the end of an epoch, save the weights only if this epoch's
                        # loss beats the best so far, then move on to the next epoch
                        if cfg.verbose:
                            print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
                        best_loss = curr_loss
                        model_all.save_weights(cfg.model_path)
                    break
            except Exception as e:
                print('Exception: {}'.format(e))
                # save model
                model_all.save_weights(cfg.model_path)
                continue
    print('Training complete, exiting.')
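# The pos/neg sampling that train_net() (and the other training loops here) repeats
# inline can be factored into a helper. A sketch under the same assumptions: Y1 is the
# one-hot class target from calc_iou with background in the last column, and at least
# one positive or negative sample exists when num_rois == 1.
import numpy as np

def sample_rois(Y1, num_rois):
    neg = np.where(Y1[0, :, -1] == 1)[0]   # last column == 1 marks background
    pos = np.where(Y1[0, :, -1] == 0)[0]
    if num_rois > 1:
        # up to half the minibatch from positives, the rest from negatives
        n_pos = min(len(pos), num_rois // 2)
        sel_pos = np.random.choice(pos, n_pos, replace=False).tolist()
        n_neg = num_rois - len(sel_pos)
        replace = len(neg) < n_neg          # reuse negatives when there are too few
        sel_neg = np.random.choice(neg, n_neg, replace=replace).tolist() if len(neg) else []
        return sel_pos + sel_neg
    # num_rois == 1: pick a single random positive or negative, as the loops above do
    pool = neg if np.random.randint(0, 2) else pos
    return [np.random.choice(pool)]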
img = cv2.imread(filepath) #imggg = Image.fromarray(img) #imggg.show() #cv2.imshow('img', img) #cv2.waitKey() X, fx, fy = format_img(img, C) if K.image_dim_ordering() == 'tf': X = np.transpose(X, (0, 2, 3, 1)) # get the feature maps and output from the RPN [Y1, Y2, F] = model_rpn.predict(X) R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7) # convert from (x1,y1,x2,y2) to (x,y,w,h) R[:, 2] -= R[:, 0] R[:, 3] -= R[:, 1] # apply the spatial pyramid pooling to the proposed regions bboxes = {} probs = {} for jk in range(R.shape[0] // C.num_rois + 1): ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0) if ROIs.shape[1] == 0:
def Train_frcnn( train_path, # path to the text file containing the data network_arch, # the full faster rcnn network architecture object num_epochs, # num of epochs output_weight_path, # path to save the model_all.weights as hdf5 preprocessing_function=None, config_filename="config.pickle", input_weights_path=None, train_rpn=True, train_final_classifier=True, train_base_nn=True, losses_to_watch=['rpn_cls', 'rpn_reg', 'final_cls', 'final_reg'], tb_log_dir="log", num_rois=32, horizontal_flips=False, vertical_flips=False, rot_90=False, anchor_box_scales=[128, 256, 512], anchor_box_ratios=[[1, 1], [1. / math.sqrt(2), 2. / math.sqrt(2)], [2. / math.sqrt(2), 1. / math.sqrt(2)]], im_size=600, rpn_stride=16, # depends on network architecture visualize_model=None, verify_trainable=True, optimizer_rpn=Adam(lr=1e-5), optimizer_classifier=Adam(lr=1e-5), validation_interval=3, rpn_min_overlap=0.3, rpn_max_overlap=0.7, classifier_min_overlap=0.1, classifier_max_overlap=0.5, rpn_nms_threshold=0.7, # original implementation seed=5000): """ Trains a Faster RCNN for object detection in keras NOTE: This trains 2 models, namely model_rpn and model_classifier, with the same shared base_nn (fixed feature extractor) Keyword Arguments train_path -- str: path to the text file or pascal_voc (no Default) network_arch --object: the full faster rcnn network .py file passed as an object (no default) num_epochs -- int: number of epochs to train (no Default) output_weight_path --str: path to save the frcnn weights (no Default) preprocessing_function --function: Optional preprocessing function (must be defined like given in keras docs) (Default None) config_filename --str: Path to save the config file. Used when testing (Default "config.pickle") input_weights_path --str: Path to hdf5 file containing weights for the model (Default None); you can pass paths to both classification and detection checkpoints as long as the names don't change train_rpn --bool: whether to train the rpn layer (Default True) train_final_classifier --bool: whether to train the final_classifier (Fast RCNN layer) (Default True) train_base_nn --bool: whether to train the base_nn/fixed_feature_extractor (Default True) losses_to_watch --list: A list of losses to watch (Default ['rpn_cls','rpn_reg','final_cls','final_reg']). The losses in this list are summed and the weights are saved with respect to that sum. The list can contain any combination of the above 4 only. tb_log_dir --str: path to log dir for tensorboard logging (Default 'log') num_rois --int: The number of rois to use at once (Default = 32) horizontal_flips --bool: augment training data by horizontal flips (Default False) vertical_flips --bool: augment training data by vertical flips (Default False) rot_90 --bool: augment training data by 90 deg rotations (Default False) anchor_box_scales --list: The list of anchor box scales to use (Default [128,256,512]) anchor_box_ratios --list of lists: The list of anchor box aspect ratios to use (Default [[1, 1], [1./math.sqrt(2), 2./math.sqrt(2)], [2./math.sqrt(2), 1./math.sqrt(2)]]) im_size --int: The size to resize the image to (Default 600).
This is the smallest side in Pascal VOC format rpn_stride --int: The stride for rpn (Default = 16) visualize_model --str: Path to save the model as a .png file verify_trainable --bool: print layer-wise names and whether each layer is trainable (Default True) optimizer_rpn --keras.optimizer: The optimizer for rpn (Default Adam(lr=1e-5)) optimizer_classifier --keras.optimizer: The optimizer for the classifier (Default Adam(lr=1e-5)) validation_interval --int: The frequency (in epochs) at which to run validation; supply 0 for no validation rpn_min_overlap --float: (0,1) The min IOU in the rpn layer (Default 0.3) (original implementation) rpn_max_overlap --float: (0,1) The max IOU in the rpn layer (Default 0.7) (original implementation) classifier_min_overlap --float: (0,1) same as above but in the final classifier (Default 0.1) (original implementation) classifier_max_overlap --float: (0,1) same as above (Default 0.5) (original implementation) rpn_nms_threshold --float: (0,1) The threshold above which to suppress a bbox using non-max suppression in the rpn (Default 0.7) (from the original implementation) seed --int: To seed the random shuffling of training data (Default = 5000) Performing alternating training: - Use the train_rpn, train_final_classifier and train_base_nn boolean arguments to accomplish alternating training. - While using the above arguments, change the members of losses_to_watch = ['rpn_cls','rpn_reg','final_cls','final_reg'] accordingly, otherwise an error is raised - e.g. if you are training only the base_nn and the rpn, set: train_rpn = True train_base_nn = True train_final_classifier = False losses_to_watch = ['rpn_cls','rpn_reg'] (do not include 'final_cls', 'final_reg') OUTPUT: prints the training log. Does not return anything Save details: 1. saves the weights of the full FRCNN model as .h5 2. saves a tensorboard file 3. appends each weight-saving event to ./saving_log.txt so that it is known at which epoch the model was saved 4. saves the model configuration as a .pickle file 5. optionally saves the full FRCNN architecture as .png NOTE: as of now the batch size = 1; loss = 0 is printed for losses from models that are not being trained TODO: The training is a bit slow because of the data generation step.
Generate_data in multiple threads and queue them for faster training """ check_list = ['rpn_cls', 'rpn_reg', 'final_cls', 'final_reg'] for n in losses_to_watch: if n not in check_list: raise ValueError( "unsupported loss; the supported losses are: {}".format( check_list)) if not train_rpn: if "rpn_cls" in losses_to_watch or "rpn_reg" in losses_to_watch: raise ValueError( "Cannot watch rpn_cls and rpn_reg when train_rpn == False") if not train_final_classifier: if "final_cls" in losses_to_watch or "final_reg" in losses_to_watch: raise ValueError( "Cannot watch final_cls and final_reg when train_final_classifier == False" ) nn = network_arch random.seed(seed) np.random.seed(seed) # pass the settings from the function call, and persist them in the config object C = config.Config() C.rpn_max_overlap = rpn_max_overlap C.rpn_min_overlap = rpn_min_overlap C.classifier_min_overlap = classifier_min_overlap C.classifier_max_overlap = classifier_max_overlap C.anchor_box_scales = anchor_box_scales C.anchor_box_ratios = anchor_box_ratios C.im_size = im_size C.use_horizontal_flips = bool(horizontal_flips) C.use_vertical_flips = bool(vertical_flips) C.rot_90 = bool(rot_90) C.rpn_stride = rpn_stride C.rpn_nms_threshold = rpn_nms_threshold C.weights_all_path = output_weight_path C.num_rois = int(num_rois) # check if a weight path was passed via the function call if input_weights_path: C.initial_weights = input_weights_path all_imgs, classes_count, class_mapping = get_data(train_path) print("The class mapping is:") print(class_mapping) if 'bg' not in classes_count: classes_count['bg'] = 0 class_mapping['bg'] = len(class_mapping) C.class_mapping = class_mapping print('Training images per class:') pprint.pprint(classes_count) print('Num classes (including bg) = {}'.format(len(classes_count))) with open(config_filename, 'wb') as config_f: pickle.dump(C, config_f) print( 'Config has been written to {}, and can be loaded when testing to ensure correct results' .format(config_filename)) np.random.shuffle(all_imgs) train_imgs = [s for s in all_imgs if s['imageset'] == 'train'] val_imgs = [s for s in all_imgs if s['imageset'] == 'valid'] print('Num train samples {}'.format(len(train_imgs))) print('Num val samples {}'.format(len(val_imgs))) input_shape_img = (None, None, 3) img_input = Input(shape=input_shape_img) roi_input = Input(shape=(None, 4)) # define the base network (resnet here, can be VGG, Inception, etc) shared_layers = nn.nn_base(img_input, trainable=train_base_nn) # define the RPN, built on the base layers num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios) rpn = nn.rpn(shared_layers, num_anchors, trainable=train_rpn) # define the classifier, built on base layers classifier = nn.classifier(shared_layers, roi_input, C.num_rois, len(classes_count), trainable=train_final_classifier) # create models model_base = Model(img_input, shared_layers) # for computing the output shape model_rpn = Model(img_input, rpn[:2]) # used for training model_classifier = Model([img_input, roi_input], classifier) # used for training # this is a model that holds both the RPN and the classifier, used to load/save and freeze/unfreeze weights for the models model_all = Model([img_input, roi_input], rpn[:2] + classifier) # tensorboard tbCallBack = TensorBoard(log_dir=tb_log_dir, histogram_freq=1, write_graph=False, write_images=False) tbCallBack.set_model(model_all) # NOTE: both model_rpn and model_classifier contain the base_nn try: print('loading weights from {}'.format(C.initial_weights))
model_all.load_weights(C.initial_weights, by_name=True) except: print('Could not load pretrained model weights') # number of trainable parameters trainable_count = int( np.sum([K.count_params(p) for p in set(model_all.trainable_weights)])) non_trainable_count = int( np.sum( [K.count_params(p) for p in set(model_all.non_trainable_weights)])) print('Total params: {:,}'.format(trainable_count + non_trainable_count)) print('Trainable params: {:,}'.format(trainable_count)) print('Non-trainable params: {:,}'.format(non_trainable_count)) if verify_trainable: for layer in model_all.layers: print(layer.name, layer.trainable) model_rpn.compile(optimizer=optimizer_rpn, loss=[ Losses.rpn_loss_cls(num_anchors), Losses.rpn_loss_regr(num_anchors) ]) model_classifier.compile( optimizer=optimizer_classifier, loss=[ Losses.class_loss_cls, Losses.class_loss_regr(len(classes_count) - 1) ], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'}) model_all.compile(optimizer='sgd', loss='mse') # save model_all as png for visualization if visualize_model != None: plot_model(model=model_all, to_file=visualize_model, show_shapes=True, show_layer_names=True) epoch_length = len(train_imgs) validation_epoch_length = len(val_imgs) num_epochs = int(num_epochs) iter_num = 0 # train and valid data generator data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, model_base, K.image_dim_ordering(), preprocessing_function, mode='train') data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, model_base, K.image_dim_ordering(), preprocessing_function, mode='val') losses_val = np.zeros((validation_epoch_length, 5)) losses = np.zeros((epoch_length, 5)) rpn_accuracy_rpn_monitor = [] rpn_accuracy_for_epoch = [] start_time = time.time() best_loss = np.Inf val_best_loss = np.Inf val_best_loss_epoch = 0 print('Starting training') def write_log(callback, names, logs, batch_no): for name, value in zip(names, logs): summary = tf.Summary() summary_value = summary.value.add() summary_value.simple_value = value summary_value.tag = name callback.writer.add_summary(summary, batch_no) callback.writer.flush() train_names = [ 'train_loss_rpn_cls', 'train_loss_rpn_reg', 'train_loss_class_cls', 'train_loss_class_reg', 'train_total_loss', 'train_acc' ] val_names = [ 'val_loss_rpn_cls', 'val_loss_rpn_reg', 'val_loss_class_cls', 'val_loss_class_reg', 'val_total_loss', 'val_acc' ] for epoch_num in range(num_epochs): progbar = generic_utils.Progbar(epoch_length) print('Epoch {}/{}'.format(epoch_num + 1, num_epochs)) while True: try: if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose: mean_overlapping_bboxes = float( sum(rpn_accuracy_rpn_monitor)) / len( rpn_accuracy_rpn_monitor) rpn_accuracy_rpn_monitor = [] print( 'Average number of overlapping bounding boxes from RPN = {} for {} previous iterations' .format(mean_overlapping_bboxes, epoch_length)) if mean_overlapping_bboxes == 0: print( 'RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.' 
) X, Y, img_data = next(data_gen_train) if train_rpn: loss_rpn = model_rpn.train_on_batch(X, Y) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=C.rpn_nms_threshold, flag="train") # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou( R, img_data, C, class_mapping) if X2 is None: rpn_accuracy_rpn_monitor.append(0) rpn_accuracy_for_epoch.append(0) continue neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append((len(pos_samples))) if C.num_rois > 1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice( pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois = 1, we pick a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) if train_final_classifier: loss_class = model_classifier.train_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]) # losses if train_rpn: losses[iter_num, 0] = loss_rpn[1] losses[iter_num, 1] = loss_rpn[2] else: losses[iter_num, 0] = 0 losses[iter_num, 1] = 0 if train_final_classifier: losses[iter_num, 2] = loss_class[1] losses[iter_num, 3] = loss_class[2] losses[iter_num, 4] = loss_class[3] # accuracy else: losses[iter_num, 2] = 0 losses[iter_num, 3] = 0 losses[iter_num, 4] = 0 iter_num += 1 progbar.update( iter_num, [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])), ('detector_cls', np.mean(losses[:iter_num, 2])), ('detector_regr', np.mean(losses[:iter_num, 3]))]) if iter_num == epoch_length: if train_rpn: loss_rpn_cls = np.mean(losses[:, 0]) loss_rpn_regr = np.mean(losses[:, 1]) else: loss_rpn_cls = 0 loss_rpn_regr = 0 if train_final_classifier: loss_class_cls = np.mean(losses[:, 2]) loss_class_regr = np.mean(losses[:, 3]) class_acc = np.mean(losses[:, 4]) else: loss_class_cls = 0 loss_class_regr = 0 class_acc = 0 mean_overlapping_bboxes = float(sum( rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] if C.verbose: print( 'Mean number of bounding boxes from RPN overlapping ground truth boxes: {}' .format(mean_overlapping_bboxes)) print( 'Classifier accuracy for bounding boxes from RPN: {}' .format(class_acc)) print('Loss RPN classifier: {}'.format(loss_rpn_cls)) print('Loss RPN regression: {}'.format(loss_rpn_regr)) print('Loss Detector classifier: {}'.format( loss_class_cls)) print('Loss Detector regression: {}'.format( loss_class_regr)) print('Elapsed time: {}'.format(time.time() - start_time)) loss_dict_train = { "rpn_cls": loss_rpn_cls, "rpn_reg": loss_rpn_regr, "final_cls": loss_class_cls, "final_reg": loss_class_regr } curr_loss = 0 for l in losses_to_watch: curr_loss += 
loss_dict_train[l] iter_num = 0 start_time = time.time() write_log(tbCallBack, train_names, [ loss_rpn_cls, loss_rpn_regr, loss_class_cls, loss_class_regr, curr_loss, class_acc ], epoch_num) if curr_loss < best_loss: if C.verbose: print( 'Total loss decreased from {} to {} in training, saving weights' .format(best_loss, curr_loss)) save_log_data = '\nTotal loss decreased from {} to {} in epoch {}/{} in training, saving weights'.format( best_loss, curr_loss, epoch_num + 1, num_epochs) with open("./saving_log.txt", "a") as f: f.write(save_log_data) best_loss = curr_loss model_all.save_weights(C.weights_all_path) break except Exception as e: print('Exception: {}'.format(e)) continue if validation_interval > 0: # validation if (epoch_num + 1) % validation_interval == 0: progbar = generic_utils.Progbar(validation_epoch_length) print("Validation... \n") while True: try: X, Y, img_data = next(data_gen_val) if train_rpn: val_loss_rpn = model_rpn.test_on_batch(X, Y) P_rpn = model_rpn.predict_on_batch(X) R = roi_helpers.rpn_to_roi( P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=C.rpn_nms_threshold, flag="train") # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format X2, Y1, Y2, IouS = roi_helpers.calc_iou( R, img_data, C, class_mapping) neg_samples = np.where(Y1[0, :, -1] == 1) pos_samples = np.where(Y1[0, :, -1] == 0) if len(neg_samples) > 0: neg_samples = neg_samples[0] else: neg_samples = [] if len(pos_samples) > 0: pos_samples = pos_samples[0] else: pos_samples = [] rpn_accuracy_rpn_monitor.append(len(pos_samples)) rpn_accuracy_for_epoch.append((len(pos_samples))) if C.num_rois > 1: if len(pos_samples) < C.num_rois // 2: selected_pos_samples = pos_samples.tolist() else: selected_pos_samples = np.random.choice( pos_samples, C.num_rois // 2, replace=False).tolist() try: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist() except: selected_neg_samples = np.random.choice( neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist() sel_samples = selected_pos_samples + selected_neg_samples else: # in the extreme case where num_rois = 1, we pick a random pos or neg sample selected_pos_samples = pos_samples.tolist() selected_neg_samples = neg_samples.tolist() if np.random.randint(0, 2): sel_samples = random.choice(neg_samples) else: sel_samples = random.choice(pos_samples) if train_final_classifier: val_loss_class = model_classifier.test_on_batch( [X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]]) if train_rpn: losses_val[iter_num, 0] = val_loss_rpn[1] losses_val[iter_num, 1] = val_loss_rpn[2] else: losses_val[iter_num, 0] = 0 losses_val[iter_num, 1] = 0 if train_final_classifier: losses_val[iter_num, 2] = val_loss_class[1] losses_val[iter_num, 3] = val_loss_class[2] losses_val[iter_num, 4] = val_loss_class[3] else: losses_val[iter_num, 2] = 0 losses_val[iter_num, 3] = 0 losses_val[iter_num, 4] = 0 iter_num += 1 progbar.update( iter_num, [('rpn_cls', np.mean(losses_val[:iter_num, 0])), ('rpn_regr', np.mean(losses_val[:iter_num, 1])), ('detector_cls', np.mean(losses_val[:iter_num, 2])), ('detector_regr', np.mean(losses_val[:iter_num, 3]))]) if iter_num == validation_epoch_length: if train_rpn: val_loss_rpn_cls = np.mean(losses_val[:, 0]) val_loss_rpn_regr = np.mean(losses_val[:, 1]) else: val_loss_rpn_cls = 0 val_loss_rpn_regr = 0 if train_final_classifier: val_loss_class_cls = np.mean(losses_val[:, 2]) val_loss_class_regr = np.mean(losses_val[:, 3]) 
val_class_acc = np.mean(losses_val[:, 4]) else: val_loss_class_cls = 0 val_loss_class_regr = 0 val_class_acc = 0 mean_overlapping_bboxes = float( sum(rpn_accuracy_for_epoch)) / len( rpn_accuracy_for_epoch) rpn_accuracy_for_epoch = [] loss_dict_valid = { "rpn_cls": val_loss_rpn_cls, "rpn_reg": val_loss_rpn_regr, "final_cls": val_loss_class_cls, "final_reg": val_loss_class_regr } val_curr_loss = 0 for l in losses_to_watch: val_curr_loss += loss_dict_valid[l] write_log(tbCallBack, val_names, [ val_loss_rpn_cls, val_loss_rpn_regr, val_loss_class_cls, val_loss_class_regr, val_curr_loss, val_class_acc ], epoch_num) if C.verbose: print('[INFO VALIDATION]') print( 'Mean number of bounding boxes from RPN overlapping ground truth boxes: {}' .format(mean_overlapping_bboxes)) print( 'Classifier accuracy for bounding boxes from RPN: {}' .format(val_class_acc)) print('Loss RPN classifier: {}'.format( val_loss_rpn_cls)) print('Loss RPN regression: {}'.format( val_loss_rpn_regr)) print('Loss Detector classifier: {}'.format( val_loss_class_cls)) print('Loss Detector regression: {}'.format( val_loss_class_regr)) print( "current loss: %.2f, best loss: %.2f at epoch: %d" % (val_curr_loss, val_best_loss, val_best_loss_epoch)) print('Elapsed time: {}'.format(time.time() - start_time)) if val_curr_loss < val_best_loss: if C.verbose: print( 'Total loss decreased from {} to {}, saving weights' .format(val_best_loss, val_curr_loss)) save_log_data = '\nTotal loss decreased from {} to {} in epoch {}/{} in validation, saving weights'.format( val_best_loss, val_curr_loss, epoch_num + 1, num_epochs) with open("./saving_log.txt", "a") as f: f.write(save_log_data) val_best_loss = val_curr_loss val_best_loss_epoch = epoch_num model_all.save_weights(C.weights_all_path) start_time = time.time() iter_num = 0 break except: pass print('Training complete, exiting.')
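# A hypothetical invocation of Train_frcnn() for the alternating schedule its docstring
# describes: first train the base network plus the RPN, then freeze them and train only
# the final classifier. The paths and the network-architecture module are placeholders,
# not files shipped with this code.
import my_resnet_arch as arch  # placeholder: any network .py exposing nn_base/rpn/classifier

# stage 1: base_nn + RPN only
Train_frcnn(train_path='annotations.txt', network_arch=arch, num_epochs=20,
            output_weight_path='frcnn_stage1.h5',
            train_rpn=True, train_base_nn=True, train_final_classifier=False,
            losses_to_watch=['rpn_cls', 'rpn_reg'])

# stage 2: final classifier only, warm-started from stage 1
Train_frcnn(train_path='annotations.txt', network_arch=arch, num_epochs=20,
            output_weight_path='frcnn_stage2.h5',
            input_weights_path='frcnn_stage1.h5',
            train_rpn=False, train_base_nn=False, train_final_classifier=True,
            losses_to_watch=['final_cls', 'final_reg'])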
# Imports assumed by this training script (not shown in the excerpt): os, pickle,
# pprint, random, time, numpy as np; from keras: backend as K, Input, Model, Adam,
# generic_utils; from keras_frcnn: config, data_generators, losses as losses_fn,
# roi_helpers, resnet as nn, and a get_data() label-file parser.
def train_kitti():
    # config for data augmentation
    cfg = config.Config()
    cfg.use_horizontal_flips = True
    cfg.use_vertical_flips = True
    cfg.rot_90 = True
    cfg.num_rois = 32
    cfg.base_net_weights = os.path.join('./model/', nn.get_weight_path())

    # TODO: this is the only file that should need changing to train on other data
    cfg.model_path = './model/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
    cfg.simple_label_file = './kitti_simple_label.txt'

    all_images, classes_count, class_mapping = get_data(cfg.simple_label_file)

    if 'bg' not in classes_count:
        classes_count['bg'] = 0
        class_mapping['bg'] = len(class_mapping)

    cfg.class_mapping = class_mapping
    with open(cfg.config_save_file, 'wb') as config_f:
        pickle.dump(cfg, config_f)
    print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(cfg.config_save_file))

    inv_map = {v: k for k, v in class_mapping.items()}

    print('Training images per class:')
    pprint.pprint(classes_count)
    print('Num classes (including bg) = {}'.format(len(classes_count)))

    random.shuffle(all_images)
    num_imgs = len(all_images)
    train_imgs = [s for s in all_images if s['imageset'] == 'trainval']
    val_imgs = [s for s in all_images if s['imageset'] == 'test']
    print('Num train samples {}'.format(len(train_imgs)))
    print('Num val samples {}'.format(len(val_imgs)))

    data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, cfg, nn.get_img_output_length, K.image_dim_ordering(), mode='train')
    data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, cfg, nn.get_img_output_length, K.image_dim_ordering(), mode='val')

    if K.image_dim_ordering() == 'th':
        input_shape_img = (3, None, None)
    else:
        input_shape_img = (None, None, 3)

    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(None, 4))

    # define the base network (resnet here, can be VGG, Inception, etc)
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(cfg.anchor_box_scales) * len(cfg.anchor_box_ratios)
    rpn = nn.rpn(shared_layers, num_anchors)

    classifier = nn.classifier(shared_layers, roi_input, cfg.num_rois, nb_classes=len(classes_count), trainable=True)

    model_rpn = Model(img_input, rpn[:2])
    model_classifier = Model([img_input, roi_input], classifier)

    # this is a model that holds both the RPN and the classifier, used to load/save weights for the models
    model_all = Model([img_input, roi_input], rpn[:2] + classifier)

    try:
        # note: weights are loaded from cfg.model_path (the resnet50 snapshot above), not cfg.base_net_weights
        print('loading weights from {}'.format(cfg.model_path))
        model_rpn.load_weights(cfg.model_path, by_name=True)
        model_classifier.load_weights(cfg.model_path, by_name=True)
    except Exception as e:
        print(e)
        print('Could not load pretrained model weights. Weights can be found in the keras application folder '
              'https://github.com/fchollet/keras/tree/master/keras/applications')

    optimizer = Adam(lr=1e-5)
    optimizer_classifier = Adam(lr=1e-5)
    model_rpn.compile(optimizer=optimizer, loss=[losses_fn.rpn_loss_cls(num_anchors), losses_fn.rpn_loss_regr(num_anchors)])
    model_classifier.compile(optimizer=optimizer_classifier,
                             loss=[losses_fn.class_loss_cls, losses_fn.class_loss_regr(len(classes_count) - 1)],
                             metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
    model_all.compile(optimizer='sgd', loss='mae')

    epoch_length = 500
    num_epochs = int(cfg.num_epochs)
    iter_num = 0

    losses = np.zeros((epoch_length, 5))
    rpn_accuracy_rpn_monitor = []
    rpn_accuracy_for_epoch = []
    start_time = time.time()

    best_loss = np.Inf

    class_mapping_inv = {v: k for k, v in class_mapping.items()}
    print('Starting training')
    vis = True

    for epoch_num in range(num_epochs):
        progbar = generic_utils.Progbar(epoch_length)
        print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))

        while True:
            try:
                if len(rpn_accuracy_rpn_monitor) == epoch_length and cfg.verbose:
                    mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(rpn_accuracy_rpn_monitor)
                    rpn_accuracy_rpn_monitor = []
                    print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
                    if mean_overlapping_bboxes == 0:
                        print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')

                X, Y, img_data = next(data_gen_train)

                loss_rpn = model_rpn.train_on_batch(X, Y)

                P_rpn = model_rpn.predict_on_batch(X)

                result = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], cfg, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)

                # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
                X2, Y1, Y2, IouS = roi_helpers.calc_iou(result, img_data, cfg, class_mapping)

                if X2 is None:
                    rpn_accuracy_rpn_monitor.append(0)
                    rpn_accuracy_for_epoch.append(0)
                    continue

                # the last column of Y1 flags background RoIs (1 = bg, 0 = foreground)
                neg_samples = np.where(Y1[0, :, -1] == 1)
                pos_samples = np.where(Y1[0, :, -1] == 0)

                if len(neg_samples) > 0:
                    neg_samples = neg_samples[0]
                else:
                    neg_samples = []

                if len(pos_samples) > 0:
                    pos_samples = pos_samples[0]
                else:
                    pos_samples = []

                rpn_accuracy_rpn_monitor.append(len(pos_samples))
                rpn_accuracy_for_epoch.append(len(pos_samples))

                if cfg.num_rois > 1:
                    if len(pos_samples) < cfg.num_rois // 2:
                        selected_pos_samples = pos_samples.tolist()
                    else:
                        selected_pos_samples = np.random.choice(pos_samples, cfg.num_rois // 2, replace=False).tolist()
                    try:
                        selected_neg_samples = np.random.choice(neg_samples, cfg.num_rois - len(selected_pos_samples), replace=False).tolist()
                    except ValueError:
                        # not enough negatives to sample without replacement
                        selected_neg_samples = np.random.choice(neg_samples, cfg.num_rois - len(selected_pos_samples), replace=True).tolist()

                    sel_samples = selected_pos_samples + selected_neg_samples
                else:
                    # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                    selected_pos_samples = pos_samples.tolist()
                    selected_neg_samples = neg_samples.tolist()
                    if np.random.randint(0, 2):
                        sel_samples = random.choice(neg_samples)
                    else:
                        sel_samples = random.choice(pos_samples)

                loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])

                losses[iter_num, 0] = loss_rpn[1]
                losses[iter_num, 1] = loss_rpn[2]
                losses[iter_num, 2] = loss_class[1]
                losses[iter_num, 3] = loss_class[2]
                losses[iter_num, 4] = loss_class[3]

                iter_num += 1

                progbar.update(iter_num,
                               [('rpn_cls', np.mean(losses[:iter_num, 0])), ('rpn_regr', np.mean(losses[:iter_num, 1])),
                                ('detector_cls', np.mean(losses[:iter_num, 2])), ('detector_regr', np.mean(losses[:iter_num, 3]))])

                if iter_num == epoch_length:
                    loss_rpn_cls = np.mean(losses[:, 0])
                    loss_rpn_regr = np.mean(losses[:, 1])
                    loss_class_cls = np.mean(losses[:, 2])
                    loss_class_regr = np.mean(losses[:, 3])
                    class_acc = np.mean(losses[:, 4])

                    mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
                    rpn_accuracy_for_epoch = []

                    if cfg.verbose:
                        print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
                        print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                        print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                        print('Loss RPN regression: {}'.format(loss_rpn_regr))
                        print('Loss Detector classifier: {}'.format(loss_class_cls))
                        print('Loss Detector regression: {}'.format(loss_class_regr))
                        print('Elapsed time: {}'.format(time.time() - start_time))

                    curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                    iter_num = 0
                    start_time = time.time()

                    if curr_loss < best_loss:
                        if cfg.verbose:
                            print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
                        best_loss = curr_loss
                        model_all.save_weights(cfg.model_path)

                    break

            except Exception as e:
                print('Exception: {}'.format(e))
                # save model
                model_all.save_weights(cfg.model_path)
                continue

    print('Training complete, exiting.')
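# A minimal usage sketch for the script above. The original excerpt shows no
# entry point, so this wrapper is illustrative only; the recursion-limit bump
# mirrors what the inference script below does before unpickling the config.
if __name__ == '__main__':
    import sys
    sys.setrecursionlimit(40000)
    train_kitti()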
# Imports assumed by this training script (not shown in the excerpt): pickle,
# pprint, random, time, numpy as np; from keras: backend as K, Input, Model,
# Adam, generic_utils; from keras_frcnn: config, data_generators, losses,
# roi_helpers (the backbone module nn is imported inside the function).
def training(train_path, input_weight_path, parser="pascal_voc", num_rois=32, network="resnet50",
             horizontal_flips=False, vertical_flips=False, rot_90=False, num_epochs=2000,
             config_filename="config.pickle", output_weight_path='./model_frcnn.hdf5'):
    '''
    train_path = Path to training data
    input_weight_path = Input path for weights. If not specified, will try to load default weights provided by keras
    parser = Parser to use. One of simple or pascal_voc
    num_rois = Number of RoIs to process at once
    network = Base network to use. Supports vgg or resnet50
    horizontal_flips = Augment with horizontal flips in training. (Default=false)
    vertical_flips = Augment with vertical flips in training. (Default=false)
    rot_90 = Augment with 90 degree rotations in training. (Default=false)
    num_epochs = Number of epochs
    config_filename = Location to store all the metadata related to the training (to be used when testing)
    output_weight_path = Output path for weights
    '''
    if not train_path:  # if filename is not given
        raise ValueError('Error: path to training data must be specified. Pass --path to command line')

    if parser == 'pascal_voc':
        from keras_frcnn.pascal_voc_parser import get_data
    elif parser == 'simple':
        from keras_frcnn.simple_parser import get_data
    else:
        raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")

    # pass the settings from the command line, and persist them in the config object
    C = config.Config()
    C.use_horizontal_flips = bool(horizontal_flips)
    C.use_vertical_flips = bool(vertical_flips)
    C.rot_90 = bool(rot_90)
    C.model_path = output_weight_path
    C.num_rois = int(num_rois)

    if network == 'vgg':
        C.network = 'vgg'
        from keras_frcnn import vgg as nn
    elif network == 'resnet50':
        from keras_frcnn import resnet as nn
        C.network = 'resnet50'
    else:
        raise ValueError('Not a valid model')

    # check if weight path was passed via command line
    if input_weight_path != "":
        C.base_net_weights = input_weight_path
    else:
        # set the path to weights based on backend and model
        C.base_net_weights = nn.get_weight_path()

    all_imgs, classes_count, class_mapping = get_data(train_path)

    if 'bg' not in classes_count:
        classes_count['bg'] = 0
        class_mapping['bg'] = len(class_mapping)

    C.class_mapping = class_mapping
    inv_map = {v: k for k, v in class_mapping.items()}

    print('Training images per class:')
    pprint.pprint(classes_count)
    print('Num classes (including bg) = {}'.format(len(classes_count)))

    config_output_filename = config_filename
    with open(config_output_filename, 'wb') as config_f:
        pickle.dump(C, config_f)
    print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(config_output_filename))

    random.shuffle(all_imgs)
    num_imgs = len(all_imgs)
    train_imgs = [s for s in all_imgs if s['imageset'] == 'trainval']
    val_imgs = [s for s in all_imgs if s['imageset'] == 'test']
    print('Num train samples {}'.format(len(train_imgs)))
    print('Num val samples {}'.format(len(val_imgs)))

    data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='train')
    data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='val')

    if K.image_dim_ordering() == 'th':
        input_shape_img = (3, None, None)
    else:
        input_shape_img = (None, None, 3)

    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(None, 4))

    # define the base network (resnet here, can be VGG, Inception, etc)
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
    rpn = nn.rpn(shared_layers, num_anchors)

    classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)

    model_rpn = Model(img_input, rpn[:2])
    model_classifier = Model([img_input, roi_input], classifier)

    # this is a model that holds both the RPN and the classifier, used to load/save weights for the models
    model_all = Model([img_input, roi_input], rpn[:2] + classifier)

    try:
        print('loading weights from {}'.format(C.base_net_weights))
        model_rpn.load_weights(C.base_net_weights, by_name=True)
        model_classifier.load_weights(C.base_net_weights, by_name=True)
    except Exception:
        print('Could not load pretrained model weights. Weights can be found in the keras application folder '
              'https://github.com/fchollet/keras/tree/master/keras/applications')

    optimizer = Adam(lr=1e-5)
    optimizer_classifier = Adam(lr=1e-5)
    model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
    model_classifier.compile(optimizer=optimizer_classifier,
                             loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count) - 1)],
                             metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
    model_all.compile(optimizer='sgd', loss='mae')

    epoch_length = 1000
    num_epochs = int(num_epochs)
    iter_num = 0

    # named losses_ to avoid shadowing the keras_frcnn.losses module used above
    losses_ = np.zeros((epoch_length, 5))
    rpn_accuracy_rpn_monitor = []
    rpn_accuracy_for_epoch = []
    start_time = time.time()

    best_loss = np.Inf

    class_mapping_inv = {v: k for k, v in class_mapping.items()}
    print('Starting training')
    vis = True

    for epoch_num in range(num_epochs):
        progbar = generic_utils.Progbar(epoch_length)
        print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))

        while True:
            try:
                if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
                    mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(rpn_accuracy_rpn_monitor)
                    rpn_accuracy_rpn_monitor = []
                    print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
                    if mean_overlapping_bboxes == 0:
                        print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')

                X, Y, img_data = next(data_gen_train)

                loss_rpn = model_rpn.train_on_batch(X, Y)

                P_rpn = model_rpn.predict_on_batch(X)

                R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)

                # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
                X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)

                if X2 is None:
                    rpn_accuracy_rpn_monitor.append(0)
                    rpn_accuracy_for_epoch.append(0)
                    continue

                # the last column of Y1 flags background RoIs (1 = bg, 0 = foreground)
                neg_samples = np.where(Y1[0, :, -1] == 1)
                pos_samples = np.where(Y1[0, :, -1] == 0)

                if len(neg_samples) > 0:
                    neg_samples = neg_samples[0]
                else:
                    neg_samples = []

                if len(pos_samples) > 0:
                    pos_samples = pos_samples[0]
                else:
                    pos_samples = []

                rpn_accuracy_rpn_monitor.append(len(pos_samples))
                rpn_accuracy_for_epoch.append(len(pos_samples))

                if C.num_rois > 1:
                    if len(pos_samples) < C.num_rois // 2:
                        selected_pos_samples = pos_samples.tolist()
                    else:
                        selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist()
                    try:
                        selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
                    except ValueError:
                        # not enough negatives to sample without replacement
                        selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()

                    sel_samples = selected_pos_samples + selected_neg_samples
                else:
                    # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                    selected_pos_samples = pos_samples.tolist()
                    selected_neg_samples = neg_samples.tolist()
                    if np.random.randint(0, 2):
                        sel_samples = random.choice(neg_samples)
                    else:
                        sel_samples = random.choice(pos_samples)

                loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])

                losses_[iter_num, 0] = loss_rpn[1]
                losses_[iter_num, 1] = loss_rpn[2]
                losses_[iter_num, 2] = loss_class[1]
                losses_[iter_num, 3] = loss_class[2]
                losses_[iter_num, 4] = loss_class[3]

                progbar.update(iter_num + 1,
                               [('rpn_cls', losses_[iter_num, 0]), ('rpn_regr', losses_[iter_num, 1]),
                                ('detector_cls', losses_[iter_num, 2]), ('detector_regr', losses_[iter_num, 3])])

                iter_num += 1

                if iter_num == epoch_length:
                    loss_rpn_cls = np.mean(losses_[:, 0])
                    loss_rpn_regr = np.mean(losses_[:, 1])
                    loss_class_cls = np.mean(losses_[:, 2])
                    loss_class_regr = np.mean(losses_[:, 3])
                    class_acc = np.mean(losses_[:, 4])

                    mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
                    rpn_accuracy_for_epoch = []

                    if C.verbose:
                        print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
                        print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                        print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                        print('Loss RPN regression: {}'.format(loss_rpn_regr))
                        print('Loss Detector classifier: {}'.format(loss_class_cls))
                        print('Loss Detector regression: {}'.format(loss_class_regr))
                        print('Elapsed time: {}'.format(time.time() - start_time))

                    curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                    iter_num = 0
                    start_time = time.time()

                    if curr_loss < best_loss:
                        if C.verbose:
                            print('Total loss decreased from {} to {}, saving weights'.format(best_loss, curr_loss))
                        best_loss = curr_loss
                        model_all.save_weights(C.model_path)

                    break

            except Exception as e:
                print('Exception: {}'.format(e))
                continue

    print('Training complete, exiting.')
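# A minimal CLI wrapper for training(), assuming the flags hinted at by the
# docstring and the '--path' error message above. The option names here are
# illustrative, not the original script's parser.
import argparse

def parse_args_and_train():
    ap = argparse.ArgumentParser()
    ap.add_argument('--path', dest='train_path', required=True, help='Path to training data')
    ap.add_argument('--parser', default='pascal_voc', choices=['pascal_voc', 'simple'])
    ap.add_argument('--num_rois', type=int, default=32)
    ap.add_argument('--network', default='resnet50', choices=['vgg', 'resnet50'])
    ap.add_argument('--hf', action='store_true', help='augment with horizontal flips')
    ap.add_argument('--vf', action='store_true', help='augment with vertical flips')
    ap.add_argument('--rot', action='store_true', help='augment with 90 degree rotations')
    ap.add_argument('--num_epochs', type=int, default=2000)
    ap.add_argument('--config_filename', default='config.pickle')
    ap.add_argument('--output_weight_path', default='./model_frcnn.hdf5')
    ap.add_argument('--input_weight_path', default='')
    args = ap.parse_args()
    training(args.train_path, args.input_weight_path, parser=args.parser,
             num_rois=args.num_rois, network=args.network,
             horizontal_flips=args.hf, vertical_flips=args.vf, rot_90=args.rot,
             num_epochs=args.num_epochs, config_filename=args.config_filename,
             output_weight_path=args.output_weight_path)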
# Fragment from a TPU variant of the same inner training loop (indentation
# flattened): a compiled tpu_model stands in for model_rpn on the batch-level
# train/predict calls.

# (this print closes the `if mean_overlapping_bboxes == 0:` check above it in the full loop)
print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')

X, Y, img_data = next(data_gen_train)

loss_rpn = tpu_model.train_on_batch(X, Y)
# loss_rpn = model_rpn.train_on_batch(X, Y)
P_rpn = tpu_model.predict_on_batch(X)
# P_rpn = model_rpn.predict_on_batch(X)

R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)

# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)

if X2 is None:
    rpn_accuracy_rpn_monitor.append(0)
    rpn_accuracy_for_epoch.append(0)
    continue

neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
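# The fragment above assumes a `tpu_model` built elsewhere. A sketch of how
# such a model could have been obtained with the TF 1.x-era contrib API
# (tf.contrib.tpu was removed in TF 2.x; tpu_address is a placeholder, and
# this wrapper is an assumption, not something shown in the source):
import tensorflow as tf

def make_tpu_model(model_rpn, tpu_address):
    resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu=tpu_address)
    strategy = tf.contrib.tpu.TPUDistributionStrategy(resolver)
    # wraps the Keras model so train_on_batch/predict_on_batch run on the TPU
    return tf.contrib.tpu.keras_to_tpu_model(model_rpn, strategy=strategy)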
# Assumed module-level setup for this AWS Lambda handler (not shown in the
# excerpt): json, os, pickle, time, boto3, botocore, numpy as np; keras backend
# K, Input, Model; keras_frcnn's nn and roi_helpers; plus
#   client = boto3.client('s3')
#   dynamodb = boto3.resource('dynamodb')
# and a DecimalEncoder (a json.JSONEncoder subclass for DynamoDB Decimal values).
def handler(event, context):
    img_name = event['img_process']
    client.download_file('adaproject', img_name, '/tmp/' + img_name)
    X = np.load('/tmp/' + img_name)

    with open('config.pickle', 'rb') as f_in:
        C = pickle.load(f_in)

    class_mapping = C.class_mapping
    num_features = 1024

    input_shape_img = (None, None, 3)
    input_shape_features = (None, None, num_features)

    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(C.num_rois, 4))
    feature_map_input = Input(shape=input_shape_features)

    # define the base network (resnet here, can be VGG, Inception, etc)
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
    rpn_layers = nn.rpn(shared_layers, num_anchors)
    classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)

    model_rpn = Model(img_input, rpn_layers)
    model_classifier = Model([feature_map_input, roi_input], classifier)

    BUCKET_NAME = 'adaproject'  # replace with your bucket name
    KEY = 'model_frcnn.hdf5'  # replace with your object key
    s3 = boto3.resource('s3')
    try:
        s3.Bucket(BUCKET_NAME).download_file(KEY, '/tmp/model_frcnn.hdf5')
        print('File Found')
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise

    model_rpn.load_weights('/tmp/model_frcnn.hdf5', by_name=True)
    model_classifier.load_weights('/tmp/model_frcnn.hdf5', by_name=True)

    model_rpn.compile(optimizer='sgd', loss='mse')
    model_classifier.compile(optimizer='sgd', loss='mse')

    # Starting RPN prediction
    [Y1, Y2, F] = model_rpn.predict(X)
    R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7)

    # convert from (x1,y1,x2,y2) to (x,y,w,h)
    R[:, 2] -= R[:, 0]
    R[:, 3] -= R[:, 1]

    bboxes = {}
    probs = {}
    bbox_threshold = 0.8

    class_mapping = {v: k for k, v in class_mapping.items()}
    # print(class_mapping)
    class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}

    for jk in range(R.shape[0] // C.num_rois + 1):
        ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)
        if ROIs.shape[1] == 0:
            break
        if jk == R.shape[0] // C.num_rois:
            # pad R so the last batch is a full num_rois wide
            curr_shape = ROIs.shape
            target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
            ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
            ROIs_padded[:, :curr_shape[1], :] = ROIs
            ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
            ROIs = ROIs_padded

        [P_cls, P_regr] = model_classifier.predict([F, ROIs])

        for ii in range(P_cls.shape[1]):
            # skip low-confidence boxes and the 'bg' class (last index)
            if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
                continue

            cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
            if cls_name not in bboxes:
                bboxes[cls_name] = []
                probs[cls_name] = []

            (x, y, w, h) = ROIs[0, ii, :]
            cls_num = np.argmax(P_cls[0, ii, :])
            try:
                (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                tx /= C.classifier_regr_std[0]
                ty /= C.classifier_regr_std[1]
                tw /= C.classifier_regr_std[2]
                th /= C.classifier_regr_std[3]
                x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
            except Exception:
                pass
            bboxes[cls_name].append([C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h)])
            probs[cls_name].append(np.max(P_cls[0, ii, :]))

    final_data = []
    output = {}
    for key in bboxes:
        bbox = np.array(bboxes[key])
        new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
        # one entry per surviving box; the original reused a single dict here,
        # so only the last box of each class was kept. The values are the
        # stride-scaled (x1, y1, x2, y2) corners; the 'w'/'z' key names are
        # kept as in the original payload ('z' is likely a typo for 'h').
        for i in range(new_boxes.shape[0]):
            final_data.append({key: {'x': str(new_boxes[i][0]), 'y': str(new_boxes[i][1]),
                                     'w': str(new_boxes[i][2]), 'z': str(new_boxes[i][3]),
                                     'prob': str(new_probs[i])}})

    output['bboxes'] = bboxes
    output['rpn'] = final_data

    timestamp = int(time.time() * 1000)
    table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
    result = table.update_item(
        Key={'requestId': event['requestId']},
        ExpressionAttributeNames={
            '#status': 'status',
            '#result': 'result',
        },
        ExpressionAttributeValues={
            ':status': 'DONE',
            ':result': output,
            ':updatedAt': timestamp,
        },
        UpdateExpression='SET #status = :status, '
                         '#result = :result, '
                         'updatedAt = :updatedAt',
        ReturnValues='ALL_NEW',
    )

    response = {
        "statusCode": 200,
        "body": json.dumps(result['Attributes'], cls=DecimalEncoder)
    }
    return response
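# Shape of the event this handler expects, inferred from the reads above
# ('img_process' keys a .npy image array in the 'adaproject' bucket,
# 'requestId' keys the DynamoDB row). The concrete values are hypothetical:
example_event = {
    'img_process': 'frame_0001.npy',  # hypothetical S3 object key
    'requestId': 'abc-123',           # hypothetical request id
}
# handler(example_event, None)  # would download, run detection, and update DynamoDB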
# Fragment from a multi-GPU demo of the same loop (indentation flattened); it
# deliberately prints a warning before the downstream RoI helpers crash, and
# leaves the fix to the reader.
        data_to_run = ref
        break

X, Y = data_to_run['x'], data_to_run['y']
print('\n\n')
print('BATCHED X SHAPE: {}'.format(X.shape))
for ii in range(noutputs):
    print('BATCHED Y {} SHAPE: {}'.format(ii, Y[ii].shape))
print('TRAINING, WATCH THE GPU UTILIZATION SPIKE ON ALL GPUS')

loss_rpn = model_rpn.train_on_batch(X, Y)
P_rpn = model_rpn.predict_on_batch(X)

print('\n\n ==================== WILL CRASH SOON. YOU FIX IT =======================\n\n')

R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)

# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)

if X2 is None:
    rpn_accuracy_rpn_monitor.append(0)
    rpn_accuracy_for_epoch.append(0)
    continue

neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)

if len(neg_samples) > 0:
    neg_samples = neg_samples[0]
else:
    neg_samples = []  # truncated in the source; completed to match the other copies of this loop
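# One plausible reason the fragment above crashes: rpn_to_roi and calc_iou, as
# used throughout this document, index Y1[0, :, :] and a single img_data
# record, i.e. they assume batch size 1, while a multi-GPU batch is larger. A
# sketch of unstacking the batched RPN output first, assuming P_rpn[0]/P_rpn[1]
# are batched along axis 0 and a hypothetical img_data_list holds one record
# per image:
for b in range(P_rpn[0].shape[0]):
    cls_b = P_rpn[0][b:b + 1]    # slice keeps a leading batch axis of 1
    regr_b = P_rpn[1][b:b + 1]
    R_b = roi_helpers.rpn_to_roi(cls_b, regr_b, C, K.image_dim_ordering(),
                                 use_regr=True, overlap_thresh=0.7, max_boxes=300)
    X2, Y1, Y2, IouS = roi_helpers.calc_iou(R_b, img_data_list[b], C, class_mapping)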
# Imports and helpers assumed by this video-annotation script (not shown in the
# excerpt): cv2, os, re, sys, time, pickle, numpy as np, itertools.accumulate,
# keras backend K, Input, Model, keras_frcnn's nn and roi_helpers, plus the
# script's own cleanup(), convert_to_images(), get_file_names(), save_to_video(),
# format_img(), and the module-level settings img_path, output_path and num_rois.
def main():
    cleanup()
    sys.setrecursionlimit(40000)
    config_output_filename = 'config.pickle'
    with open(config_output_filename, 'rb') as f_in:  # pickle files must be opened in binary mode
        C = pickle.load(f_in)

    # turn off any data augmentation at test time
    C.use_horizontal_flips = False
    C.use_vertical_flips = False
    C.rot_90 = False

    class_mapping = C.class_mapping
    if 'bg' not in class_mapping:
        class_mapping['bg'] = len(class_mapping)

    class_mapping = {v: k for k, v in class_mapping.items()}
    print(class_mapping)
    class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}
    C.num_rois = num_rois  # num_rois is a module-level setting in this script

    if K.image_dim_ordering() == 'th':
        input_shape_img = (3, None, None)
        input_shape_features = (1024, None, None)
    else:
        input_shape_img = (None, None, 3)
        input_shape_features = (None, None, 1024)

    img_input = Input(shape=input_shape_img)
    roi_input = Input(shape=(C.num_rois, 4))
    feature_map_input = Input(shape=input_shape_features)

    # define the base network (resnet here, can be VGG, Inception, etc)
    shared_layers = nn.nn_base(img_input, trainable=True)

    # define the RPN, built on the base layers
    num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
    rpn_layers = nn.rpn(shared_layers, num_anchors)
    classifier = nn.classifier(feature_map_input, roi_input, C.num_rois, nb_classes=len(class_mapping), trainable=True)

    model_rpn = Model(img_input, rpn_layers)
    model_classifier_only = Model([feature_map_input, roi_input], classifier)
    model_classifier = Model([feature_map_input, roi_input], classifier)

    model_rpn.load_weights(C.model_path, by_name=True)
    model_classifier.load_weights(C.model_path, by_name=True)

    model_rpn.compile(optimizer='sgd', loss='mse')
    model_classifier.compile(optimizer='sgd', loss='mse')

    all_imgs = []
    classes = {}
    bbox_threshold = 0.8
    visualise = True

    print("Converting video to images..")
    convert_to_images()
    print("annotating...")

    # natural sort so frame2 comes before frame10
    list_files = sorted(get_file_names(img_path),
                        key=lambda var: [int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])

    for img_name in list_files:
        if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):
            continue
        print(img_name)
        st = time.time()
        filepath = os.path.join(img_path, img_name)
        img = cv2.imread(filepath)
        X = format_img(img, C)

        # rebuild a displayable image: back to HWC order and add the channel means again
        img_scaled = np.transpose(X.copy()[0, (2, 1, 0), :, :], (1, 2, 0)).copy()
        img_scaled[:, :, 0] += 123.68
        img_scaled[:, :, 1] += 116.779
        img_scaled[:, :, 2] += 103.939
        img_scaled = img_scaled.astype(np.uint8)

        if K.image_dim_ordering() == 'tf':
            X = np.transpose(X, (0, 2, 3, 1))

        # get the feature maps and output from the RPN
        [Y1, Y2, F] = model_rpn.predict(X)

        R = roi_helpers.rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7)

        # convert from (x1,y1,x2,y2) to (x,y,w,h)
        R[:, 2] -= R[:, 0]
        R[:, 3] -= R[:, 1]

        # apply the spatial pyramid pooling to the proposed regions
        bboxes = {}
        probs = {}
        for jk in range(R.shape[0] // C.num_rois + 1):
            ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)
            if ROIs.shape[1] == 0:
                break
            if jk == R.shape[0] // C.num_rois:
                # pad R
                curr_shape = ROIs.shape
                target_shape = (curr_shape[0], C.num_rois, curr_shape[2])
                ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)
                ROIs_padded[:, :curr_shape[1], :] = ROIs
                ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]
                ROIs = ROIs_padded

            [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])

            for ii in range(P_cls.shape[1]):
                # skip low-confidence boxes and the 'bg' class (last index)
                if np.max(P_cls[0, ii, :]) < bbox_threshold or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):
                    continue

                cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]
                if cls_name not in bboxes:
                    bboxes[cls_name] = []
                    probs[cls_name] = []

                (x, y, w, h) = ROIs[0, ii, :]
                cls_num = np.argmax(P_cls[0, ii, :])
                try:
                    (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]
                    tx /= C.classifier_regr_std[0]
                    ty /= C.classifier_regr_std[1]
                    tw /= C.classifier_regr_std[2]
                    th /= C.classifier_regr_std[3]
                    x, y, w, h = roi_helpers.apply_regr(x, y, w, h, tx, ty, tw, th)
                except Exception:
                    pass
                bboxes[cls_name].append([16 * x, 16 * y, 16 * (x + w), 16 * (y + h)])
                probs[cls_name].append(np.max(P_cls[0, ii, :]))

        all_dets = []
        all_objects = []

        for key in bboxes:
            bbox = np.array(bboxes[key])
            new_boxes, new_probs = roi_helpers.non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.5)
            for jk in range(new_boxes.shape[0]):
                (x1, y1, x2, y2) = new_boxes[jk, :]
                cv2.rectangle(img_scaled, (x1, y1), (x2, y2), class_to_color[key], 2)

                textLabel = '{}: {}'.format(key, int(100 * new_probs[jk]))
                all_dets.append((key, 100 * new_probs[jk]))
                all_objects.append((key, 1))

                (retval, baseLine) = cv2.getTextSize(textLabel, cv2.FONT_HERSHEY_COMPLEX, 1, 1)
                textOrg = (x1, y1)

                cv2.rectangle(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                              (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (0, 0, 0), 2)
                cv2.rectangle(img_scaled, (textOrg[0] - 5, textOrg[1] + baseLine - 5),
                              (textOrg[0] + retval[0] + 5, textOrg[1] - retval[1] - 5), (255, 255, 255), -1)
                cv2.putText(img_scaled, textLabel, textOrg, cv2.FONT_HERSHEY_DUPLEX, 1, (0, 0, 0), 1)

        print('Elapsed time = {}'.format(time.time() - st))
        height, width, channels = img_scaled.shape
        cv2.rectangle(img_scaled, (0, 0), (width, 30), (0, 0, 0), -1)
        cv2.putText(img_scaled, "Obj count: " + str(list(accumulate(all_objects))), (5, 19),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 1)
        cv2.imwrite(os.path.join(output_path, img_name), img_scaled)
        print(all_dets)

    print("saving to video..")
    save_to_video()
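# format_img() is referenced above but not defined in this excerpt. Here is a
# sketch that is consistent with how main() undoes it (adding back the channel
# means 123.68/116.779/103.939 to an RGB view and transposing from (1, 3, H, W));
# C.im_size and the exact resize policy are assumptions, not the source's code.
def format_img(img, C):
    height, width = img.shape[:2]
    # scale the smaller side to C.im_size, keeping the aspect ratio
    if width <= height:
        ratio = float(C.im_size) / width
        new_w, new_h = C.im_size, int(ratio * height)
    else:
        ratio = float(C.im_size) / height
        new_w, new_h = int(ratio * width), C.im_size
    img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    img = img.astype(np.float32)
    # subtract the per-channel means that main() adds back (image is BGR here)
    img[:, :, 0] -= 103.939
    img[:, :, 1] -= 116.779
    img[:, :, 2] -= 123.68
    img = np.transpose(img, (2, 0, 1))  # to (3, H, W)
    return np.expand_dims(img, axis=0)  # to (1, 3, H, W)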