# Example 1 (original snippet marker: 示例#1)
# 0
# digit-detection models and setting
# HDF5 model files: one network filters character regions (detector),
# the other classifies the digit inside each region (recognizer).
detect_model = "model/digit-detector/detector_model.hdf5"
recognize_model = "model/digit-detector/recognize_model.hdf5"

# Per-model mean pixel values subtracted during grayscale preprocessing.
mean_value_for_detector = 107.524
mean_value_for_recognizer = 112.833

# (height, width, channels) expected by both CNNs: 32x32 grayscale.
model_input_shape = (32,32,1)

preproc_for_detector = preproc.GrayImgPreprocessor(mean_value_for_detector)
preproc_for_recognizer = preproc.GrayImgPreprocessor(mean_value_for_recognizer)

char_detector = cls.CnnClassifier(detect_model, preproc_for_detector, model_input_shape)
char_recognizer = cls.CnnClassifier(recognize_model, preproc_for_recognizer, model_input_shape)

# Spotting pipeline: MSER region proposals -> detector -> recognizer.
digit_spotter = detector.DigitSpotter(char_detector, char_recognizer, rp.MserRegionProposer())

# Read the player-detection CSV and derive each team's median jersey colour.
# NOTE(review): hard-coded absolute Windows path — should come from config/CLI.
df = pd.read_csv('C:/Users/shing/Desktop/player_info.csv', names =('Team','Num','Name','R','G','B'))  
RDB_med = df.groupby('Team').median()
# Median R/G/B per team; iloc[:, 1:4] presumably selects R,G,B after 'Num'
# in the grouped numeric columns — TODO confirm against the CSV schema.
R0_med, G0_med, B0_med = RDB_med.iloc[0,1:4]
R1_med, G1_med, B1_med = RDB_med.iloc[1,1:4]

def process (input_image, params, model_params):
    """Multi-scale preprocessing for one image plus an SVHN evaluation run.

    NOTE(review): this body is a bad merge of two unrelated snippets — the
    ``np.zeros`` call below is immediately followed by orphaned keyword
    arguments from an unrelated ``list_files``-style call, which is a
    syntax error. The original heat-map computation is missing and must be
    reconstructed before this function can run.
    """

    #oriImg = cv2.imread(input_image)  # B,G,R order
    oriImg = input_image
    
    # One scale factor per entry of params['scale_search'], normalised so
    # the image height matches model_params['boxsize'].
    multiplier = [x * model_params['boxsize'] / oriImg.shape[0] for x in params['scale_search']]

    # 19-channel heat-map accumulator (presumably 18 parts + background —
    # TODO confirm against the pose model definition).
    heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19))
                                   n_files_to_sample=1000,
                                   random_order=False)
    annotator = ann.SvhnAnnotation(ANNOTATION_FILE)

    preprocessor_for_detector = preproc.GrayImgPreprocessor(
        mean_value_for_detector)
    preprocessor_for_recognizer = preproc.GrayImgPreprocessor(
        mean_value_for_recognizer)

    detector = cls.CnnClassifier(detect_model, preprocessor_for_detector,
                                 model_input_shape)
    recognizer = cls.CnnClassifier(recognize_model,
                                   preprocessor_for_recognizer,
                                   model_input_shape)

    proposer = rp.MserRegionProposer()

    # 2. create detector
    det = detect.DigitSpotter(detector, recognizer, proposer)

    # 3. Evaluate average precision
    # NOTE(review): img_files is not defined anywhere in this view — likely
    # the variable lost in the merge above.
    evaluator = eval.Evaluator(det, annotator, rp.OverlapCalculator())
    recall, precision, f1_score = evaluator.run(img_files)
    # recall value : 0.1757938334100322, precision value : 0.18070009460737937, f1_score : 0.1782132027058549

    # 4. Evaluate MSER (detector replaced by an always-true classifier so
    # raw MSER proposals are scored without CNN filtering)
    detector = cls.TrueBinaryClassifier(input_shape=model_input_shape)
    preprocessor = preproc.NonePreprocessor()

    # TODO: make detector and recognizer None-type (translated from Korean)
    det = detect.DigitSpotter(detector, recognizer, proposer)
# Patch-extraction settings: presumably a region proposal overlapping a
# ground-truth digit box by >= POS_OVERLAP_THD counts as a positive sample
# — confirm against Extractor.extract_patch.
POS_OVERLAP_THD = 0.6
# (height, width) of the patches cropped for the classifier.
PATCH_SIZE = (32, 32)

if __name__ == "__main__":

    # 1. Load the image file list (comment translated from Korean).
    files = file_io.list_files(directory=DIR,
                               pattern="*.png",
                               recursive_option=False,
                               n_files_to_sample=N_IMAGES,
                               random_order=False)
    n_files = len(files)
    # 80/20 train/validation split by file order.
    n_train_files = int(n_files * 0.8)
    # Fixed: Python-2 `print` statements are syntax errors under Python 3
    # and inconsistent with the print() calls used elsewhere in this file.
    print(n_train_files)

    # NOTE(review): NEG_OVERLAP_THD is used below but not defined in this
    # snippet — presumably a module-level constant (0.05 elsewhere).
    extractor = extractor_.Extractor(rp.MserRegionProposer(),
                                     ann.SvhnAnnotation(ANNOTATION_FILE),
                                     rp.OverlapCalculator())
    train_samples, train_labels = extractor.extract_patch(
        files[:n_train_files], PATCH_SIZE, POS_OVERLAP_THD, NEG_OVERLAP_THD)

    extractor = extractor_.Extractor(rp.MserRegionProposer(),
                                     ann.SvhnAnnotation(ANNOTATION_FILE),
                                     rp.OverlapCalculator())
    validation_samples, validation_labels = extractor.extract_patch(
        files[n_train_files:], PATCH_SIZE, POS_OVERLAP_THD, NEG_OVERLAP_THD)

    print(train_samples.shape, train_labels.shape)
    print(validation_samples.shape, validation_labels.shape)

    #     show.plot_images(samples, labels.reshape(-1,).tolist())
# Example 4 (original snippet marker: 示例#4)
# 0
# SVHN training-set location and patch-extraction settings.
N_IMAGES = None   # None -> list_files samples every file found
DIR = '../dataset/svhn/train'
ANNOTATION_FILE = "../dataset/svhn/train/digitStruct.json"
# Overlap thresholds — presumably proposals below NEG_OVERLAP_THD become
# negative samples and above POS_OVERLAP_THD positive; confirm in Extractor.
NEG_OVERLAP_THD = 0.05
POS_OVERLAP_THD = 0.6
PATCH_SIZE = (32,32)   # (height, width) of extracted patches

if __name__ == "__main__":

    # 1. Collect the training images (comment translated from Korean).
    files = file_io.list_files(directory=DIR,
                               pattern="*.png",
                               recursive_option=False,
                               n_files_to_sample=N_IMAGES,
                               random_order=False)
    n_files = len(files)
    # First 80% of the listing is the training split, the rest validation.
    n_train_files = int(n_files * 0.8)
    print (n_train_files)

    def _make_extractor():
        # Fresh extractor per split, mirroring the original behaviour.
        return extractor_.Extractor(rp.MserRegionProposer(),
                                    ann.SvhnAnnotation(ANNOTATION_FILE),
                                    rp.OverlapCalculator())

    train_samples, train_labels = _make_extractor().extract_patch(
        files[:n_train_files], PATCH_SIZE, POS_OVERLAP_THD, NEG_OVERLAP_THD)
    validation_samples, validation_labels = _make_extractor().extract_patch(
        files[n_train_files:], PATCH_SIZE, POS_OVERLAP_THD, NEG_OVERLAP_THD)

    print (train_samples.shape, train_labels.shape)
    print (validation_samples.shape, validation_labels.shape)

#     show.plot_images(samples, labels.reshape(-1,).tolist())

    # Persist each split to HDF5: images written with "w" (truncate),
    # labels appended to the same file with "a".
    for array, fname, key, mode, dt in (
            (train_samples, "train.hdf5", "images", "w", "uint8"),
            (train_labels, "train.hdf5", "labels", "a", "int"),
            (validation_samples, "val.hdf5", "images", "w", "uint8"),
            (validation_labels, "val.hdf5", "labels", "a", "int")):
        file_io.FileHDF5().write(array, fname, key, mode, dtype=dt)