# Example #1
# 0
def generate_occlusionClassifier(input_model):
    """Generate a family of artificially-cropped occlusion classifiers.

    Reads the detector model at *input_model* and, for each crop level
    from ``half_width`` down to 0, writes a cropped copy of the soft
    cascade to ``"<input_model>_artificial_crop_<n>"``.

    :param input_model: path of the input detector model file.
    """
    width = 32
    half_width = 16

    model = read_model(input_model)
    for i in range(1 + half_width):
        yThresh = half_width - i
        new_model = DetectorModel()
        new_model.CopyFrom(model)
        print("model.detector_type", model.detector_type)

        # Only soft-cascade models are handled; any other type is skipped.
        if model.detector_type == model.SoftCascadeOverIntegralChannels:
            old_cascade = model.soft_cascade_model
            new_cascade = new_model.soft_cascade_model
            print("Model has %i stages" % len(old_cascade.stages))
            update_cascade(old_cascade, new_cascade, width - yThresh)

            # yThresh*4 is presumably the crop in pixels (shrinking factor
            # of 4 elsewhere in this file) -- TODO confirm.
            output_path = input_model + "_artificial_crop_" + str(yThresh * 4)

            # "with" guarantees the file is closed even if the write fails.
            with open(output_path, "wb") as out_file:
                out_file.write(new_model.SerializeToString())
            print("Created output model file", output_path)
def create_mirrored_occluded_model(input_path, output_path):
    """Mirror a left/right-occluded detector model to the opposite side.

    Reads a DetectorModel protobuf from *input_path*, flips every soft
    cascade stage horizontally, swaps the occlusion type (left <-> right),
    and serializes the result to *output_path*.

    :raises RuntimeError: if the model has no left/right occlusion, is not
        a soft-cascade model, or does not use "hog6_luv" channels.
    """
    # NOTE(review): this function is defined twice in this file; the later
    # definition shadows this one at import time.
    input_model = DetectorModel()
    with open(input_path, "rb") as f:
        input_model.ParseFromString(f.read())

    output_model = DetectorModel()

    print("Read model", input_model.detector_name or "(no name)",
          "was trained on ", input_model.training_dataset_name)

    occlusion_level, occlusion_type = get_occlusion_level_and_type(
        input_model, input_path)

    # Mirroring only makes sense for left/right occlusions.
    if occlusion_level == 0 or (occlusion_type not in [
            DetectorModel.LeftOcclusion, DetectorModel.RightOcclusion
    ]):
        print("occlusion_level, occlusion_type == %s, %s" %
              (occlusion_level, occlusion_type))
        raise RuntimeError("Input model has no left or right occlusion")

    output_model.CopyFrom(input_model)

    output_model.occlusion_level = occlusion_level

    # Swap the occlusion side: mirroring a left model yields a right model
    # and vice versa.
    if occlusion_type == DetectorModel.LeftOcclusion:
        output_model.occlusion_type = DetectorModel.RightOcclusion
        output_model.detector_name += " transformed to right model"
    elif occlusion_type == DetectorModel.RightOcclusion:
        output_model.occlusion_type = DetectorModel.LeftOcclusion
        output_model.detector_name += " transformed to left model"
    else:
        raise RuntimeError(
            "Only left and right occlusions are currently handled")

    if not output_model.soft_cascade_model:
        raise RuntimeError(
            "Only SoftCascadeOverIntegralChannels models are currently supported"
        )

    model_width, model_height = get_model_width_and_height(input_model)

    if output_model.soft_cascade_model.channels_description != "hog6_luv":
        raise RuntimeError("Only hog6_luv channels are currently supported")

    # Flip every stage of the cascade around the vertical axis.
    for stage in output_model.soft_cascade_model.stages:
        flip_stage(stage, model_width, model_height)

    print("Flipped %i stages in the cascade" %
          len(output_model.soft_cascade_model.stages))

    with open(output_path, "wb") as out_file:
        out_file.write(output_model.SerializeToString())
    print("Created output model file", output_path)
    return
def create_mirrored_occluded_model(input_path, output_path):
    """Mirror a left/right-occluded detector model to the opposite side.

    Reads a DetectorModel protobuf from *input_path*, flips every soft
    cascade stage horizontally, swaps the occlusion type (left <-> right),
    and serializes the result to *output_path*.

    :raises RuntimeError: if the model has no left/right occlusion, is not
        a soft-cascade model, or does not use "hog6_luv" channels.
    """
    # NOTE(review): duplicate of the earlier definition with the same name;
    # being later in the file, this is the one that takes effect.
    input_model = DetectorModel()
    with open(input_path, "rb") as f:
        input_model.ParseFromString(f.read())

    output_model = DetectorModel()

    print("Read model", input_model.detector_name or "(no name)", "was trained on ", input_model.training_dataset_name)

    occlusion_level, occlusion_type = get_occlusion_level_and_type(input_model, input_path)

    # Mirroring only makes sense for left/right occlusions.
    if occlusion_level == 0 or (occlusion_type not in [DetectorModel.LeftOcclusion, DetectorModel.RightOcclusion]):
        print("occlusion_level, occlusion_type == %s, %s" % (occlusion_level, occlusion_type))
        raise RuntimeError("Input model has no left or right occlusion")

    output_model.CopyFrom(input_model)

    output_model.occlusion_level = occlusion_level

    # Swap the occlusion side: mirroring a left model yields a right model
    # and vice versa.
    if occlusion_type == DetectorModel.LeftOcclusion:
        output_model.occlusion_type = DetectorModel.RightOcclusion
        output_model.detector_name += " transformed to right model"
    elif occlusion_type == DetectorModel.RightOcclusion:
        output_model.occlusion_type = DetectorModel.LeftOcclusion
        output_model.detector_name += " transformed to left model"
    else:
        raise RuntimeError("Only left and right occlusions are currently handled")

    if not output_model.soft_cascade_model:
        raise RuntimeError("Only SoftCascadeOverIntegralChannels models are currently supported")

    model_width, model_height = get_model_width_and_height(input_model)

    if output_model.soft_cascade_model.channels_description != "hog6_luv":
        raise RuntimeError("Only hog6_luv channels are currently supported")

    # Flip every stage of the cascade around the vertical axis.
    for stage in output_model.soft_cascade_model.stages:
        flip_stage(stage, model_width, model_height)

    print("Flipped %i stages in the cascade" % len(output_model.soft_cascade_model.stages))

    with open(output_path, "wb") as out_file:
        out_file.write(output_model.SerializeToString())
    print("Created output model file", output_path)
    return
# Example #4
# 0
def create_updated_model(input_path, cascades_thresholds_path, output_path):
    """Replace the per-stage cascade thresholds of a detector model.

    Reads a DetectorModel from *input_path* and a 1-D vector of thresholds
    from *cascades_thresholds_path* (numpy ``loadtxt`` format), rescales
    the thresholds by the sum of stage weights, and writes the updated
    model to *output_path*.

    :raises Exception: if the thresholds file contains more than one row.
    """
    model = DetectorModel()
    with open(input_path, "rb") as f:
        model.ParseFromString(f.read())

    thresholds = loadtxt(cascades_thresholds_path)

    print("Read all data, updating the cascade thresholds...")

    if len(thresholds.shape) != 1:
        raise Exception(
            "The number of cascade threshold lines is bigger than 1, check your cascade file"
        )

    # update all the cascade thresholds
    detector = model
    # cascade thresholds were computed using normalized weights,
    # so we need to scale them back
    weights_sum = sum(
        float(stage.weight) for stage in detector.soft_cascade_model.stages)

    for stage_index, stage in enumerate(detector.soft_cascade_model.stages):
        threshold = thresholds[stage_index]
        if abs(threshold) < 1E10:
            # non-maxfloat value: undo the weight normalization
            threshold *= weights_sum
        else:
            if threshold > 0:
                # bug in test_evaluation code, this should be a very negative value
                # (not a very positive one)
                threshold = -threshold
            # else: already a very negative sentinel, keep unchanged
        stage.cascade_threshold = threshold
    # end of "for each stage"

    with open(output_path, "wb") as out_file:
        out_file.write(model.SerializeToString())
    print("Created output model file", output_path)

    return
# Example #5
# 0
def create_brute_force_model():
    """Fix the metadata of the brute-force occluded models in place.

    For every bottom-crop and right-crop model at the hard-coded paths
    below, resets the model window size to 64x128 and the object window to
    (8, 16, 56, 112), then rewrites each model file in place.
    """
    bottom_path_pattern = "/users/visics/rbenenso/code/doppia/src/applications/boosted_learning/occluded_models/2012_10_16_2030_left_right_bottom_occlusions_0_to_half_brute_force/model_bottom_crop_%i"
    right_path_pattern = "/users/visics/rbenenso/code/doppia/src/applications/boosted_learning/occluded_models/2012_10_16_2030_left_right_bottom_occlusions_0_to_half_brute_force/model_right_crop_%i"

    # Crops come in multiples of 4: bottom up to 64, right up to 32.
    bottom_models_paths = [bottom_path_pattern % (i * 4) for i in range(1, 17)]
    right_models_paths = [right_path_pattern % (i * 4) for i in range(1, 9)]

    all_models_paths = bottom_models_paths + right_models_paths

    model_window_size = (64, 128)
    model_object_window = (8, 16, 56, 112)  # (min_x, min_y, max_x, max_y)
    shrinking_factor = 4

    for model_path in all_models_paths:
        input_model = DetectorModel()
        with open(model_path, "rb") as f:
            input_model.ParseFromString(f.read())

        print("Read model", input_model.detector_name or "(no name)",
              "was trained on ", input_model.training_dataset_name)

        # All brute-force models are expected to share the same shrinking.
        assert input_model.soft_cascade_model.shrinking_factor == shrinking_factor

        input_model.model_window_size.x = model_window_size[0]
        input_model.model_window_size.y = model_window_size[1]

        input_model.object_window.min_corner.x = model_object_window[0]
        input_model.object_window.min_corner.y = model_object_window[1]
        input_model.object_window.max_corner.x = model_object_window[2]
        input_model.object_window.max_corner.y = model_object_window[3]

        print("Updated model window size and object window")
        # Overwrite the model file in place.
        with open(model_path, "wb") as out_file:
            out_file.write(input_model.SerializeToString())
        print("Updated model file", model_path)
    # end of "for each model path"

    return
def create_multiscales_model(input_paths, output_path):
    """Bundle several single-scale detector models into one multiscales model.

    Each input model's scale is parsed from its training dataset name
    (expected ``DataSetName_octave_<x>``, giving scale ``2**x``); as a
    fallback ("plan B") the scale is parsed from the input file name. The
    combined MultiScalesDetectorModel is serialized to *output_path*.

    :raises RuntimeError: if a dataset name cannot be parsed or two input
        models define the same scale.
    """
    multiscales_model = MultiScalesDetectorModel()

    detectors_names = []
    detectors_datasets = []
    scales = []
    for input_path in input_paths:
        model = DetectorModel()
        with open(input_path, "rb") as f:
            model.ParseFromString(f.read())

        if model.detector_name:
            detectors_names.append(model.detector_name)

        training_dataset_name = model.training_dataset_name
        split_dataset_name = training_dataset_name.split("_")

        if len(split_dataset_name) <= 2:
            print(
                "WARNING: parsing training_dataset_name failed, using plan B")
            print("training_dataset_name ==", training_dataset_name,
                  "we expected something like DataSetName_octave_-0.5")
            # input_path is expected to be of the kind
            # src/applications/boosted_learning/2011_10_13_model_octave_-1.proto.softcascade.bin
            # yes this is a hack !, that is why it is called plan B.
            split_dataset_name = os.path.basename(input_path).split(
                ".")[0].split("_")

        assert len(split_dataset_name) > 2
        if split_dataset_name[-2] != "octave":
            raise RuntimeError("Input model should have a training dataset name of " \
                                "the type DataSetName_octave_-0.5. "\
                                "Instead received '%s'" % training_dataset_name)
        else:
            detectors_datasets.append(split_dataset_name[0])
        if split_dataset_name[-1] == "0.5":
            # 0.5849625 == log2(1.5): the "0.5 octave" models are mapped to
            # scale 1.5 rather than sqrt(2) -- looks intentional, confirm.
            model_scale = 2**float(0.5849625)
        else:
            model_scale = 2**float(split_dataset_name[-1])
        print("Model %s is at scale %.3f (dataset name == %s)" %
              (input_path, model_scale, split_dataset_name))
        # One model per scale: duplicate scales would make the bundle
        # ambiguous.
        if model_scale in scales:
            raise RuntimeError("The input models define twice the scale %.2f, " \
                                "there should be only one model per scale" % model_scale)
        scales.append(model_scale)
        model_element = multiscales_model.detectors.add()
        model_element.scale = model_scale
        #model_element.detector_name # we leave the element model name non-initialized
        model_element.training_dataset_name = training_dataset_name  # required

        model_element.model_window_size.CopyFrom(model.model_window_size)
        model_element.object_window.CopyFrom(model.object_window)
        model_element.detector_type = model.detector_type
        if model.detector_type == DetectorModel.LinearSvm:
            model_element.linear_svm_model.CopyFrom(model.linear_svm_model)
        elif model.detector_type == DetectorModel.SoftCascadeOverIntegralChannels:
            model_element.soft_cascade_model.CopyFrom(model.soft_cascade_model)
            print("Model %s has %i stages" % \
                    (input_path, len(model_element.soft_cascade_model.stages)))
        #elif model.detector_type == DetectorModel.HoughForest:
        #model_element.hough_forest_model.CopyFrom(model.hough_forest_model)

    # end of "for each input path"

    multiscales_model.detector_name = "Scales " + ", ".join(
        [str(x) for x in set(scales)])
    multiscales_model.training_dataset_name = " and ".join(
        [str(x) for x in set(detectors_datasets)])

    print("Multiscales detector name ==", multiscales_model.detector_name)
    print("Multiscales training dataset name ==",
          multiscales_model.training_dataset_name)

    with open(output_path, "wb") as out_file:
        out_file.write(multiscales_model.SerializeToString())
    print("Created output model file", output_path)
    return