Example No. 1
def get_feat(feature_method, patches):
    msg.timemsg("Generate {} features start".format(feature_method))
    X = None
    if feature_method == "hog":
        X = hog.features(patches, MULTIPLE)
    elif feature_method == "cnn":
        X = cnn.cnn_instance.features(patches, MULTIPLE)
    elif feature_method == "lbp":
        X = lbp.features(patches, MULTIPLE)
    msg.timemsg("Generate features done")
    return X
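
A minimal usage sketch (not part of the original example), assuming `patches` is a list of image patches and the HOG extractor has been initialised as in Example No. 7:

hog.ini()                     # initialise the HOG feature extractor first
X = get_feat("hog", patches)  # feature matrix, one row per patch
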
Example No. 2
def __dump(X, path):
    if os.path.exists(path):
        msg.timemsg('Will overwrite file {} with new pickle dump'.format(path))
    try:
        msg.timemsg("Dump to {}".format(path))
        with open(path, "wb") as f:
            pickle.dump(X, f, protocol=pickle.HIGHEST_PROTOCOL)
        msg.timemsg("Dump done")
    except Exception:
        msg.timemsg('Exception during dump of {}'.format(path))
        msg.timemsg(traceback.format_exc())
Example No. 3
def generate_training_data(root_path,
                           json_path,
                           patch_size_height,
                           patch_size_width,
                           depth_min,
                           depth_max,
                           show=False,
                           batch_size=2000):
    list_patches = []
    list_labels = []
    counter = 1

    with open(os.path.join(root_path, json_path)) as f:
        entries = json.load(f)

    for i in entries:
        if check_depth(i["depth"], depth_min, depth_max):

            path_rgb_image = os.path.join(root_path, i["image"])

            try:
                rgb_image = imread(path_rgb_image)
            except Exception:
                msg.timemsg('Could not load image: {}'.format(path_rgb_image))
                continue
            if rgb_image is not None:
                if show:
                    cv2.imshow(" ", rgb_image)
                    cv2.waitKey(0)
                path_rgb_pixelmap = os.path.join(root_path, i["ground-truth"])
                try:
                    rgb_pixelmap = imread(path_rgb_pixelmap)
                except Exception:
                    msg.timemsg('Could not load pixelmap: {}'.format(
                        path_rgb_pixelmap))
                    continue
                if rgb_pixelmap is not None:
                    pos_patches, neg_patches = generate_patches(
                        rgb_image, rgb_pixelmap, patch_size_height,
                        patch_size_width)
                    # append the patches and their labels to the lists
                    for patch in pos_patches:
                        list_patches.append(patch)  # positive patch
                        list_labels.append(1)  # label 1 for positive

                    for patch in neg_patches:
                        list_patches.append(patch)  # negative patch
                        list_labels.append(0)  # label 0 for negative
                    if counter % batch_size == 0:
                        yield list_patches, list_labels
                        list_patches = []
                        list_labels = []
                    counter += 1
                else:
                    msg.timemsg('Could not load pixelmap: {}'.format(
                        path_rgb_pixelmap))
            else:
                msg.timemsg('Could not load image: {}'.format(path_rgb_image))
    yield list_patches, list_labels
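
A sketch of how this generator might be consumed batch by batch; the root path, JSON name, patch size and depth range below are assumed placeholder values:

for list_patches, list_labels in generate_training_data(
        "/data/experiment",   # assumed root_path
        "train.json",         # assumed json_path
        32, 32,               # assumed patch height and width
        0.5, 5.0,             # assumed depth_min and depth_max
        batch_size=2000):
    print(len(list_patches), len(list_labels))
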
Example No. 4
def __load(path):
    if os.path.exists(path):
        msg.timemsg("Load dump from {}".format(path))
        with open(path, "rb") as f:
            X = pickle.load(f)
        msg.timemsg("Load dump done")
        return X
    else:
        msg.timemsg('Could not load pickle: {}'.format(path))
        return None
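
Together with Example No. 2 this forms a simple pickle round trip; a minimal sketch with an assumed file path:

__dump([1, 2, 3], "/tmp/demo.feat")  # assumed path; serialises any picklable object
data = __load("/tmp/demo.feat")      # returns the object, or None if the file is missing
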
Example No. 5
def prediction(root_path,
               json_path,
               pattern,
               features,
               patch_size_height,
               patch_size_width,
               depth_min,
               depth_max,
               args,
               show=False,
               write=True):
    with open(os.path.join(root_path, json_path)) as f:
        test_json = json.load(f)
    n_test = len(test_json)
    counter = 1
    for i in test_json:
        if check_depth(i["depth"], depth_min, depth_max):
            # time measurement
            start_time = time.time()
            path_rgb_image = os.path.join(root_path, i["image"])
            try:
                loaded_picture = imread(path_rgb_image)
            except Exception:
                msg.timemsg('Could not load image: {}'.format(path_rgb_image))
                continue
            path_rgb_pixelmap = os.path.join(root_path, i["ground-truth"])
            try:
                loaded_pixelmap = imread(path_rgb_pixelmap)
            except Exception:
                msg.timemsg(
                    'Could not load pixelmap: {}'.format(path_rgb_pixelmap))
                continue
            if loaded_picture is not None and loaded_pixelmap is not None:
                height = loaded_picture.shape[0]
                width = loaded_picture.shape[1]
                dumpname = get_dumpname(args)
                if pattern == "RP":  # rectangle pattern
                    coordinates = rectangle.create_coordinates_list(
                        patch_size_height, patch_size_width, height, width)
                    patches = rectangle.patches(loaded_picture, coordinates,
                                                patch_size_height,
                                                patch_size_width)
                if pattern == "SP_SLIC":  # simple linear iterative clustering
                    patches, segments = superpixel.patches(
                        loaded_picture, "SP_SLIC", patch_size_height,
                        patch_size_width, height, width
                    )  # patches were converted to rectangles; segments are described by pixel coordinates
                if pattern == "SP_CW":  # compact watershed
                    patches, segments = superpixel.patches(
                        loaded_picture, "SP_CW", patch_size_height,
                        patch_size_width, height, width
                    )  # patches were converted to rectangles; segments are described by pixel coordinates
                # feature extraction: LBP, HOG, CNN
                X = get_features(features, patches, '', serialize=False)
                # prediction
                prediction = lr.predict(X, MULTIPLE)
                # time measurement
                elapsed_time = (time.time() - start_time) * 1000  # ms
                global sumarized_prediction_time
                sumarized_prediction_time += elapsed_time
                # calculate pixelmaps
                if pattern == "RP":
                    coordinates = rectangle.create_coordinates_list(
                        patch_size_height, patch_size_width, height, width)
                    classifier_pixel_map = rectangle.logical_pixelmap(
                        prediction, height, width, coordinates,
                        patch_size_height, patch_size_width)
                if pattern == "SP_SLIC" or pattern == "SP_CW":
                    classifier_pixel_map = superpixel.logical_pixelmap(
                        segments, prediction, height, width
                    )  # segments hold the coordinates of every pixel in each segment
                # evaluation
                annotated_logical_pixelmap = rgb_pixelmap_to_logical_pixelmap(
                    loaded_pixelmap)
                dict_bundle = evaluate(i["image"], classifier_pixel_map,
                                       annotated_logical_pixelmap)
                global list_eval
                list_eval.append(dict_bundle)
                # debug
                if (args.mode == "debug"):
                    # show colored picture
                    picture = ink_image(loaded_picture, classifier_pixel_map)
                    if show:
                        cv2.imshow(" ", picture)
                        cv2.waitKey(0)
                    if write:
                        debug_base_path = os.path.split(args.output)[0]
                        debug_img_name = os.path.basename(path_rgb_image)
                        debug_out_path = os.path.join(
                            debug_base_path, dumpname + debug_img_name)
                        cv2.imwrite(debug_out_path,
                                    cv2.cvtColor(picture, cv2.COLOR_RGB2BGR))
            else:
                if loaded_pixelmap is None:
                    msg.timemsg('Could not load pixelmap: {}'.format(
                        path_rgb_pixelmap))
                if loaded_picture is None:
                    msg.timemsg(
                        'Could not load image: {}'.format(path_rgb_image))
            msg.timemsg('Prediction Progress: {:.1f}%'.format(
                100.0 * counter / n_test))
            counter += 1
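
prediction() does not return its results; it accumulates them in module-level state. A minimal sketch of the globals it assumes exist (initial values are assumptions):

sumarized_prediction_time = 0.0  # total prediction time in milliseconds, summed per image
list_eval = []                   # one evaluation dict (from evaluate()) per processed image
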
Example No. 6
def train(root_path, json_path, features, patch_size_height, patch_size_width,
          graph, depth_min, depth_max, args):

    # time measurement
    start_time = time.time()

    counter = 0
    labels = []
    # generate patches
    msg.timemsg("Generate patches start")
    with open(root_path + "/" + json_path) as f:
        train_list = json.load(f)

    base_path = os.path.split(args.output)[0]
    path = os.path.join(base_path, get_dumpname(args))
    clf_path = path + '.clf'
    if os.path.exists(clf_path):
        lr.ini(path=clf_path)
    else:
        for list_patches, list_labels in generate_training_data(
                root_path,
                json_path,
                patch_size_height,
                patch_size_width,
                depth_min,
                depth_max,
                batch_size=BATCH_SIZE):
            msg.timemsg("Batch {}: Patch generation done".format(counter))
            msg.timemsg("{} patches have been generated".format(len(list_patches)))
            # extract features by respective method
            msg.timemsg("Batch {}: Generate features start".format(counter))
            feat_path = path + '.train.batch{}.feat'.format(counter)
            X_split = get_features(features,
                                   list_patches,
                                   feat_path,
                                   serialize=SERIALIZE_FEATURES)
            msg.timemsg("Batch {}: Generate features done".format(counter))
            if counter == 0:
                X = X_split
            else:
                X = np.vstack((X, X_split))
            labels += list_labels
            counter += 1
        msg.timemsg('Generated all features!')

        lbl_path = path + '.train.lbls'
        if os.path.exists(lbl_path):
            labels = __load(lbl_path)
        else:
            __dump(labels, lbl_path)
        # init lr
        lr.ini()
        # train lr
        msg.timemsg("Training Classifier start")
        lr.train(np.array(X), np.array(labels), path=clf_path)
        msg.timemsg("Training Classifier end")

        # time measurement
        elapsed_time = (time.time() - start_time) * 1000  # ms
        global training_time
        training_time = elapsed_time
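
train() likewise relies on module-level configuration and state; an illustrative sketch with assumed values:

BATCH_SIZE = 2000          # assumed batch size passed to generate_training_data()
SERIALIZE_FEATURES = True  # assumed flag controlling whether per-batch features are dumped
training_time = 0.0        # set by train() to the elapsed training time in milliseconds
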
Example No. 7
    args = parser.parse_args()

    dict_input["depth_min"] = args.depth_min
    dict_input["depth_max"] = args.depth_max
    dict_input["patch_size_width"] = args.patch_size_width
    dict_input["patch_size_height"] = args.patch_size_height
    dict_input["feature"] = args.features
    dict_input["patch_type"] = args.pattern

    msg.ini(args.output + '.log')
    # init respective feature extraction method
    if (args.features == "hog"): hog.ini()
    if (args.features == "cnn"): cnn.ini(args.graph)
    if (args.features == "lbp"): lbp.ini()
    msg.timemsg("Training started")
    train(args.folder_root, args.eval_train, args.features,
          int(args.patch_size_height), int(args.patch_size_width), args.graph,
          float(args.depth_min), float(args.depth_max), args)
    dict_experiment["training"] = training_time
    msg.timemsg("Training finished")

    msg.timemsg("Prediction started")
    prediction(args.folder_root, args.eval_test, args.pattern, args.features,
               int(args.patch_size_height), int(args.patch_size_width),
               float(args.depth_min), float(args.depth_max), args)
    dict_experiment["prediction"] = sumarized_prediction_time
    msg.timemsg("Prediction finished")

    if (args.features == "cnn"): cnn.close()
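
The snippet starts at parser.parse_args(), so the parser definition lies outside the excerpt. A hypothetical argparse setup that would supply the attributes referenced above could look like this (argument names inferred from the attribute accesses; everything here is an assumption, not the original code):

import argparse

parser = argparse.ArgumentParser(description="patch-based segmentation experiment")
parser.add_argument("--folder_root")        # dataset root directory
parser.add_argument("--eval_train")         # training-split JSON, relative to folder_root
parser.add_argument("--eval_test")          # test-split JSON, relative to folder_root
parser.add_argument("--features", choices=["hog", "cnn", "lbp"])
parser.add_argument("--pattern", choices=["RP", "SP_SLIC", "SP_CW"])
parser.add_argument("--patch_size_height")
parser.add_argument("--patch_size_width")
parser.add_argument("--depth_min")
parser.add_argument("--depth_max")
parser.add_argument("--graph")              # CNN graph file, only needed for --features cnn
parser.add_argument("--output")             # base path for logs and dumps
parser.add_argument("--mode")               # "debug" writes the inked prediction images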