def cam_testing_v2(self, img_path):
        """Run CAM algorithm on the specific image

        Returns:
            result_image
            prediction
        """
        from visualisation.core.utils import image_net_postprocessing, cam_tensor_to_numpy

        # Crop around the image center when the image has no JSON annotation
        img, _ = load_and_crop(img_path,
                               input_size=self.input_size,
                               center_point=(self.input_size / 2,
                                             self.input_size / 2))

        img = preprocess_input(img).to(self.device, non_blocking=True)
        img_w_hm, heatmap, prediction = self.vis(
            img.unsqueeze(0),
            None,
            tta_option=self.tta_rotate_opt_list,
            tta_transform=self.tta_option,
            postprocessing=image_net_postprocessing)

        result_image = cam_tensor_to_numpy(img_w_hm)

        return result_image, heatmap, prediction
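
A minimal usage sketch; `model` (an instance of the surrounding class) and the image path are assumptions:

# Hypothetical usage of cam_testing_v2.
overlay, heatmap, prediction = model.cam_testing_v2(r"D:\dataset\sample.bmp")
print("prediction:", prediction)
# The overlay comes back RGB; convert before writing with OpenCV.
cv2.imwrite("cam_overlay.bmp", cv2.cvtColor(overlay, cv2.COLOR_RGB2BGR))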
Example #2
import glob
import os
import shutil

import cv2

# load_and_crop is assumed to be defined elsewhere in this project.


def convert_image_to_npz(dataset):
    # Despite the name, this writes cropped .bmp images rather than .npz archives
    print("Cropping images...")
    list_path = [
        # os.path.join(dataset, "Train\\OriginImage"),
        # os.path.join(dataset, "Train\\TransformImage"),
        os.path.join(dataset, "Validation"),
        # os.path.join(dataset, "Test"),
    ]

    for subpath in list_path:
        target_path = os.path.join(
            dataset, "Cropped_image_from_{}".format(subpath.split("\\")[-1]))
        os.makedirs(target_path, exist_ok=True)
        for image_path in glob.glob(os.path.join(subpath, "*.bmp")):
            image_name = image_path.split("\\")[-1]
            img, _ = load_and_crop(image_path, 256)
            cv2.imwrite(os.path.join(target_path, image_name), img)
            shutil.copy(image_path + ".json",
                        os.path.join(target_path, image_name + ".json"))
    print("Finish")
Example #3

    def confusion_matrix_evaluate(self, single_model=True, keep_result=False):

        global gth_name
        csv_path = r"D:\something.csv"

        if_csv_exist = os.path.isfile(csv_path)
        print("Is center points csv file exist?: ", if_csv_exist)
        if if_csv_exist:
            center_point_info = pd.read_csv(csv_path)

        images_result_info = {"image_id": [], "defect": []}
        self.load_weight()
        self.pytorch_model.eval()

        parent_folder = self.config.DATASET_PATH.split("\\")[-1]

        if single_model:
            model_file_name = self.config.WEIGHT_PATH.split("\\")[-1].split(
                ".")[0] + "_" + parent_folder

            result_folder = f"_Result_{model_file_name}"

            workbook = xlsxwriter.Workbook("_model_" + model_file_name +
                                           "_result.xlsx")

            cell_format = workbook.add_format()
            cell_format.set_align('center')
            cell_format.set_align('vcenter')
            cell_format.set_text_wrap()

            highlight_format = workbook.add_format()
            highlight_format.set_align('center')
            highlight_format.set_align('vcenter')
            highlight_format.set_bg_color("red")

            Header = ["image_id", "Image", "Label", "Predict"]
            Header.extend(self.classes)
            Header.append("_Underkill_")
            Header.append("_Overkill_")

        image_size_display = min(int(self.input_size * 1.5),
                                 480)  # 192 when input_size is 128, capped at 480

        self.initialize_vis()

        sub_dirs_path = [
            os.path.join(self.config.DATASET_PATH, "Train"),
            os.path.join(self.config.DATASET_PATH, "Validation"),
            os.path.join(self.config.DATASET_PATH, "Test"),
            os.path.join(self.config.DATASET_PATH, "mvsd_test"),
            os.path.join(self.config.DATASET_PATH, "Underkill_images"),
            os.path.join(self.config.DATASET_PATH, "OD_blind_set"),
            os.path.join(self.config.DATASET_PATH, "OD_productline_set")
        ]
        # Drop sub-folders that are missing or empty
        sub_dirs_path = [
            directory for directory in sub_dirs_path
            if os.path.exists(directory) and len(os.listdir(directory)) > 0
        ]

        if len(sub_dirs_path) == 0:
            print("There is no sub-folder, will take the main-folder instead")
            sub_dirs_path.append(self.config.DATASET_PATH)

        # Loop over dataset path
        for sub_path in sub_dirs_path:
            sheet_name = sub_path.split("\\")[-1]
            # Training images live in an "OriginImage" sub-folder
            if "train" in sheet_name.lower():
                sub_path = os.path.join(sub_path, "OriginImage")

            # Skip non-existent or empty folders
            if not os.path.exists(sub_path) or len(os.listdir(sub_path)) == 0:
                continue

            progress_bar = tqdm(
                glob.glob(os.path.join(sub_path, "*.mvsd")) +
                glob.glob(os.path.join(sub_path, "*.bmp")))

            print(f"Inspecting PATH: {sub_path}")
            if single_model:
                start_row = 0
                start_column = 1
                # Excel limits sheet names to 31 characters; keep 30 to be safe
                worksheet = workbook.add_worksheet(sheet_name[:30])
                worksheet.write_row(start_row, start_column, Header,
                                    cell_format)
                worksheet.set_column("B:B", 15)
                worksheet.set_column("C:C",
                                     int(15 * (image_size_display / 192)))

            y_gth_eval_ls = []
            y_pred_eval_ls = []

            underkill_count = 0
            overkill_count = 0

            for image_path in progress_bar:
                if single_model:
                    Data = [0] * len(Header)
                    start_row += 1
                    worksheet.set_row(start_row,
                                      int(90 * (image_size_display / 192)))

                underkill_overkill_flag = 0

                image_name = image_path.split("\\")[-1]
                images_result_info["image_id"].append(image_name)
                center_points = None

                if if_csv_exist:
                    # Look up this image's center point in the CSV (drop image_id)
                    center_points = center_point_info.loc[
                        center_point_info["image_id"] ==
                        image_name].values.tolist()[0][1:]
                elif image_path.endswith(".mvsd"):
                    center_points = GetCenter_MVSD(
                        image_path, self.config.INDEX_TRAINING_LAYER)

                img, _ = load_and_crop(image_path, self.input_size,
                                       center_points,
                                       self.config.INDEX_TRAINING_LAYER)

                pred_id, all_scores, pred_name = self.predict_one(img)

                jsonpath = image_path + ".json"
                with open(jsonpath, "r") as json_file:
                    img_data = json.load(json_file)

                # Get the class ground truth - this works even with the OD format
                temp_classId = img_data['classId']
                # Skip images whose 'classId' field is empty
                if len(temp_classId) == 0:
                    continue

                if len(temp_classId) > 1:
                    # Prefer the first "reject" class when several are listed
                    gth_name = next(
                        (classId for classId in temp_classId
                         if "reject" in classId.lower()), temp_classId[0])
                else:
                    gth_name = temp_classId[0]

                if (gth_name in self.failClasses
                        or "reject" in gth_name.lower()) and (
                            pred_name in self.passClasses
                            or pred_id == len(self.classes)):  # Underkill
                    underkill_count += 1
                    underkill_overkill_flag = -1

                elif (gth_name in self.passClasses
                      or "overkill" in gth_name.lower()
                      ) and pred_name in self.failClasses:  # Overkill
                    overkill_count += 1
                    underkill_overkill_flag = 1

                else:  # Correct result
                    pass

                if single_model:

                    # Copy confidently-classified "Empty" images: score above
                    # 1/num_classes + 0.5/num_classes, i.e. 1.5 / num_classes
                    if gth_name == "Empty" and max(all_scores) > 1 / len(
                            self.classes) + 1 / len(self.classes) / 2:
                        mvsd_output_path = os.path.join(
                            "mvsd_Result", pred_name)
                        os.makedirs(mvsd_output_path, exist_ok=True)
                        mvsd_path = os.path.join(mvsd_output_path,
                                                 image_path.split("\\")[-1])
                        shutil.copy(image_path, mvsd_path)

                    os.makedirs(os.path.join(result_folder, gth_name),
                                exist_ok=True)
                    # Drop the original extension and save as .bmp
                    # (str.strip removes characters, not a ".mvsd" suffix)
                    save_path = os.path.join(
                        result_folder, gth_name,
                        os.path.splitext(image_name)[0] + ".bmp")
                    img = cv2.resize(img,
                                     (image_size_display, image_size_display))
                    cv2.imwrite(save_path, img)
                # Don't need the json file for now
                # shutil.copy(image_path + ".json", os.path.join(result_path, image_name + ".json"))

                # Ground truth
                if "reject" in gth_name.lower():
                    y_gth_eval_ls.append(0)

                elif "overkill" in gth_name.lower():
                    y_gth_eval_ls.append(1)

                else:
                    y_gth_eval_ls.append(2)

                # Result
                if "reject" in pred_name.lower():
                    y_pred_eval_ls.append(0)
                    images_result_info["defect"].append("Reject")

                elif "overkill" in pred_name.lower():
                    y_pred_eval_ls.append(1)
                    images_result_info["defect"].append("Overkill")

                else:
                    y_pred_eval_ls.append(2)
                    images_result_info["defect"].append("Unknown")

                if single_model:
                    Data[0] = image_name.split(".")[0]
                    Data[2] = gth_name
                    Data[3] = pred_name
                    Data[4:4 + len(self.classes)] = all_scores
                    Data[-2] = underkill_overkill_flag == -1  # Underkill
                    Data[-1] = underkill_overkill_flag == 1   # Overkill
                    for index, info in enumerate(Data):
                        # Highlight the True underkill/overkill flags in red
                        excel_format = highlight_format if info is True else cell_format
                        if index == 1:
                            # Column B holds the image thumbnail
                            worksheet.insert_image(
                                start_row, index + 1, save_path,
                                {'x_scale': 0.5, 'y_scale': 0.5,
                                 'x_offset': 5, 'y_offset': 5,
                                 'object_position': 1})
                        else:
                            worksheet.write(start_row, index + 1, info,
                                            excel_format)


                # In multi-model screening mode, stop early once the model is
                # clearly bad (more than 25 underkills or 2000 overkills)
                if (underkill_count > 25
                        or overkill_count > 2000) and not single_model:
                    print("\n")
                    break

            if single_model:
                header = [{'header': head} for head in Header]

                worksheet.add_table(0, 1, start_row, len(Header),
                                    {'columns': header})
                worksheet.freeze_panes(1, 0)
                worksheet.hide_gridlines(2)

            confusion_matrix = metrics.confusion_matrix(
                y_gth_eval_ls, y_pred_eval_ls)
            print(f"Confusion matrix : \n{confusion_matrix}")

        if single_model:
            workbook.close()
            df_result = pd.DataFrame(data=images_result_info)
            df_result.to_csv(f"mvsd_result_{model_file_name}.csv", index=False)

            if not keep_result:
                shutil.rmtree(result_folder)

        else:
            del self.pytorch_model
            model_name = "\\".join(self.config.WEIGHT_PATH.split("\\")[-4:])
            # Return the two off-diagonal cells of the last confusion matrix:
            # rejects predicted as overkill, and overkills predicted as reject
            cm = confusion_matrix.tolist()
            return model_name, cm[0][1], cm[1][0]
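
A minimal calling sketch; `model` is a hypothetical instance of the surrounding wrapper class:

# Full evaluation: writes "_model_<weight>_<dataset>_result.xlsx" plus a CSV,
# keeping the per-class image dumps on disk.
model.confusion_matrix_evaluate(single_model=True, keep_result=True)

# Screening mode: no workbook; returns the model name plus the two
# off-diagonal cells (rejects predicted overkill, overkills predicted reject).
name, underkill, overkill = model.confusion_matrix_evaluate(single_model=False)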
Example #4
    def confusion_matrix_evaluate(self):
        generator_list = self.prepare_data()

        self.pytorch_model.eval()

        workbook = xlsxwriter.Workbook("_model_result.xlsx")

        cell_format = workbook.add_format()
        cell_format.set_align('center')
        cell_format.set_align('vcenter')

        highlight_format = workbook.add_format()
        highlight_format.set_align('center')
        highlight_format.set_align('vcenter')
        highlight_format.set_bg_color("red")

        Header = ["image_id", "Image", "Label", "Predict"]
        Header.extend(self.classes)
        Header.append("Underkill")
        Header.append("Overkill")

        # In binary mode, collapse everything to a single Reject/Pass pair
        self.failClasses = ["Reject"] if self.binary_option else self.failClasses
        self.passClasses = ["Pass"] if self.binary_option else self.passClasses

        fail_class_index = [
            self.classes.index(class_) for class_ in self.failClasses
        ]
        pass_class_index = [
            self.classes.index(class_) for class_ in self.passClasses
        ]

        for generator in generator_list:
            generator_loader = DataLoader(generator,
                                          batch_size=1,
                                          shuffle=False,
                                          num_workers=0)
            print(f"Inspecting PATH: {generator.input_dir}")

            start_row = 0
            start_column = 1
            worksheet = workbook.add_worksheet(
                generator.input_dir[0].split("\\")[-1])
            worksheet.write_row(start_row, start_column, Header, cell_format)
            worksheet.set_column("C:C", 10)

            progress_bar = tqdm(generator_loader)
            y_gth_eval_ls = []
            y_pred_eval_ls = []

            for data_eval in progress_bar:
                with torch.no_grad():
                    Data = [0] * len(Header)
                    start_row += 1
                    worksheet.set_row(start_row, 60)
                    underkill_overkill_flag = 0

                    # Only the label tensor is used here; predict_one() below
                    # re-runs inference on the freshly cropped image
                    labels_eval = data_eval[1].to(self.device,
                                                  non_blocking=True)

                    image_path = data_eval[2][0]
                    image_name = image_path.split("\\")[-1]

                    img, gt_name = load_and_crop(image_path, self.input_size)

                    pred_id, all_scores, pred_name = self.predict_one(img)

                    gt_id = labels_eval.tolist()[0]

                    if self.binary_option:
                        gt_name = 'Reject' if gt_name in self.failClasses else 'Pass'

                    # Classify the outcome and route the image (plus its JSON)
                    # to the matching "_Result/<folder>[/UK|/OK]" directory
                    if gt_id in fail_class_index and (
                            pred_id in pass_class_index
                            or pred_id == len(self.classes)):  # Underkill
                        sub_folder = "UK"
                        underkill_overkill_flag = -1

                    elif gt_id in pass_class_index and pred_id in fail_class_index:  # Overkill
                        sub_folder = "OK"
                        underkill_overkill_flag = 1

                    else:  # Correct result
                        sub_folder = ""

                    result_path = os.path.join("_Result",
                                               image_path.split("\\")[-2],
                                               sub_folder)
                    os.makedirs(result_path, exist_ok=True)
                    image_output_path = os.path.join(result_path, image_name)
                    cv2.imwrite(image_output_path, img)
                    shutil.copy(
                        image_path + ".json",
                        os.path.join(result_path, image_name + ".json"))

                    y_gth_eval_ls.extend(labels_eval.tolist())
                    y_pred_eval_ls.extend([pred_id])

                    Data[0] = image_name.split(".")[0]
                    Data[2] = gt_name
                    Data[3] = pred_name
                    Data[4:4 + len(self.classes)] = all_scores
                    Data[-2] = True if underkill_overkill_flag == -1 else False
                    Data[-1] = True if underkill_overkill_flag == 1 else False
                    # print(f"[DEBUG]:\n{Data}")
                    for index, info in enumerate(Data):

                        excel_format = highlight_format if (
                            Data[index] == True
                            and isinstance(Data[index], bool)) else cell_format

                        worksheet.insert_image(start_row, index + 1, image_output_path, {'x_scale': 0.5,'y_scale': 0.5, 'x_offset': 5, 'y_offset': 5,'object_position':1}\
                            ) if index == 1 else worksheet.write(start_row, index + 1, Data[index], excel_format)

                    progress_bar.update()

            header = [{'header': head} for head in Header]

            worksheet.add_table(0, 1, start_row, len(Header),
                                {'columns': header})
            worksheet.freeze_panes(1, 0)
            worksheet.hide_gridlines(2)

            confusion_matrix = metrics.confusion_matrix(
                y_gth_eval_ls, y_pred_eval_ls)
            print(f"Confusion matrix : \n{confusion_matrix}")

        workbook.close()
Example #5
                os.makedirs(folder, exist_ok=True)

            for filename in os.listdir(test_dir):
                if filename.endswith('.json'):
                    img_path = os.path.join(test_dir,
                                            filename.split('.json')[0])
                    with open(os.path.join(test_dir, filename),
                              encoding='utf-8') as jf:
                        json_data = json.load(jf)
                        gt_name = json_data['classId'][0]
                        gt_id = -1
                        for i, label in enumerate(config.CLASS_NAME):
                            if label == gt_name:
                                gt_id = i
                        box = dict(json_data['box'])
                        img, _ = load_and_crop(img_path, config.INPUT_SIZE)
                        pred_id, pred_score, pred_name = model.predict_one(img)

                        pred_dir = ''
                        if gt_name == 'Pass' and pred_name == 'Reject':  # FP - OK
                            pred_dir = os.path.join(test_dir, 'Overkill')
                            detail['Overkill'] += 1
                        elif gt_name == 'Reject' and pred_name == 'Pass':  # FN - UK
                            pred_dir = os.path.join(test_dir, 'Underkill')
                            detail['Underkill'] += 1
                        elif gt_name == pred_name:
                            pred_dir = os.path.join(test_dir, pred_name)
                            detail[pred_name] += 1

                        # The example is truncated here in the source; a
                        # plausible completion writes the cropped image into
                        # the predicted sub-folder:
                        cv2.imwrite(
                            os.path.join(pred_dir,
                                         filename.split('.json')[0]), img)
Example #6
import glob
import os

import pandas as pd
from tqdm import tqdm

# load_and_crop, pick_Threshold, ReadImageToDataFrame, process_img and the
# GetFeature_* helpers are assumed to be defined elsewhere in this project.


def get_dataframe(list_Path, fail_list_class, dataframe_name):

    list_Dataframe = []
    for Path in list_Path:
        # One pass over the folder; load_and_crop returns (image, class name)
        Reject_ls, Pass_ls = [], []
        for image in glob.glob(os.path.join(Path, "*.bmp")):
            class_name = load_and_crop(image)[1]
            if class_name in fail_list_class:
                Reject_ls.append(image)
            if class_name == "Pass":
                Pass_ls.append(image)

        DictofClasses = {"Reject": Reject_ls, "Pass": Pass_ls}
        if "train" in Path.lower():
            multi_threshold = pick_Threshold([Reject_ls])
            otsu_threshold = pd.DataFrame()
            otsu_threshold['multi_threshold'] = multi_threshold
            otsu_threshold.to_pickle("otsu_threshold.pkl")
        else:
            multi_threshold = pd.read_pickle(
                "otsu_threshold.pkl")['multi_threshold'].tolist()

        list_Dataframe.append(ReadImageToDataFrame(Path, DictofClasses))

    for DataFrame_ in list_Dataframe:
        DF_model = DataFrame_.copy()
        print()
        progress_bar = tqdm(DF_model.loc[:, "Path"])
        for row, imgpath in enumerate(progress_bar):
            image_color = process_img(imgpath)

            # Extract the three hand-crafted features for this image
            feature_gray_diff = GetFeature_Differential_GrayInfo_Mean(
                image_color, multi_threshold)
            feature_bit_diff = GetFeature_Differential_BitInfo_Mean(
                image_color)
            feature_contours = GetFeature_ContoursCount(image_color)

            DF_model.loc[row, "Feature01"] = feature_gray_diff
            DF_model.loc[row, "Feature02"] = feature_bit_diff
            DF_model.loc[row, "Feature03"] = feature_contours
        # Build one feature vector from the first row to size the feature
        # columns (each GetFeature_* result is a sequence and gets flattened)
        idx = 0
        Feature_Vector = []
        Feature_Vector.append([DF_model.loc[idx, "Path"]])
        Feature_Vector.append([DF_model.loc[idx, "Class"]])
        Feature_Vector.append(DF_model.loc[idx, "Feature01"])
        Feature_Vector.append(DF_model.loc[idx, "Feature02"])
        Feature_Vector.append(DF_model.loc[idx, "Feature03"])
        Feature_Vector = [
            item for sublist in Feature_Vector for item in sublist
        ]

        ColumnNames = ["Path", "Class"]
        FeaturesColumn = [
            "Feature " + str(i + 1)
            for i in range(len(Feature_Vector) - len(ColumnNames))
        ]
        ColumnNames = ColumnNames + FeaturesColumn
        DataFrame_Model = pd.DataFrame(columns=ColumnNames)

        for idx in DF_model.index:
            Feature_Vector = []
            Feature_Vector.append([DF_model.loc[idx, "Path"]])
            Feature_Vector.append([DF_model.loc[idx, "Class"]])
            Feature_Vector.append(DF_model.loc[idx, "Feature01"])
            Feature_Vector.append(DF_model.loc[idx, "Feature02"])
            Feature_Vector.append(DF_model.loc[idx, "Feature03"])
            Feature_Vector = [
                item for sublist in Feature_Vector for item in sublist
            ]

            DataFrame_Model.loc[idx, :] = Feature_Vector

        # Note: each split's DataFrame writes to the same pickle, so the file
        # ends up holding only the last one processed
        DataFrame_Model.to_pickle(dataframe_name + 'DF_Model.pkl')
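
A minimal call sketch; the paths and the fail-class list are hypothetical:

# Put the "Train" path first so the Otsu thresholds are computed and cached
# before the other splits try to read them back from otsu_threshold.pkl.
get_dataframe([r"D:\dataset\Train", r"D:\dataset\Validation"],
              ["Reject"], "train_val_")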
Example #7
from skimage import img_as_ubyte


def process_img(img_path):
    # load_and_crop (defined elsewhere in this project) is given the path with
    # its ".bmp" extension restored and returns (image, class name)
    cropped_image, _ = load_and_crop(img_path + ".bmp", input_size=128)
    image_color = img_as_ubyte(cropped_image)
    return image_color
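
A minimal usage sketch; the path is hypothetical (process_img appends the ".bmp" extension itself):

image_color = process_img(r"D:\dataset\sample_001")
print(image_color.shape, image_color.dtype)  # img_as_ubyte yields uint8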