def runTrainning(epochs):
    """Train the heads of a stock Mask R-CNN from COCO weights.

    Args:
        epochs: number of training epochs to run.

    Side effects: builds the train/val datasets from WORK_DIR spreadsheets,
    loads COCO weights (head layers excluded), and runs training, writing
    checkpoints under MODEL_DIR.
    """
    config = ProjectConfig()
    config.NAME = "MyMRCNN_origin_Model"
    config.display()

    # Build train/validation datasets from the spreadsheet manifests.
    dataSetFact = dataSetlib.ImageDataSetForMaskFactory(WORK_DIR)
    dataSetFact.initialize(WORK_DIR + "/transformed_train.xlsx",
                           WORK_DIR + '/data_train.xlsx',
                           WORK_DIR + '/data_val.xlsx')
    # FIX: preload_images() was called twice; once is sufficient.
    dataSetFact.preload_images()
    dataset_train, dataset_val = dataSetFact.getDataSet()
    dataset_train.prepare()
    dataset_val.prepare()

    model = modellib.MaskRCNN(mode="training",
                              config=config,
                              model_dir=MODEL_DIR)
    # Start from COCO weights; the class-specific head layers are excluded
    # because the number of classes differs from COCO.
    model.load_weights(COCO_MODEL_PATH,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc",
                           "mrcnn_bbox", "mrcnn_mask"
                       ])
    # FIX: honour the `epochs` parameter (was hard-coded to 50),
    # matching the sibling runTrainning variant that passes epochs through.
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=epochs,
                layers='heads')
def runTrainning(epochs):
    """Fine-tune all layers of the custom-backbone model, resuming from the
    last checkpoint.

    Args:
        epochs: number of training epochs to run.

    Side effects: builds datasets from the "clean_*" spreadsheets in
    WORK_DIR, resumes from the most recent checkpoint, and trains all
    layers, writing checkpoints under MODEL_DIR.
    """
    config = Config()
    config.NAME = "MyMRCNN_WHOLE_Model"
    config.display()
    # NOTE: set after display(), so the printed config shows the default
    # MEAN_PIXEL, not this dataset-specific value.
    config.MEAN_PIXEL = np.array([66.47, 71.05, 83.27])

    dataSetFact = dataSetlib.ImageDataTrainningFactory(WORK_DIR)
    dataSetFact.initialize(WORK_DIR + "/clean_train_images.xlsx",
                           WORK_DIR + '/clean_data_train.xlsx',
                           WORK_DIR + '/clean_data_val.xlsx')
    dataSetFact.preload_images()
    dataset_train, dataset_val = dataSetFact.getDataSet()
    dataset_train.prepare()
    dataset_val.prepare()
    # Run the datasets in classification-only mode (no mask targets).
    dataset_train.noMask = dataset_val.noMask = True

    model = modellib.MyBackboneModel(mode="training",
                                     config=config,
                                     model_dir=MODEL_DIR)
    # Resume from the most recent checkpoint in MODEL_DIR.
    model.load_weights(model.find_last(), by_name=True)

    learning_rate = 0.001
    # FIX: honour the `epochs` parameter (was hard-coded to 50),
    # consistent with the from-scratch runTrainning variant.
    model.train(dataset_train,
                dataset_val,
                learning_rate=learning_rate,
                epochs=epochs,
                layers='all')
def runTrainning(epochs):
    """Train the heads of a Mask R-CNN with a custom dense backbone.

    Args:
        epochs: number of training epochs to run.

    Side effects: builds datasets from the "mrcnn_*" spreadsheets in
    WORK_DIR, loads weights, and trains the head layers, writing
    checkpoints under MODEL_DIR.
    """
    config = cfg.CloudPatternConfig()
    config.NAME = "MyMRCNN_Inherited_Model"
    # Swap in the custom backbone graph and its shape function.
    config.BACKBONE = graph.dense_graph_simple_long
    config.COMPUTE_BACKBONE_SHAPE = compute_backbone_shapes
    config.IMAGE_SHAPE = [384, 576, 3]
    config.display()

    dataSetFact = dataSetlib.ImageDataSetForMRCNNFactory(WORK_DIR)
    dataSetFact.initialize(WORK_DIR + "/mrcnn_training_images.xlsx",
                           WORK_DIR + '/mrcnn_data_train.xlsx',
                           WORK_DIR + '/mrcnn_data_val.xlsx')
    # FIX: preload_images() was called twice; once is sufficient.
    dataSetFact.preload_images()
    dataset_train, dataset_val = dataSetFact.getDataSet()
    dataset_train.prepare()
    dataset_val.prepare()

    model = modellib.MaskRCNN(mode="training",
                              config=config,
                              model_dir=MODEL_DIR)
    # NOTE(review): this initial load is immediately followed by a load of
    # the last checkpoint, which (by_name) overwrites matching layers —
    # likely only needed on the very first run. Kept as-is to preserve
    # behavior; confirm whether the first load is still required.
    model.load_weights(INITIAL_MODEL_PATH,
                       by_name=True,
                       exclude=[
                           "mrcnn_class_logits", "mrcnn_bbox_fc",
                           "mrcnn_bbox", "mrcnn_mask"
                       ])
    model.load_weights(model.find_last(), by_name=True)
    # FIX: honour the `epochs` parameter (was hard-coded to 150),
    # consistent with the other runTrainning variants.
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=epochs,
                layers='heads')
def runTesting():
    """Evaluate the custom-backbone model on the validation split with a
    Dice coefficient computed over all images and all four classes.

    Side effects: reads images/masks from WORK_DIR, runs the model on each
    validation row, and prints the aggregate Dice score.
    """
    config = Config()
    config.NAME = "MyMRCNN_WHOLE_Model"
    config.display()

    dataSetFact = dataSetlib.ImageDataSetForMaskFactory(WORK_DIR)
    dataSetFact.initialize(WORK_DIR + "/transformed_train.xlsx",
                           WORK_DIR + '/data_train.xlsx',
                           WORK_DIR + '/data_val.xlsx')
    dataSetFact.preload_images()
    dataset_train, dataset_val = dataSetFact.getDataSet()
    dataset_train.prepare()
    dataset_val.prepare()  # Validation dataset

    # Ground-truth rows for the validation split.
    df = pd.read_excel(WORK_DIR + "/data_val.xlsx")
    # NOTE(review): mode="training" is used even though this is an
    # evaluation pass — presumably the training graph is needed because it
    # accepts the extra mask input fed to predict() below; confirm.
    model = modellib.MyBackboneModel(mode="training",
                                     config=config,
                                     model_dir=MODEL_DIR)
    model.load_weights(model.find_last(), by_name=True)

    totalScore = 0
    counts = 0
    classes = ["Gravel", "Sugar", "Fish", "Flower"]
    # Dice accumulators over the whole validation set.
    trueSum = 0
    predSum = 0
    intersects = 0
    for index, row in df.iterrows():
        img_key = row["image_id"]
        # Ground-truth masks at 384x576, one channel per class.
        trueclzMasks = np.full((384, 576, 4), 0.)
        # Dummy mask input fed to the model (all zeros, 24x36x4).
        inputmasks = np.full((24, 36, 4), 0)
        classVect = generateClassVec(row)
        for idx, clz in enumerate(classes):
            if classVect[idx] == 1:
                print(WORK_DIR + "/masks_shrinked/" + img_key + "_" + clz +
                      ".png")
                clzMask = cv2.imread(WORK_DIR + "/masks_shrinked/" +
                                     img_key + "_" + clz + ".png")
                # Keep only the first channel (mask is stored as RGB).
                clzMask = clzMask[:, :, [0]]
                # logical_and(x, x) just binarizes: non-zero -> 1.0.
                clzMask = np.logical_and(clzMask, clzMask).astype(np.float32)
                trueclzMasks[:, :, [idx]] = clzMask
        image = cv2.imread(WORK_DIR + "/train_image_shrinked/" + img_key +
                           ".jpg")
        image = cv2.resize(image, (config.IMAGE_MAX_DIM, config.IMAGE_MIN_DIM),
                           interpolation=cv2.INTER_LINEAR)
        # Batch of one image plus the dummy mask input.
        outputs = model.keras_model.predict([[image], [inputmasks]])
        logits = outputs[0]
        predclzMasks = logits[0]
        # Upscale predictions back to ground-truth resolution (576x384),
        # then binarize at a 0.3 threshold.
        resizedPredClzMasks = cv2.resize(predclzMasks.astype(np.float32),
                                         (576, 384),
                                         interpolation=cv2.INTER_NEAREST)
        resizedPredClzMasks[resizedPredClzMasks >= 0.3] = 1.
        resizedPredClzMasks[resizedPredClzMasks < 0.3] = 0.
        # for i in range(4):
        #     resizedPredClzMasks[:,:,[i]] = postProcess(resizedPredClzMasks[:,:,[i]],image)
        trueSum += np.sum(trueclzMasks)
        predSum += np.sum(resizedPredClzMasks)
        intersects += np.sum(
            np.logical_and(resizedPredClzMasks,
                           trueclzMasks).astype(np.float32))
    # Dice coefficient: 2*|A∩B| / (|A| + |B|), aggregated over all images.
    print("The score is ", 2 * intersects / (predSum + trueSum))
def runTesting():
    """Run multi-label class prediction on the validation set and write a
    per-image comparison table (true vs. predicted class vector) to Excel.

    Side effects: reads datasets and data_val.xlsx from WORK_DIR, runs the
    model in inference mode, and writes the results spreadsheet.
    """
    config = Config()
    config.NAME = "MyMRCNN_WHOLE_Model"
    config.display()

    dataSetFact = dataSetlib.ImageDataTrainningFactory(WORK_DIR)
    dataSetFact.initialize(WORK_DIR + "/transformed_train.xlsx",
                           WORK_DIR + '/data_train.xlsx',
                           WORK_DIR + '/data_val.xlsx')
    dataSetFact.preload_images()
    dataset_train, dataset_val = dataSetFact.getDataSet()
    dataset_train.prepare()
    dataset_val.prepare()
    # Classification-only mode: no mask targets.
    dataset_train.noMask = dataset_val.noMask = True

    model = modellib.MyBackboneModel(mode="inference",
                                     config=config,
                                     model_dir=MODEL_DIR)
    model.load_weights(model.find_last(), by_name=True)

    image_ids = dataset_val._image_ids
    # NOTE(review): .loc[img_file_id] below assumes the spreadsheet's index
    # is the image id column — verify read_excel produces that index.
    df_data = pd.read_excel(WORK_DIR + '/data_val.xlsx')
    df = pd.DataFrame(columns=["image_id","True_Gravel","True_Sugar","True_Fish","True_Flower","Pred_Gravel",\
        "Pred_Sugar","Pred_Fish","Pred_Flower","Match"],index=[])
    # FIX: renamed loop variable `id` -> `image_id` (shadowed the builtin).
    for image_id in image_ids:
        image, gt_class_ids, gt_masks = datagenerator.load_image_gt(
            dataset_val,
            config,
            image_id,
            augment=False,
            augmentation=None,
            use_mini_mask=config.USE_MINI_MASK)
        batch_images = np.zeros((1, ) + image.shape, dtype=np.float32)
        batch_gt_class_ids = np.zeros((1, config.NUM_CLASSES),
                                      dtype=np.int32)
        # FIX: removed the unused batch_gt_masks allocation — predict()
        # only takes the image and class-id inputs.
        batch_images[0] = datagenerator.mold_image(image.astype(np.float32),
                                                   config)
        output = model.keras_model.predict([batch_images,
                                            batch_gt_class_ids])

        img_file_id = dataset_val.image_info[image_id]["id"]
        # Ground truth: all columns after image_id are the class flags.
        true_classes = df_data.loc[img_file_id].values[1:]
        # Predicted class vector, rounded to 0/1.
        classes = np.reshape(np.round(output[2]), [-1])
        Match = True
        for idx, v in enumerate(classes):
            if v != true_classes[idx]:
                Match = False
        if img_file_id not in df.index:
            df.loc[img_file_id] = [img_file_id] + true_classes.tolist(
            ) + classes.tolist() + [1. if Match else 0.]
        print(img_file_id)
    df.to_excel(WORK_DIR +
                "/MyMRCNN_WHOLE_Model_multi_class_pred_6970.xlsx")
def runTesting():
    """Run single-class (argmax) prediction on the validation set and write
    a per-image table of true vs. predicted class name to Excel.

    The true class is parsed from the image file id (second "_"-separated
    token). Side effects: reads datasets from WORK_DIR, loads weights from
    WEIGHT_DIR, and writes the results spreadsheet.
    """
    config = Config()
    config.NAME = "MyMRCNN_WHOLE_Model"
    config.display()

    dataSetFact = dataSetlib.ImageDataTrainningFactory(WORK_DIR)
    dataSetFact.initialize(WORK_DIR + "/clean_train_images.xlsx",
                           WORK_DIR + '/clean_data_train.xlsx',
                           WORK_DIR + '/clean_data_val.xlsx')
    dataSetFact.preload_images()
    dataset_train, dataset_val = dataSetFact.getDataSet()
    dataset_train.prepare()
    dataset_val.prepare()
    # Classification-only mode: no mask targets.
    dataset_train.noMask = dataset_val.noMask = True

    model = modellib.MyBackboneModel(mode="inference",
                                     config=config,
                                     model_dir=MODEL_DIR)
    # Fixed weight file (unlike the sibling variant that resumes find_last()).
    model.load_weights(WEIGHT_DIR, by_name=True)

    image_ids = dataset_val._image_ids
    df = pd.DataFrame(columns=["image_id", "Trueth", "Pred", "Match"],
                      index=[])
    # FIX: renamed loop variable `id` -> `image_id` (shadowed the builtin).
    for image_id in image_ids:
        image, gt_class_ids, gt_masks = datagenerator.load_image_gt(
            dataset_val,
            config,
            image_id,
            augment=False,
            augmentation=None,
            use_mini_mask=config.USE_MINI_MASK)
        batch_images = np.zeros((1, ) + image.shape, dtype=np.float32)
        batch_gt_class_ids = np.zeros((1, config.NUM_CLASSES),
                                      dtype=np.int32)
        # FIX: removed the unused batch_gt_masks allocation — predict()
        # only takes the image and class-id inputs.
        batch_images[0] = datagenerator.mold_image(image.astype(np.float32),
                                                   config)
        output = model.keras_model.predict([batch_images,
                                            batch_gt_class_ids])

        # Predicted class = argmax over the class scores.
        class_id = np.argmax(output[0])
        class_names = ["Gravel", "Sugar", "Fish", "Flower"]
        pred_class_name = class_names[class_id]
        img_file_id = dataset_val.image_info[image_id]["id"]
        # File ids encode the true class as the second "_" token.
        img_true_class = img_file_id.split("_")[1]
        if img_file_id not in df.index:
            df.loc[img_file_id] = [
                img_file_id, img_true_class, pred_class_name,
                1. if img_true_class == pred_class_name else 0.
            ]
        print(img_file_id)
    df.to_excel(WORK_DIR + "/MyMRCNN_WHOLE_Model_pred_MEAN_37_epoc.xlsx")
def runTesting():
    """Debug helper: run the model on one validation sample (index 1) and
    dump its four predicted class masks as PNG files in WORK_DIR.

    Side effects: reads datasets from WORK_DIR, resumes the last
    checkpoint, writes one "<image_id>_<class>.png" per class.
    """
    config = Config()
    config.NAME = "MyMRCNN_WHOLE_Model"
    config.display()

    dataSetFact = dataSetlib.ImageDataTrainningFactory(WORK_DIR)
    dataSetFact.initialize(WORK_DIR + "/transformed_train.csv",
                           WORK_DIR + '/data_train_s.xlsx',
                           WORK_DIR + '/data_val_s.xlsx')
    dataSetFact.preload_images()
    dataset_train, dataset_val = dataSetFact.getDataSet()
    dataset_train.prepare()
    dataset_val.prepare()

    # Load the single hard-coded validation sample (image index 1).
    image, gt_class_ids, gt_masks = datagenerator.load_image_gt(
        dataset_val,
        config,
        1,
        augment=False,
        augmentation=None,
        use_mini_mask=config.USE_MINI_MASK)
    # Batch of one: molded image, zeroed class ids, zeroed masks.
    batch_images = np.zeros((1, ) + image.shape, dtype=np.float32)
    batch_gt_class_ids = np.zeros((1, config.NUM_CLASSES), dtype=np.int32)
    batch_gt_masks = np.zeros(
        (1, gt_masks.shape[0], gt_masks.shape[1], config.NUM_CLASSES),
        dtype=gt_masks.dtype)
    batch_images[0] = datagenerator.mold_image(image.astype(np.float32),
                                               config)

    # NOTE(review): mode="training" is used here presumably because the
    # training graph accepts the three-input batch fed below — confirm.
    model = modellib.MyBackboneModel(mode="training",
                                     config=config,
                                     model_dir=MODEL_DIR)
    model.load_weights(model.find_last(), by_name=True)
    output = model.keras_model.predict(
        [batch_images, batch_gt_class_ids, batch_gt_masks])

    # First output, first batch element: per-class mask predictions.
    masks = output[0][0]
    # Binarize and scale to 8-bit, then upscale to the original 2100x1400.
    masks = np.round(masks) * 255
    masks = cv2.resize(masks, (2100, 1400))
    masklst = []
    class_names = ["Gravel", "Sugar", "Fish", "Flower"]
    for i in range(masks.shape[-1]):
        masklst.append(masks[:, :, i])
    # Write one PNG per class, named after the sample's image id.
    for i, name in enumerate(class_names):
        fname = dataset_val.image_info[1]["id"] + "_" + name + ".png"
        cv2.imwrite(WORK_DIR + "/" + fname, masklst[i])
    print(output[0].shape)
    print('hai')
def runTrainning(epochs):
    """Train every layer of the custom-backbone model from fresh weights.

    Args:
        epochs: number of training epochs to run.

    Side effects: builds datasets from the "_s" spreadsheets in WORK_DIR
    and trains all layers, writing checkpoints under MODEL_DIR.
    """
    config = Config()
    config.NAME = "MyMRCNN_WHOLE_Model"
    config.display()

    # Assemble the train/validation datasets from the manifests.
    factory = dataSetlib.ImageDataTrainningFactory(WORK_DIR)
    factory.initialize(WORK_DIR + "/transformed_train.csv",
                       WORK_DIR + '/data_train_s.xlsx',
                       WORK_DIR + '/data_val_s.xlsx')
    factory.preload_images()
    train_ds, val_ds = factory.getDataSet()
    train_ds.prepare()
    val_ds.prepare()

    # Fresh model — no checkpoint is loaded (resume line kept for reference).
    model = modellib.MyBackboneModel(mode="training",
                                     config=config,
                                     model_dir=MODEL_DIR)
    # model.load_weights(model.find_last(), by_name=True)

    lr = 0.001
    model.train(train_ds,
                val_ds,
                learning_rate=lr,
                epochs=epochs,
                layers='all')