# --- Task 3 (lesion classification) training setup ---------------------------
# Load one cross-validation partition of the ISIC2018 training data, resized
# to 224x224.  NOTE(review): `num_folds`, `k_fold`, `run_name`,
# `backbone_name`, `backbone_options` and the dense-layer hyper-parameters are
# defined outside this excerpt -- presumably script/function arguments; verify
# against the enclosing file.
(x_train, y_train), (x_valid, y_valid), _ = load_training_data(task_idx=3, output_size=224, num_partitions=num_folds, idx_partition=k_fold)

# Debug switch: when flipped to True, shrink both splits to 32 samples and
# pop up a visualization of the first training batch.
debug_visualize = False
if debug_visualize:
    x_train = x_train[:32]
    y_train = y_train[:32]
    x_valid = x_valid[:32]
    y_valid = y_valid[:32]
    bv = BatchVisualization(images=x_train, true_labels=y_train)
    bv()

# Labels are 2-D (n_samples, num_classes) -- presumably one-hot encoded;
# confirm against load_training_data.
num_classes = y_train.shape[1]
callbacks = config_cls_callbacks(run_name)

# Build the classification head on top of the chosen backbone.
# (The call continues beyond this excerpt; argument list is left open.)
model = backbone(backbone_name, **backbone_options).classification_model(
    input_shape=x_train.shape[1:],
    num_classes=num_classes,
    num_dense_layers=num_dense_layers,
    num_dense_units=num_dense_units,
    pooling=pooling,
    dropout_rate=dropout_rate,
    kernel_regularizer=dense_layer_regularizer,
    save_to=run_name,
# Evaluate a trained segmentation model on the held-out partition of the
# ISIC2018 training data, report Jaccard scores, and visualize predictions
# next to the ground-truth masks.
_, (images, masks), _ = load_training_data(task_idx=task_idx, output_size=224)

# Give the masks an explicit channel axis when they arrive as (N, H, W).
if masks.ndim == 3:
    masks = masks[..., None]

# Binarize 0-255 masks (checked on the first sample); masks that are already
# in {0, 1} are left untouched.
if masks[0].max() > 1:
    masks = (masks > 127.5).astype(np.uint8)

# Restore the weights saved under `run_name`.
model = backbone(backbone_name).segmentation_model(load_from=run_name, lr=0.001)

# Evaluate on the full set; shrink this slice to spot-check a subset instead.
n_eval = images.shape[0]
images = images[:n_eval]
masks = masks[:n_eval]

predictions = model.predict(images, batch_size=8)
if task_idx == 1:
    # Task 1 post-processes raw probabilities with Gaussian smoothing and
    # a fixed threshold.
    predictions = task1_post_process(y_prediction=predictions, threshold=0.5, gauss_sigma=2.)

mean_jaccard, thresholded_jaccard = compute_jaccard(y_true=masks, y_pred=predictions)
print('Mean jaccard = %.3f, Thresholded Jaccard = %.3f ' % (mean_jaccard, thresholded_jaccard))

viewer = BatchVisualization(images=images, true_masks=masks, pred_masks=predictions)
viewer()
# --- Segmentation training setup ---------------------------------------------
# Cast validation masks to uint8 and record split sizes.
# NOTE(review): `run_name`, `from_run_name`, `init_lr`, `backbone_name`,
# `backbone_options` and the model hyper-parameters come from outside this
# excerpt -- verify against the enclosing file.
y_valid = y_valid.astype(np.uint8)
n_samples_train = x_train.shape[0]
n_samples_valid = x_valid.shape[0]

# Debug switch: when flipped to True, shrink both splits to 32 samples and
# pop up a visualization of the first training batch with its masks.
debug_visualize = False
if debug_visualize:
    x_train = x_train[:32]
    y_train = y_train[:32]
    x_valid = x_valid[:32]
    y_valid = y_valid[:32]
    bv = BatchVisualization(images=x_train, true_masks=y_train)
    bv()

callbacks = config_seg_callbacks(run_name)

# Either resume training from a previous run's weights, or build a fresh
# segmentation model from the configured backbone.
if from_run_name:
    model = backbone(backbone_name).segmentation_model(
        load_from=from_run_name, lr=init_lr)
else:
    # Masks are 4-D here (shape[3] is read as the class/channel count).
    # (The call continues beyond this excerpt; argument list is left open.)
    model = backbone(backbone_name, **backbone_options).segmentation_model(
        input_shape=x_train.shape[1:],
        num_classes=y_train.shape[3],
        upsampling_type=upsampling_type,
        bottleneck=bottleneck,
        init_nb_filters=init_nb_filters,
if __name__ == '__main__':
    # Evaluate a trained task-3 classifier on one partition of the ISIC2018
    # training data: print a confusion matrix, precision/recall, and show the
    # images with true vs. predicted labels.
    from datasets.ISIC2018 import *
    from models import backbone
    from misc_utils.visualization_utils import BatchVisualization
    from misc_utils.eval_utils import get_confusion_matrix, get_precision_recall

    # Run configuration -- must match the name the model was trained under.
    backbone_name = 'inception_v3'
    k_fold = 0
    version = '0'
    run_name = 'task3_%s_k%d_v%s' % (backbone_name, k_fold, version)

    # Middle element of the returned triple is the held-out partition
    # (presumably the validation fold -- see load_training_data).
    _, (images, labels), _ = load_training_data(task_idx=3, output_size=224,
                                                idx_partition=k_fold)

    model = backbone(backbone_name).classification_model(load_from=run_name)

    # Evaluate everything; shrink this slice to spot-check a few images.
    n_eval = images.shape[0]
    images = images[:n_eval]
    labels = labels[:n_eval]

    scores = model.predict(images)
    get_confusion_matrix(y_true=labels, y_pred=scores, print_cm=True)
    get_precision_recall(y_true=labels, y_pred=scores)

    viewer = BatchVisualization(images=images, true_labels=labels,
                                pred_labels=scores)
    viewer()