    def ensemble_models_id(self, single_id, set_type='train', model='stage1_unet', show=True, verbose=True):
        """
        Run the RCNN/UNet ensembles on a single image id, then merge the candidate
        instances by voting, NMS and overlap removal. Returns a result dict when
        show is False.
        """
        self._load_ensembles(model)
        d = self._get_cell_data(single_id, set_type)
        logger.debug('image size=%dx%d' % (d.img_h, d.img_w))

        total_model_size = len(self.ensembles['rcnn']) + len(self.ensembles['unet'])
        logger.debug('total_model_size=%d rcnn=%d unet=%d' % (
            total_model_size, len(self.ensembles['rcnn']), len(self.ensembles['unet'])))

        rcnn_instances = []
        rcnn_scores = []

        # TODO : RCNN Ensemble
        rcnn_ensemble = False
        for idx, data in enumerate(self.ensembles['rcnn']):
            if set_type == 'train':
                instances, scores = data['valid_instances'].get(single_id, (None, None))
                rcnn_ensemble = True
            else:
                # TODO
                ls = data['test_instances'].get(single_id, None)
                if ls is None:
                    instances = scores = None
                else:
                    instances = [x[0] for x in ls]
                    scores = [x[1] for x in ls]
                    rcnn_ensemble = True

            if instances is None:
                logger.warning('Not found id=%s in RCNN %d Model' % (single_id, idx + 1))
                continue
            logger.debug('rcnn # instances = %d' % len(instances))

            rcnn_instances.extend([instance[:d.img_h, :d.img_w] for instance in instances])
            rcnn_scores.extend([s * HyperParams.get().rcnn_score_rescale for s in scores])   # rescale scores

        total_instances = []
        total_scores = []

        # TODO : UNet Ensemble
        for idx, data in enumerate(self.ensembles['unet']):
            if set_type == 'train':
                instances, scores = data['valid_instances'].get(single_id, (None, None))
            else:
                instances, scores = data['test_instances'].get(single_id, (None, None))

            if instances is None:
                logger.warning('Not found id=%s in UNet %d Model' % (single_id, idx + 1))
                continue

            total_instances.extend(instances)
            total_scores.extend(scores)

            # if single_id in ['646f5e00a2db3add97fb80a83ef3c07edd1b17b1b0d47c2bd650cdcab9f322c0']:  # take too long
            #     logger.warning('no ensemble id=%s' % single_id)
            #     break

        watch = StopWatch()
        watch.start()
        logger.debug('voting+ size=%d' % len(total_instances))

        # TODO : Voting?
        voting_th = HyperParams.get().ensemble_voting_th
        rects = [get_rect_of_mask(a) for a in total_instances]
        voted = []
        for i, x in enumerate(total_instances):
            voted.append(filter_by_voting((x, total_instances, voting_th, 0.3, rects[i], rects)))

        total_instances = list(compress(total_instances, voted))
        total_scores = list(compress(total_scores, voted))

        watch.stop()
        logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # nms
        watch.start()
        logger.debug('nms+ size=%d' % len(total_instances))
        instances, scores = Network.nms(total_instances, total_scores, None, thresh=HyperParams.get().ensemble_nms_iou)
        watch.stop()
        logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # high threshold if not exists in RCNN
        if rcnn_ensemble:
            voted = []
            for i, x in enumerate(instances):
                voted.append(filter_by_voting((x, rcnn_instances, 1, 0.3, None, None)))

            new_instances = []
            new_scores = []
            for instance, score, v in zip(instances, scores, voted):
                if v:
                    new_instances.append(instance)
                    new_scores.append(score)
                elif score > HyperParams.get().ensemble_th_no_rcnn:
                    new_instances.append(instance)
                    new_scores.append(score)
            instances, scores = new_instances, new_scores

        # nms with rcnn
        instances = instances + rcnn_instances
        scores = scores + rcnn_scores
        watch.start()
        logger.debug('nms_rcnn+ size=%d' % len(instances))
        instances, scores = Network.nms(instances, scores, None, thresh=HyperParams.get().ensemble_nms_iou)
        watch.stop()
        logger.debug('nms_rcnn- size=%d elapsed=%.5f' % (len(instances), watch.get_elapsed()))
        watch.reset()

        # remove overlaps
        logger.debug('remove overlaps+')
        sorted_idx = [i[0] for i in sorted(enumerate(instances), key=lambda x: get_size_of_mask(x[1]), reverse=False)]
        instances = [instances[x] for x in sorted_idx]
        scores = [scores[x] for x in sorted_idx]
        instances2 = [ndimage.morphology.binary_fill_holes(i) for i in instances]
        instances2, scores2 = Network.remove_overlaps(instances2, scores)

        # remove deleted instances
        logger.debug('remove deleted+ size=%d' % len(instances2))
        voted = []
        for x in instances2:
            voted.append(filter_by_voting((x, instances, 1, 0.75, None, None)))
        instances = list(compress(instances2, voted))
        scores = list(compress(scores2, voted))

        # TODO : Filter by score?
        logger.debug('filter by score+ size=%d' % len(instances))
        score_filter_th = HyperParams.get().ensemble_score_th
        if score_filter_th > 0.0:
            logger.debug('filter_by_score=%.3f' % score_filter_th)
            # filter instances and scores together so they stay aligned
            filtered = [(i, s) for i, s in zip(instances, scores) if s > score_filter_th]
            instances = [i for i, _ in filtered]
            scores = [s for _, s in filtered]

        logger.debug('finishing+ size=%d' % len(instances))
        image = d.image(is_gray=False)
        score_desc = []
        labels = []
        if len(d.masks) > 0:    # has label masks
            labels = list(d.multi_masks(transpose=False))
            tp, fp, fn = get_multiple_metric(thr_list, instances, labels)

            logger.debug('instances=%d, labels=%d' % (len(instances), len(labels)))
            for i, thr in enumerate(thr_list):
                desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                    (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
                logger.debug(desc)
                score_desc.append(desc)
            score = np.mean(tp / (tp + fp + fn))
            logger.debug('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' % (
                score, np.mean(tp), np.mean(fp), np.mean(fn)))
        else:
            score = 0.0

        if show:
            img_vis = Network.visualize(image, labels, instances, None)
            cv2.imshow('valid', img_vis)
            cv2.waitKey(0)
        else:
            return {
                'instance_scores': scores,
                'score': score,
                'image': image,
                'instances': instances,
                'labels': labels,
                'score_desc': score_desc
            }
    def single_id(self, model, checkpoint, single_id, set_type='train', show=True, verbose=True):
        """
        Run a single model on one image id with test-time augmentation (flips and
        rescaling), then merge the candidates by voting, NMS and overlap removal.
        Returns a result dict when no model was set up in this call.
        """
        if model:
            self.set_network(model)
            self.network.build()
            self.init_session()
        if checkpoint:
            saver = tf.train.Saver()
            saver.restore(self.sess, checkpoint)
            if verbose:
                logger.info('restored from checkpoint, %s' % checkpoint)

        d = self._get_cell_data(single_id, set_type)
        h, w = d.img.shape[:2]
        shortedge = min(h, w)
        logger.debug('%s image size=(%d x %d)' % (single_id, w, h))

        watch = StopWatch()
        logger.debug('preprocess+')
        d = self.network.preprocess(d)

        image = d.image(is_gray=False)

        total_instances = []
        total_scores = []
        total_from_set = []
        cutoff_instance_max = HyperParams.get().post_cutoff_max_th
        cutoff_instance_avg = HyperParams.get().post_cutoff_avg_th

        watch.start()
        logger.debug('inference at default scale+ %dx%d' % (w, h))
        inference_result = self.network.inference(
            self.sess, image,
            cutoff_instance_max=cutoff_instance_max,
            cutoff_instance_avg=cutoff_instance_avg)
        instances_pre, scores_pre = inference_result['instances'], inference_result['scores']
        instances_pre = Network.resize_instances(instances_pre, target_size=(h, w))
        total_instances = total_instances + instances_pre
        total_scores = total_scores + scores_pre
        total_from_set = [1] * len(instances_pre)
        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        logger.debug('inference with flips+')
        # re-inference using flip
        for flip_orientation in range(2):
            flipped = cv2.flip(image.copy(), flip_orientation)
            inference_result = self.network.inference(
                self.sess, flipped,
                cutoff_instance_max=cutoff_instance_max,
                cutoff_instance_avg=cutoff_instance_avg)
            instances_flip, scores_flip = inference_result['instances'], inference_result['scores']
            instances_flip = [cv2.flip(instance.astype(np.uint8), flip_orientation) for instance in instances_flip]
            instances_flip = Network.resize_instances(instances_flip, target_size=(h, w))

            total_instances = total_instances + instances_flip
            total_scores = total_scores + scores_flip
            total_from_set = total_from_set + [2 + flip_orientation] * len(instances_flip)

        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        logger.debug('inference with scaling+flips+')

        # re-inference after rescale image
        def inference_with_scale(image, resize_target):
            image = cv2.resize(image.copy(), None, None, resize_target, resize_target, interpolation=cv2.INTER_AREA)
            inference_result = self.network.inference(
                self.sess, image,
                cutoff_instance_max=cutoff_instance_max,
                cutoff_instance_avg=cutoff_instance_avg)
            instances_rescale, scores_rescale = inference_result['instances'], inference_result['scores']

            instances_rescale = Network.resize_instances(instances_rescale, target_size=(h, w))
            return instances_rescale, scores_rescale

        max_mask = get_max_size_of_masks(instances_pre)
        logger.debug('max_mask=%d' % max_mask)
        resize_target = HyperParams.get().test_aug_scale_t / max_mask
        resize_target = min(HyperParams.get().test_aug_scale_max, resize_target)
        resize_target = max(HyperParams.get().test_aug_scale_min, resize_target)
        import math
        # resize_target = 2.0 / (1.0 + math.exp(-1.5*(resize_target - 1.0)))
        # resize_target = max(0.5, resize_target)
        resize_target = max(228.0 / shortedge, resize_target)
        # if resize_target > 1.0 and min(w, h) > 1000:
        #     logger.debug('too large image, no resize')
        #     resize_target = 0.8
        logger.debug('resize_target=%.4f' % resize_target)

        instances_rescale, scores_rescale = inference_with_scale(image, resize_target)
        total_instances = total_instances + instances_rescale
        total_scores = total_scores + scores_rescale
        total_from_set = total_from_set + [4] * len(instances_rescale)

        # re-inference using flip + rescale
        for flip_orientation in range(2):
            flipped = cv2.flip(image.copy(), flip_orientation)
            instances_flip, scores_flip = inference_with_scale(flipped, resize_target)
            instances_flip = [cv2.flip(instance.astype(np.uint8), flip_orientation) for instance in instances_flip]
            instances_flip = Network.resize_instances(instances_flip, target_size=(h, w))

            total_instances = total_instances + instances_flip
            total_scores = total_scores + scores_flip
            total_from_set = total_from_set + [5 + flip_orientation] * len(instances_flip)

        watch.stop()
        logger.debug('inference- elapsed=%.5f' % watch.get_elapsed())
        watch.reset()
        watch.start()
        logger.debug('voting+ size=%d' % len(total_instances))

        # TODO : Voting?
        voting_th = HyperParams.get().post_voting_th
        rects = [get_rect_of_mask(a) for a in total_instances]
        voted = []
        for i, x in enumerate(total_instances):
            voted.append(filter_by_voting((x, total_instances, voting_th, 0.3, rects[i], rects)))

        total_instances = list(compress(total_instances, voted))
        total_scores = list(compress(total_scores, voted))
        total_from_set = list(compress(total_from_set, voted))

        watch.stop()
        logger.debug('voting elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # nms
        watch.start()
        logger.debug('nms+ size=%d' % len(total_instances))
        instances, scores = Network.nms(total_instances, total_scores, total_from_set, thresh=HyperParams.get().test_aug_nms_iou)
        watch.stop()
        logger.debug('nms elapsed=%.5f' % watch.get_elapsed())
        watch.reset()

        # remove overlaps
        logger.debug('remove overlaps+')
        sorted_idx = [i[0] for i in sorted(enumerate(instances), key=lambda x: get_size_of_mask(x[1]), reverse=True)]
        instances = [instances[x] for x in sorted_idx]
        scores = [scores[x] for x in sorted_idx]
        instances = [ndimage.morphology.binary_fill_holes(i) for i in instances]
        instances, scores = Network.remove_overlaps(instances, scores)

        # TODO : Filter by score?
        # logger.debug('filter by score+')
        # score_filter_th = HyperParams.get().post_filter_th
        # if score_filter_th > 0.0:
        #     logger.debug('filter_by_score=%.3f' % score_filter_th)
        #     instances = [i for i, s in zip(instances, scores) if s > score_filter_th]
        #     scores = [s for i, s in zip(instances, scores) if s > score_filter_th]

        logger.debug('finishing+')
        image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA)
        score_desc = []
        labels = []
        if len(d.masks) > 0:    # has label masks
            labels = list(d.multi_masks(transpose=False))
            labels = Network.resize_instances(labels, target_size=(h, w))
            tp, fp, fn = get_multiple_metric(thr_list, instances, labels)

            if verbose:
                logger.info('instances=%d, reinf(%.3f) labels=%d' % (len(instances), resize_target, len(labels)))
            for i, thr in enumerate(thr_list):
                desc = 'score=%.3f, tp=%d, fp=%d, fn=%d --- iou %.2f' % (
                    (tp / (tp + fp + fn))[i], tp[i], fp[i], fn[i], thr)
                if verbose:
                    logger.info(desc)
                score_desc.append(desc)
            score = np.mean(tp / (tp + fp + fn))
            if verbose:
                logger.info('score=%.3f, tp=%.1f, fp=%.1f, fn=%.1f --- mean' % (
                    score, np.mean(tp), np.mean(fp), np.mean(fn)))
        else:
            score = 0.0

        if show:
            img_vis = Network.visualize(image, labels, instances, None)
            cv2.imshow('valid', img_vis)
            cv2.waitKey(0)

        if not model:
            return {
                'instance_scores': scores,
                'score': score,
                'image': image,
                'instances': instances,
                'labels': labels,
                'score_desc': score_desc
            }