def __init__ (self, min_size=1):
    super().__init__()
    self.gt_matcher = cpp.GTMatcher(FLAGS.match_th, FLAGS.max_masks, min_size)
    self.priors = []
    if os.path.exists(FLAGS.priors):
        with open(FLAGS.priors, 'r') as f:
            for l in f:
                if l[0] == '#':
                    continue
                s, r = l.strip().split(' ')
                s, r = float(s), float(r)
                # w * h = s * s
                # w / h = r
                w = math.sqrt(s * s * r)
                h = math.sqrt(s * s / r)
                self.priors.append([w, h])
                pass
            pass
        pass
    aardvark.print_red("PRIORS %s" % str(self.priors))
    # TODO: need a better way to generalize this to multiple priors and 0 priors
    self.n_priors = len(self.priors)
    if self.n_priors == 0:
        self.n_priors = 1
    pass
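# Worked example of the prior math above (illustrative numbers, not taken from
# any real priors file; the helper name is hypothetical and this relies on the
# same module-level `import math` as the code above): a priors line "32 2"
# means scale s = 32 px and aspect ratio w/h = 2.
def _prior_example ():
    s, r = 32.0, 2.0
    w = math.sqrt(s * s * r)            # ~45.25
    h = math.sqrt(s * s / r)            # ~22.63
    assert abs(w * h - s * s) < 1e-6    # area is preserved: w * h == s * s
    assert abs(w / h - r) < 1e-6        # aspect ratio is preserved: w / h == r
    return [w, h]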
def __init__ (self, min_size=1):
    super().__init__()
    #self.gt_matcher = cpp.GTMatcher(FLAGS.match_th, FLAGS.max_masks, min_size)
    priors = []
    if os.path.exists(FLAGS.priors):
        priors = load_priors(FLAGS.priors)
    if len(priors) == 0:
        priors.append([1, 1])
    aardvark.print_red("PRIORS %s" % str(priors))
    # TODO: need a better way to generalize this to multiple priors and 0 priors
    self.n_priors = len(priors)
    self.priors = priors
    pass
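# NOTE: load_priors() is not defined in this excerpt.  The sketch below is an
# assumption based on the inline parsing in the first constructor, not the
# original helper: each non-comment line of the priors file is
# "scale aspect_ratio", converted to [w, h] with w * h == scale^2 and
# w / h == aspect_ratio.
def load_priors (path):
    priors = []
    with open(path, 'r') as f:
        for l in f:
            if l[0] == '#':
                continue
            s, r = [float(v) for v in l.strip().split(' ')]
            priors.append([math.sqrt(s * s * r), math.sqrt(s * s / r)])
            pass
        pass
    return priors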
def extra_stream_config (self, is_training):
    if len(self.priors) > 0:
        aardvark.print_red('priors %s' % str(self.priors))
    augments = aardvark.load_augments(is_training)
    shift = 0
    if is_training:
        shift = FLAGS.clip_shift
    return {
        "annotate": [1],
        "transforms": [{"type": "resize", "max_size": FLAGS.max_size}] + augments + [
            #{"type": "clip", "round": FLAGS.backbone_stride},
            {"type": "clip", "shift": shift, "width": FLAGS.fix_width, "height": FLAGS.fix_height, "round": FLAGS.clip_stride},
            {"type": "anchors.dense.box", 'downsize': FLAGS.anchor_stride, 'lower_th': FLAGS.lower_th, 'upper_th': FLAGS.upper_th, 'weighted': False, 'priors': self.priors, 'params_default': 1.0},
            {"type": "box_feature"},
            {"type": "rasterize"},
        ]
    }
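# For reference, with illustrative flag values (these are NOT this repo's
# defaults) and a single prior, the dictionary above would resolve to roughly:
#
#   {"annotate": [1],
#    "transforms": [
#       {"type": "resize", "max_size": 1200},
#       # ... augmentations from aardvark.load_augments(is_training) ...
#       {"type": "clip", "shift": 0, "width": 0, "height": 0, "round": 16},
#       {"type": "anchors.dense.box", "downsize": 16, "lower_th": 0.4,
#        "upper_th": 0.6, "weighted": False, "priors": [[45.25, 22.63]],
#        "params_default": 1.0},
#       {"type": "box_feature"},
#       {"type": "rasterize"}]}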
def __init__(self):
    priors = []
    # read in priors
    # what RPN estimates is the delta between priors and the real
    # regression target.
    if os.path.exists(FLAGS.rpn_priors):
        with open(FLAGS.rpn_priors, 'r') as f:
            for l in f:
                if l[0] == '#':
                    continue
                vs = [float(v) for v in l.strip().split(' ')]
                assert len(vs) == FLAGS.rpn_params
                priors.append(vs)
                pass
            pass
        pass
    if len(priors) == 0:
        priors.append([1.0] * FLAGS.rpn_params)
        pass
    aardvark.print_red("PRIORS %s" % str(priors))
    self.priors = np.array(priors, dtype=np.float32)
    pass
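# Illustrative sketch of the rpn_priors file format parsed above.  The values,
# the helper name, and the assumption FLAGS.rpn_params == 2 are made up for
# this example; it relies on the same numpy import (np) as the code above.
def _rpn_priors_example ():
    sample = "# comment lines are skipped\n16.0 16.0\n32.0 64.0\n"
    priors = []
    for l in sample.splitlines():
        if l[0] == '#':
            continue
        priors.append([float(v) for v in l.strip().split(' ')])
    return np.array(priors, dtype=np.float32)   # shape (2, 2), dtype float32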
def inference (self, images, classes, is_training):
    assert FLAGS.clip_stride % FLAGS.backbone_stride == 0
    backbone = aardvark.create_stock_slim_network(FLAGS.backbone, images, is_training, global_pool=False, stride=FLAGS.backbone_stride)
    if FLAGS.finetune:
        backbone = tf.stop_gradient(backbone)
    with slim.arg_scope(aardvark.default_argscope(self.is_training)):
        if FLAGS.multistep > 0:
            if FLAGS.multistep == 1:
                aardvark.print_red("multistep = 1 doesn't converge well")
            # multi-step upscaling head followed by a 3x3 prediction conv
            net = slim_multistep_upscale(backbone, FLAGS.backbone_stride, FLAGS.reduction, FLAGS.multistep)
            logits = slim.conv2d(net, classes, 3, 1, activation_fn=None, padding='SAME')
        else:
            # single transposed convolution back to the input resolution
            logits = slim.conv2d_transpose(backbone, classes, FLAGS.backbone_stride * 2, FLAGS.backbone_stride, activation_fn=None, padding='SAME')
    if FLAGS.finetune:
        assert FLAGS.colorspace == 'RGB'
        def is_trainable (x):
            # train everything except the backbone variables
            return not x.startswith(FLAGS.backbone)
        self.init_session, self.variables_to_train = aardvark.setup_finetune(FLAGS.finetune, is_trainable)
    return logits
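# Minimal shape check for the single-deconvolution path above.  This is a
# sketch assuming TF 1.x with tf.contrib.slim (as the code above uses); the
# helper name is hypothetical, the conv layer only stands in for the real
# backbone, and the sizes are arbitrary.
def _upscale_shape_example ():
    stride = 16                                   # e.g. FLAGS.backbone_stride
    images = tf.placeholder(tf.float32, (None, 256, 256, 3))
    feat = slim.conv2d(images, 64, 3, stride)     # stand-in backbone at 1/16 resolution
    # kernel size of 2 * stride with upsampling factor `stride` mirrors the
    # conv2d_transpose call in inference()
    logits = slim.conv2d_transpose(feat, 2, stride * 2, stride,
                                   activation_fn=None, padding='SAME')
    return logits                                 # static shape (?, 256, 256, 2)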
def __init__(self):
    super().__init__()
    if FLAGS.classes > 1:
        aardvark.print_red("Classes should be number of point classes,")
        aardvark.print_red("not counting background. Usually 1.")
    pass