Example #1
def postprocess_regular(image_info, np_pred_boxes, np_pred_confidences, H,
                        options):
    pred_anno = al.Annotation()
    pred_anno.imageName = image_info['path']
    pred_anno.imagePath = os.path.abspath(image_info['path'])
    _, rects = add_rectangles(H, [image_info['transformed']],
                              np_pred_confidences,
                              np_pred_boxes,
                              use_stitching=True,
                              rnn_len=H['rnn_len'],
                              min_conf=options['min_conf'],
                              tau=options['tau'],
                              show_suppressed=False)

    h, w = image_info['original_shape']
    if 'rotate90' in H['data'] and H['data']['rotate90']:
        # the original image height becomes the width of the rotated image
        rects = Rotate90.invert(h, rects)

    rects = [
        r for r in rects
        if r.x1 < r.x2 and r.y1 < r.y2 and r.score > options['min_conf']
    ]
    pred_anno.rects = rects
    pred_anno = rescale_boxes((H['image_height'], H['image_width']), pred_anno,
                              h, w)
    return pred_anno
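The core of this post-processing step is dropping degenerate or low-confidence rectangles and mapping the survivors from the network's input resolution back to the original image size (what rescale_boxes does). A minimal self-contained sketch of that logic, using a plain namedtuple instead of the Annotation/AnnoRect classes; all names below are illustrative, not part of the library:

from collections import namedtuple

Box = namedtuple("Box", ["x1", "y1", "x2", "y2", "score"])

def rescale_boxes_sketch(boxes, net_size, orig_size, min_conf=0.5):
    # Drop degenerate and low-confidence boxes, then scale the survivors from
    # the network input resolution (net_h, net_w) back to the original
    # image resolution (orig_h, orig_w).
    net_h, net_w = net_size
    orig_h, orig_w = orig_size
    sy, sx = orig_h / float(net_h), orig_w / float(net_w)
    kept = [b for b in boxes if b.x1 < b.x2 and b.y1 < b.y2 and b.score > min_conf]
    return [Box(b.x1 * sx, b.y1 * sy, b.x2 * sx, b.y2 * sy, b.score) for b in kept]

# boxes predicted on a 480x640 network input, original image is 960x1280
print(rescale_boxes_sketch([Box(10, 20, 110, 220, 0.9)], (480, 640), (960, 1280)))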
Example #2
def annotation_labelbox_from_result(result, columns, url_prefix):
    annotation = AnnotationLib.Annotation()
    image_url = get_path_from_s3(result[columns['annotation']], url_prefix)
    boxes = parse_boxes(result[columns['Answer.results']])
    for box in boxes:
        annotation.rects.append(AnnotationLib.AnnoRect(*box))

    # remove parameters from filename
    param_idx = image_url.index('&')
    annotation.imageName = image_url[:param_idx]

    return annotation
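The filename cleanup above cuts everything from the first '&' onwards; note that str.index raises ValueError when no parameters are present. A small hedged alternative using str.partition, which degrades gracefully (the separator choice and sample URL are assumptions, not taken from the library):

def strip_url_params(image_url, sep='&'):
    # str.partition returns the whole string unchanged when the separator is
    # absent, unlike str.index which raises ValueError.
    return image_url.partition(sep)[0]

print(strip_url_params("images/0001.jpeg&AWSAccessKeyId=XYZ"))  # -> images/0001.jpeg
print(strip_url_params("images/0002.jpeg"))                     # unchanged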
Example #3
def annotation_empty_from_result(result, columns, url_prefix):
    annotation = AnnotationLib.Annotation()
    image_url = get_path_from_s3(result[columns['annotation']], url_prefix)

    # remove parameters from filename
    param_idx = image_url.index('&')
    annotation.imageName = image_url[:param_idx]

    # MA: assume filenames don't contain ","
    cidx = annotation.imageName.find(',')
    if cidx >= 0:
        annotation.imageName = annotation.imageName[:cidx]

    assert len(annotation.imageName) > 0

    return annotation
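The same idea, extended to the comma handling in this variant: strip the '&'-delimited parameters, then anything after the first ',', and insist on a non-empty result. A compact sketch (sample inputs are hypothetical):

def clean_image_name(image_url):
    # Assumes filenames themselves contain neither '&' nor ','.
    name = image_url.partition('&')[0].partition(',')[0]
    assert len(name) > 0, "empty image name after cleanup: %r" % image_url
    return name

print(clean_image_name("images/0001.jpeg&AWSAccessKeyId=XYZ"))        # -> images/0001.jpeg
print(clean_image_name("images/0002.jpeg,120,80,240,360&Expires=0"))  # -> images/0002.jpeg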
Example #4
def annotation_labelact_from_result(result, columns, url_prefix):
    annotation = AnnotationLib.Annotation()
    image_url = get_path_from_s3(result[columns['annotation']], url_prefix)

    # remove parameters from filename
    param_idx = image_url.index('&')

    image_url_split = image_url[:param_idx].split(',')
    assert len(image_url_split) == 5

    annotation.imageName = image_url_split[0]

    rect = AnnotationLib.AnnoRect()
    rect.x1 = int(image_url_split[1])
    rect.y1 = int(image_url_split[2])
    rect.x2 = int(image_url_split[3])
    rect.y2 = int(image_url_split[4])

    result_split = result[columns['Answer.results']].split(',')
    assert len(result_split) == 1 + 1 + 4 + 4

    # male/female
    gender_idx_to_val = {0: ATTR_VAL_GENDER_MALE, 1: ATTR_VAL_GENDER_FEMALE}
    ptype_idx_to_val = {0: ATTR_VAL_PTYPE_SALES, 1: ATTR_VAL_PTYPE_CUST}

    sales_act_idx_to_val = {0: ATTR_VAL_ACT_SALES_INT,
                            1: ATTR_VAL_ACT_SALES_CLEAN,
                            2: ATTR_VAL_ACT_SALES_OTHER}
    cust_act_idx_to_val = {0: ATTR_VAL_ACT_CUST_QUEUE,
                           1: ATTR_VAL_ACT_CUST_INT,
                           2: ATTR_VAL_ACT_CUST_BROWSE,
                           3: ATTR_VAL_ACT_CUST_OTHER}

    gender_idx = int(result_split[-4])
    rect.at["gender"] = gender_idx_to_val[gender_idx]

    # sales/cust
    ptype_idx = int(result_split[-3])
    rect.at["ptype"] = ptype_idx_to_val[ptype_idx]

    if ptype_idx == 0:
        # interact/clean/other
        act_idx = int(result_split[-2])
        rect.at["act"] = sales_act_idx_to_val[act_idx]
    else:
        # queue/interact/browse/other
        act_idx = int(result_split[-1])
        rect.at["act"] = cust_act_idx_to_val[act_idx]

    annotation.rects.append(rect)
    return annotation
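Decoding 'Answer.results' boils down to mapping the last four comma-separated indices onto categorical labels, with the activity field chosen by person type. A self-contained sketch using plain strings in place of the ATTR_VAL_* constants (label names and the sample string are illustrative only):

GENDER = {0: "male", 1: "female"}
PTYPE = {0: "sales", 1: "customer"}
SALES_ACT = {0: "interact", 1: "clean", 2: "other"}
CUST_ACT = {0: "queue", 1: "interact", 2: "browse", 3: "other"}

def decode_answer(answer):
    # The answer string carries 1 + 1 + 4 + 4 comma-separated fields; the last
    # four are gender, person type, sales activity and customer activity.
    fields = answer.split(',')
    assert len(fields) == 1 + 1 + 4 + 4
    gender_idx, ptype_idx = int(fields[-4]), int(fields[-3])
    attrs = {"gender": GENDER[gender_idx], "ptype": PTYPE[ptype_idx]}
    # sales people use fields[-2], customers use fields[-1]
    attrs["act"] = SALES_ACT[int(fields[-2])] if ptype_idx == 0 else CUST_ACT[int(fields[-1])]
    return attrs

print(decode_answer("ok,1,10,20,30,40,1,1,0,2"))
# -> {'gender': 'female', 'ptype': 'customer', 'act': 'browse'}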
Example #5
    def eval(self, weights, test_boxes, min_conf, tau, show_suppressed, expname):
        self.H["grid_width"] = self.H["image_width"] / self.H["region_size"]
        self.H["grid_height"] = self.H["image_height"] / self.H["region_size"]
        x_in = tf.placeholder(tf.float32, name='x_in', shape=[self.H['image_height'], self.H['image_width'], 3])
        if self.H['use_rezoom']:
            pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = self.build_forward(tf.expand_dims(x_in, 0), 'test', reuse=None)
            grid_area = self.H['grid_height'] * self.H['grid_width']
            pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * self.H['rnn_len'], 2])),
                                          [grid_area, self.H['rnn_len'], 2])
            if self.H['reregress']:
                pred_boxes = pred_boxes + pred_boxes_deltas
        else:
            pred_boxes, pred_logits, pred_confidences = self.build_forward(tf.expand_dims(x_in, 0), 'test', reuse=None)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, weights)

            pred_annolist = al.AnnoList()

            true_annolist = al.parse(test_boxes)
            data_dir = os.path.dirname(test_boxes)
            image_dir = self.get_image_dir(weights, expname, test_boxes)
            subprocess.call('mkdir -p %s' % image_dir, shell=True)
            for i in range(len(true_annolist)):
                true_anno = true_annolist[i]
                orig_img = imread('%s/%s' % (data_dir, true_anno.imageName))[:,:,:3]
                img = imresize(orig_img, (self.H["image_height"], self.H["image_width"]), interp='cubic')
                feed = {x_in: img}
                (np_pred_boxes, np_pred_confidences) = sess.run([pred_boxes, pred_confidences], feed_dict=feed)
                pred_anno = al.Annotation()
                pred_anno.imageName = true_anno.imageName
                new_img, rects = add_rectangles(self.H, [img], np_pred_confidences, np_pred_boxes,
                                                use_stitching=True, rnn_len=self.H['rnn_len'], min_conf=min_conf, tau=tau, show_suppressed=show_suppressed)
            
                pred_anno.rects = rects
                pred_anno.imagePath = os.path.abspath(data_dir)
                pred_anno = rescale_boxes((self.H["image_height"], self.H["image_width"]), pred_anno, orig_img.shape[0], orig_img.shape[1])
                pred_annolist.append(pred_anno)
                
                imname = '%s/%s' % (image_dir, os.path.basename(true_anno.imageName))
                misc.imsave(imname, new_img)
                if i % 25 == 0:
                    print(i)
        return pred_annolist, true_annolist
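The per-cell confidence handling in the rezoom branch is a reshape, a softmax over the two classes, and a reshape back. A NumPy sketch of just that step, independent of TensorFlow and using hypothetical grid dimensions:

import numpy as np

def softmax_confidences(confs_deltas, grid_area, rnn_len, num_classes=2):
    # Flatten to (grid_area * rnn_len, num_classes), softmax over the class
    # axis, then restore the (grid_area, rnn_len, num_classes) layout --
    # the NumPy analogue of the tf.reshape / tf.nn.softmax pair above.
    logits = confs_deltas.reshape(grid_area * rnn_len, num_classes)
    logits = logits - logits.max(axis=1, keepdims=True)   # numerical stability
    probs = np.exp(logits)
    probs /= probs.sum(axis=1, keepdims=True)
    return probs.reshape(grid_area, rnn_len, num_classes)

# e.g. a 15x20 grid with rnn_len = 1
print(softmax_confidences(np.zeros((300, 1, 2)), 300, 1).shape)  # (300, 1, 2)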
Example #6
def pal2al(_annolist):
    annotations = []

    for _a in _annolist.annotation:
        anno = AnnotationLib.Annotation()

        anno.imageName = _a.imageName

        anno.rects = []

        for _r in _a.rect:
            rect = AnnotationLib.AnnoRect()

            rect.x1 = _r.x1
            rect.x2 = _r.x2
            rect.y1 = _r.y1
            rect.y2 = _r.y2

            if _r.HasField("id"):
                rect.id = _r.id

            if _r.HasField("track_id"):
                rect.track_id = _r.track_id

            if _r.HasField("score"):
                rect.score = _r.score

            if _r.HasField("distance3d"):
                rect.distance3d = _r.distance3d

            if _r.HasField("width3d"):
                rect.width3d = _r.width3d

            if _r.HasField("height3d"):
                rect.height3d = _r.height3d

            if _r.HasField("length3d"):
                rect.length3d = _r.length3d

            anno.rects.append(rect)

        annotations.append(anno)

    return annotations
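The long run of HasField checks copies each optional protobuf field only when it is actually set. That pattern can be collapsed into a single loop; the sketch below uses a tiny stand-in class instead of the real protobuf rect, purely for demonstration:

OPTIONAL_FIELDS = ("id", "track_id", "score",
                   "distance3d", "width3d", "height3d", "length3d")

def copy_optional_fields(src, dst, fields=OPTIONAL_FIELDS):
    # Copy only those optional fields that are set on the source rect.
    for name in fields:
        if src.HasField(name):
            setattr(dst, name, getattr(src, name))
    return dst

class FakeRect(object):
    # Minimal stand-in mimicking the protobuf HasField interface.
    def __init__(self, **kw):
        self._fields = dict(kw)
    def HasField(self, name):
        return name in self._fields
    def __getattr__(self, name):
        return self._fields[name]

class PlainRect(object):
    pass

r = copy_optional_fields(FakeRect(id=3, score=0.8), PlainRect())
print(r.id, r.score)  # -> 3 0.8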
Example #7
def pal2al(_annolist):
    #annotations = [];
    annotations = AnnotationLib.AnnoList()

    for adesc in _annolist.attribute_desc:
        annotations.attribute_desc[adesc.name] = adesc
        print "attribute: ", adesc.name, adesc.id

        for valdesc in adesc.val_to_str:
            annotations.add_attribute_val(adesc.name, valdesc.s, valdesc.id)

    attribute_name_from_id = {
        adesc.id: aname
        for aname, adesc in annotations.attribute_desc.iteritems()
    }
    attribute_dtype_from_id = {
        adesc.id: adesc.dtype
        for aname, adesc in annotations.attribute_desc.iteritems()
    }

    for _a in _annolist.annotation:
        anno = AnnotationLib.Annotation()

        anno.imageName = _a.imageName

        anno.rects = []

        for _r in _a.rect:
            rect = AnnotationLib.AnnoRect()

            rect.x1 = _r.x1
            rect.x2 = _r.x2
            rect.y1 = _r.y1
            rect.y2 = _r.y2

            if _r.HasField("id"):
                rect.id = _r.id

            if _r.HasField("track_id"):
                rect.track_id = _r.track_id

            if _r.HasField("score"):
                rect.score = _r.score

            for _at in _r.attribute:
                try:
                    cur_aname = attribute_name_from_id[_at.id]
                    cur_dtype = attribute_dtype_from_id[_at.id]
                except KeyError as e:
                    print "attribute: ", _at.id
                    print e
                    assert (False)

                if cur_dtype == AnnotationLib.AnnoList.TYPE_INT32:
                    rect.at[cur_aname] = _at.val
                elif cur_dtype == AnnotationLib.AnnoList.TYPE_FLOAT:
                    rect.at[cur_aname] = _at.fval
                elif cur_dtype == AnnotationLib.AnnoList.TYPE_STRING:
                    rect.at[cur_aname] = _at.strval
                else:
                    assert (False)

            anno.rects.append(rect)

        annotations.append(anno)

    return annotations
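The dtype dispatch at the end (val / fval / strval) is the piece that distinguishes this variant from the previous example. It can be sketched in isolation, with a namedtuple standing in for the protobuf attribute and hypothetical dtype tags in place of AnnotationLib.AnnoList.TYPE_*:

from collections import namedtuple

# hypothetical stand-ins for AnnotationLib.AnnoList.TYPE_INT32 / TYPE_FLOAT / TYPE_STRING
TYPE_INT32, TYPE_FLOAT, TYPE_STRING = 1, 2, 3

Attr = namedtuple("Attr", ["id", "val", "fval", "strval"])

def attribute_value(attr, dtype):
    # Select the populated field according to the attribute's declared dtype,
    # mirroring the if/elif chain above.
    field_by_dtype = {TYPE_INT32: "val", TYPE_FLOAT: "fval", TYPE_STRING: "strval"}
    if dtype not in field_by_dtype:
        raise ValueError("unknown attribute dtype: %r" % dtype)
    return getattr(attr, field_by_dtype[dtype])

print(attribute_value(Attr(id=7, val=0, fval=1.5, strval=""), TYPE_FLOAT))  # -> 1.5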
Example #8
    if not os.path.exists(opts.output_dir):
        print "ERROR: output dir not found"
        exit()

    if not os.path.exists(opts.images_dir):
        print "ERROR: images dir not found"
        exit()

    imglist = glob.glob(opts.images_dir + "/*.jpeg")
    imglist.sort()

    # create image list
    annolist = []
    for imgidx in xrange(0, len(imglist)):
        anno = AnnotationLib.Annotation()
        anno.imageName = imglist[imgidx]
        anno.rects = []

        annolist.append(anno)

    # load vatic tracks
    vatic_dump = pickle.load(open(opts.vatic_filename, "rb"))

    num_tracks = len(vatic_dump)
    print "number of tracks: ", num_tracks

    for tidx in xrange(0, num_tracks):
        vatic_boxes = vatic_dump[tidx]["boxes"]
        track_len = len(vatic_boxes)
        print "track ", tidx, ", track_len: ", track_len