Example 1
    def track_features(self, timeholder, visitor_data, channel_regions,
                       position, outdir):
        shutil.rmtree(outdir, True)
        makedirs(outdir)

        for tracks in visitor_data.itervalues():
            for startid, event_data in tracks.iteritems():
                if startid in ('_full_tracks', '_current_branch'):
                    continue
                for chname, region in channel_regions.iteritems():
                    for region_name, feature_names in region.iteritems():
                        try:
                            frame, obj_label, branch = Tracker.split_nodeid(
                                startid)
                        except ValueError:
                            frame, obj_label = Tracker.split_nodeid(startid)
                            branch = 1

                        filename = 'features__P%s__T%05d__O%04d__B%02d__C%s__R%s.txt' \
                            %(position, frame, obj_label, branch, chname, region_name)
                        filename = join(outdir, filename)

                        self._data_per_channel(timeholder, event_data,
                                               filename, chname, region_name,
                                               feature_names, position)
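Every example in this listing revolves around Tracker.split_nodeid, which parses a node id back into its frame, object label and, when present, branch number; the try/except ValueError above falls back to a default branch of 1 when the id only carries two components. A minimal sketch of a compatible helper, assuming node ids are underscore-joined integers such as '12_345' or '12_345_1' (the delimiter and layout are assumptions for illustration, not taken from the source):

def split_nodeid(nodeid):
    # Hypothetical stand-in for Tracker.split_nodeid: split an id such as
    # '12_345' (frame, object label) or '12_345_1' (plus branch) into ints.
    # The underscore delimiter is assumed here purely for illustration.
    parts = tuple(int(p) for p in nodeid.split('_'))
    if len(parts) not in (2, 3):
        raise ValueError('unexpected node id: %r' % (nodeid,))
    return parts

# Mirrors the unpacking pattern used in track_features() above: unpacking a
# two-component id into three names raises ValueError, which selects the
# default branch.
try:
    frame, obj_label, branch = split_nodeid('12_345')
except ValueError:
    frame, obj_label = split_nodeid('12_345')
    branch = 1
assert (frame, obj_label, branch) == (12, 345, 1)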
Example 2
    def full_tracks(self, timeholder, visitor_data, position, outdir):
        shutil.rmtree(outdir, True)
        makedirs(outdir)

        for start_id, data in visitor_data.iteritems():
            for idx, track in enumerate(data['_full']):
                has_header = False
                line1 = []
                line2 = []
                line3 = []

                frame, obj_label = Tracker.split_nodeid(start_id)[:2]
                filename = 'P%s__T%05d__O%04d__B%02d.txt' \
                    %(position, frame, obj_label, idx+1)
                f = file(join(outdir, filename), 'w')

                for node_id in track:
                    frame, obj_id = Tracker.split_nodeid(node_id)

                    coordinate = Coordinate(position=position, time=frame)
                    prefix = [frame, self.meta_data.get_timestamp_relative(coordinate), obj_id]
                    prefix_names = ['frame', 'time', 'objID']
                    items = []

                    for channel in timeholder[frame].values():
                        for region_id in channel.region_names():
                            region = channel.get_region(region_id)
                            if obj_id in region:
                                flkp = self._map_feature_names(region.feature_names)
                                if not has_header:
                                    keys = ['classLabel', 'className']
                                    if channel.NAME == 'Primary':
                                        keys += ['centerX', 'centerY']
                                    keys += flkp.keys()
                                    line1 += [channel.NAME.upper()] * len(keys)
                                    line2 += [str(region_id)] * len(keys)
                                    line3 += keys
                                obj = region[obj_id]
                                features = region.features_by_name(obj_id, flkp.values())
                                values = [x if x is not None else '' for x in [obj.iLabel, obj.strClassName]]
                                if channel.NAME == 'Primary':
                                    values += [obj.oCenterAbs[0], obj.oCenterAbs[1]]
                                values += list(features)
                                items.extend(values)

                    if not has_header:
                        has_header = True
                        prefix_str = [''] * len(prefix)
                        line1 = prefix_str + line1
                        line2 = prefix_str + line2
                        line3 = prefix_names + line3
                        f.write('%s\n' %CSVParams.sep.join(line1))
                        f.write('%s\n' %CSVParams.sep.join(line2))
                        f.write('%s\n' %CSVParams.sep.join(line3))

                    f.write('%s\n' %CSVParams.sep.join([str(i) for i in prefix + items]))
                f.close()
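The line1/line2/line3 buffers build a three-row header for each track file: channel names on the first row, region names on the second and the per-column field names on the third, with the frame/time/objID prefix columns left blank on the first two rows. A toy sketch of the resulting layout, with made-up channel, region and feature names and a tab separator standing in for CSVParams.sep:

# Illustrative only: fabricate the three header rows for one channel/region.
sep = '\t'
prefix_names = ['frame', 'time', 'objID']
keys = ['classLabel', 'className', 'centerX', 'centerY', 'roisize']

line1 = [''] * len(prefix_names) + ['PRIMARY'] * len(keys)
line2 = [''] * len(prefix_names) + ['primary'] * len(keys)
line3 = prefix_names + keys

header = '\n'.join(sep.join(line) for line in (line1, line2, line3))
print(header)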
Example 3
 def start_nodes(self):
     """Return all start nodes i.e. nodes without incoming edges."""
     start_nodes = [
         node_id for node_id in self.graph.node_list()
         if self.graph.in_degree(node_id) == 0
     ]
     start_nodes.sort(key=lambda x: Tracker.split_nodeid(x)[0])
     return start_nodes
Example 4
    def track_features(self, timeholder,  visitor_data, channel_regions,
                              position, outdir):
        shutil.rmtree(outdir, True)
        makedirs(outdir)

        for tracks in visitor_data.itervalues():
            for startid, event_data in tracks.iteritems():
                if startid.startswith('_'):
                    continue
                for chname, region in channel_regions.iteritems():
                    for region_name, feature_names in region.iteritems():
                        try:
                            frame, obj_label, branch = Tracker.split_nodeid(startid)
                        except ValueError:
                            frame, obj_label = Tracker.split_nodeid(startid)
                            branch = 1
                        filename = 'features_P%s_T%05d_O%04d_B%02d_C%s_R%s.txt' \
                            %(position, frame, obj_label, branch, chname, region_name)
                        filename = join(outdir, filename)

                        self._data_per_channel(timeholder, event_data, filename, chname,
                                               region_name, feature_names, position)
Example 5
 def centers(self):
     """Return the a list of the object centers for each track."""
     centers = dict()
     for startid, eventdata in self.iterevents():
         if startid in ['_full_tracks', '_current_branch']:
             continue
         data = list()
         for nodeids in zip(*eventdata['tracks']):
             for nodeid in nodeids:
                 obj = self.graph.node_data(nodeid)
                 frame = Tracker.split_nodeid(nodeid)[0]
                 data.append((int(frame), obj.iId, obj.oCenterAbs))
         centers[startid] = data
     return centers
Example 6
 def centers(self):
     """Return the a list of the object centers for each track."""
     centers = dict()
     for startid, eventdata in self.iterevents():
         if startid in ['_full_tracks', '_current_branch']:
             continue
         data = list()
         for nodeids in zip(*eventdata['tracks']):
             for nodeid in nodeids:
                 obj = self.graph.node_data(nodeid)
                 frame = Tracker.split_nodeid(nodeid)[0]
                 data.append((int(frame), obj.iId, obj.oCenterAbs))
         centers[startid] = data
     return centers
Example 7
    def bboxes(self, size=None, border=0):
        bboxes = {}
        for startid, eventdata in self.iterevents():
            if startid in ['_full', '_current']:
                continue
            data = []
            for nodeids in zip(*eventdata['tracks']):
                nodeid = nodeids[0]
                frame = Tracker.split_nodeid(nodeid)[0]
                objids = [Tracker.split_nodeid(n)[1] for n in nodeids]
                objects = [self.graph.node_data(n) for n in nodeids]

                minX = min([obj.oRoi.upperLeft[0] for obj in objects])
                minY = min([obj.oRoi.upperLeft[1] for obj in objects])
                maxX = max([obj.oRoi.lowerRight[0] for obj in objects])
                maxY = max([obj.oRoi.lowerRight[1] for obj in objects])
                width = maxX - minX + 1
                height = maxY - minY + 1
                centerX = int(
                    round(np.average([obj.oCenterAbs[0] for obj in objects])))
                centerY = int(
                    round(np.average([obj.oCenterAbs[1] for obj in objects])))
                data.append((frame, centerX, centerY, width, height, objids))
            data1 = np.array(data, 'O')
            if size is not None and len(size) == 2:
                diffX = int(size[0] / 2)
                diffY = int(size[1] / 2)
            else:
                diffX = int(max(data1[:, 3]) / 2 + border)
                diffY = int(max(data1[:, 4]) / 2 + border)
            # cast the frame to a plain int in case numpy promoted it to float64
            timedata = [(int(d[0]), (d[1] - diffX, d[2] - diffY,
                                     d[1] + diffX - 1 + size[0] % 2,
                                     d[2] + diffY - 1 + size[1] % 2), d[5])
                        for d in data1]
            bboxes[startid] = timedata
        return bboxes
Example 8
    def bboxes(self, size=None, border=0):
        bboxes = {}
        for startid, eventdata in self.iterevents():
            if startid  in ['_full', '_current']:
                continue
            data = []
            for nodeids in zip(*eventdata['tracks']):
                nodeid = nodeids[0]
                frame = Tracker.split_nodeid(nodeid)[0]
                objids = [Tracker.split_nodeid(n)[1] for n in nodeids]
                objects = [self.graph.node_data(n) for n in nodeids]

                minX = min([obj.oRoi.upperLeft[0] for obj in objects])
                minY = min([obj.oRoi.upperLeft[1] for obj in objects])
                maxX = max([obj.oRoi.lowerRight[0] for obj in objects])
                maxY = max([obj.oRoi.lowerRight[1] for obj in objects])
                width  = maxX - minX + 1
                height = maxY - minY + 1
                centerX = int(round(np.average([obj.oCenterAbs[0] for obj in objects])))
                centerY = int(round(np.average([obj.oCenterAbs[1] for obj in objects])))
                data.append((frame, centerX, centerY, width, height, objids))
            data1 = np.array(data, 'O')
            if size is not None and len(size) == 2:
                diffX = int(size[0] / 2)
                diffY = int(size[1] / 2)
            else:
                diffX = int(max(data1[:,3])/2 + border)
                diffY = int(max(data1[:,4])/2 + border)
            # cast the frame to a plain int in case numpy promoted it to float64
            timedata = [(int(d[0]),
                         (d[1] - diffX,
                          d[2] - diffY,
                          d[1] + diffX - 1 + size[0] %2,
                          d[2] + diffY - 1 + size[1] %2),
                         d[5]) for d in data1]
            bboxes[startid] = timedata
        return bboxes
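The diffX/diffY arithmetic turns each per-frame center into a crop window of the requested size: for size=(50, 50) and a center at x=100 the box runs from 100-25 to 100+25-1, i.e. 75..124 inclusive, exactly 50 pixels, and the size[0] % 2 term restores the pixel lost to integer halving when the requested size is odd. (Note that the size-is-None fallback derives diffX/diffY from the track data but the parity term still indexes size, so a concrete size appears to be expected in practice.) A standalone sketch of the same computation for one axis, with a made-up helper name:

def crop_window(center, size):
    # Same arithmetic as bboxes() above, for a single axis: an inclusive
    # [lo, hi] interval of exactly `size` pixels centred on `center`.
    half = int(size / 2)
    lo = center - half
    hi = center + half - 1 + size % 2
    return lo, hi

assert crop_window(100, 50) == (75, 124)   # 124 - 75 + 1 == 50 pixels
assert crop_window(100, 51) == (75, 125)   # odd sizes keep all 51 pixels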
Example 9
 def _split_nodeid(self, nodeid):
     ret = Tracker.split_nodeid(nodeid)
     if len(ret) == 2:
         ret = ret + (1,)
     return ret
Example 10
    def _data_per_channel(self, timeholder, event_data, filename, channel_name, region_name,
                          feature_names, position):

        eventid = event_data['eventId']
        event_frame, _ = Tracker.split_nodeid(eventid)
        has_split = 'splitId' in event_data

        header_names = ['Frame', 'Timestamp', 'isEvent']
        if has_split:
            header_names.append('isSplit')
            if event_data['splitId'] is not None:
                split_frame, _ = Tracker.split_nodeid(event_data['splitId'])
            else:
                split_frame = None

        table = []
        # zip nodes with same time together
        for nodeids in zip(*event_data['tracks']):
            objids = []
            frame = None
            for nodeid in nodeids:
                node_frame, objid = Tracker.split_nodeid(nodeid)
                if frame is None:
                    frame = node_frame
                else:
                    assert frame == node_frame
                objids.append(objid)

            channel = timeholder[frame][channel_name]
            sample_holder = channel.get_region(region_name)

            if feature_names is None:
                feature_names = sample_holder.feature_names

            if CSVParams.objId not in header_names:
                # setup header line
                header_names.append(CSVParams.objId)
                header_names += [CSVParams.class_ %x for x in ['name', 'label', 'probability']]
                # only feature_names scales according to settings
                header_names += [CSVParams.feature %fn for fn in feature_names]
                header_names += [CSVParams.tracking %tf for tf in CSVParams.tracking_features]

            coordinate = Coordinate(position=position, time=frame)
            data = {'Frame' : frame,
                    'Timestamp': self.meta_data.get_timestamp_relative(coordinate),
                    'isEvent': int(frame==event_frame)}

            if has_split:
                data['isSplit'] = int(frame==split_frame)

            #for iIdx, iObjId in enumerate(lstObjectIds):
            objid = objids[0]
            if objid in sample_holder:
                sample = sample_holder[objid]
                data[CSVParams.objId] = objid

                # classification data
                if sample.iLabel is not None:
                    data[CSVParams.class_ %'label'] = sample.iLabel
                    data[CSVParams.class_ %'name'] = sample.strClassName
                    data[CSVParams.class_ %'probability'] = \
                        ','.join(['%d:%.5f' % (int(x),y) for x,y in sample.dctProb.iteritems()])

                common_ftr = [f for f in set(sample_holder.feature_names).intersection(feature_names)]
                features = sample_holder.features_by_name(objid, common_ftr)
                for feature, fname in zip(features, common_ftr):
                    data[CSVParams.feature %fname] = feature

                # features not calculated are exported as NAN
                diff_ftr = [f for f in set(feature_names).difference(sample_holder.feature_names)]
                for df in diff_ftr:
                    data[CSVParams.feature %df] = float("NAN")

                # object tracking data (absolute center)
                data[CSVParams.tracking %'center_x'] = sample.oCenterAbs[0]
                data[CSVParams.tracking %'center_y'] = sample.oCenterAbs[1]
                data[CSVParams.tracking %'upperleft_x'] = sample.oRoi.upperLeft[0]
                data[CSVParams.tracking %'upperleft_y'] = sample.oRoi.upperLeft[1]
                data[CSVParams.tracking %'lowerright_x'] = sample.oRoi.lowerRight[0]
                data[CSVParams.tracking %'lowerright_y'] = sample.oRoi.lowerRight[1]
            else:
                # we rather skip the entire event in case the object ID is not valid
                return
            table.append(data)

        if len(table) > 0:
            with open(filename, 'w') as fp:
                writer = csv.DictWriter(fp, fieldnames=header_names,
                                        delimiter=CSVParams.sep)
                writer.writeheader()
                writer.writerows(table)
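The export collects one dict per time point and leaves header and row layout to csv.DictWriter, keyed by the header_names assembled above. A self-contained sketch of that pattern; the field names, separator and output path below are placeholders rather than the real CSVParams values:

import csv

rows = [{'Frame': 1, 'isEvent': 1}, {'Frame': 2, 'isEvent': 0}]
with open('example_track.txt', 'w') as fp:
    writer = csv.DictWriter(fp, fieldnames=['Frame', 'isEvent'], delimiter='\t')
    writer.writeheader()      # one header line built from the fieldnames
    writer.writerows(rows)    # one line per time point

The later variant of this method (Example 14) opens the file in binary mode ('wb'), which is what the Python 2 csv module expects; on Python 3 a text-mode file opened with newline='' serves the same purpose.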
Example 11
 def start_nodes(self):
     """Return all start nodes i.e. nodes without incoming edges."""
     start_nodes = [node_id for node_id in self.graph.node_list()
                    if self.graph.in_degree(node_id) == 0]
     start_nodes.sort(key=lambda x: Tracker.split_nodeid(x)[0])
     return start_nodes
Example 12
    def __init__(self, eventselector, strPathIn, oP, strPathOut,
                 imageCompression="85",
                 imageSuffix=".jpg",
                 border=0,
                 writeSubdirs=True,
                 writeDescription=True,
                 size=None,
                 oneFilePerTrack=False):

        self._bHasImages = False
        dctTimePoints = {}

        for strStartId, lstTimeData in eventselector.bboxes( \
            size=size, border=border).iteritems():
            items = Tracker.split_nodeid(strStartId)
            iStartT, iObjId = items[:2]
            if len(items) == 3:
                branch_id = items[2]
            else:
                branch_id = 1

            if writeSubdirs:
                strPathOutEvent = os.path.join(strPathOut,
                                               self._format_name(oP, iStartT, iObjId, branch_id))
            else:
                strPathOutEvent = strPathOut
            makedirs(strPathOutEvent)

            if writeDescription:
                oFile = file(os.path.join(strPathOutEvent,
                                          "_%s.txt" % self._format_name(oP, iStartT, iObjId, branch_id)), "w")
                lstData = ["Frame", "ObjId", "x1", "y1", "x2", "y2"]
                oFile.write("%s\n" % "\t".join(map(str, lstData)))

            for iCnt, (iT, tplBoundingBox, lstObjIds) in enumerate(lstTimeData):

                if writeDescription:
                    lstData = [iT, ';'.join(map(str, lstObjIds))] + list(tplBoundingBox)
                    oFile.write("%s\n" % "\t".join(map(str, lstData)))
                if iT not in dctTimePoints:
                    dctTimePoints[iT] = []
                dctTimePoints[iT].append((strStartId, lstObjIds, iCnt, strPathOutEvent, tplBoundingBox))

            if writeDescription:
                oFile.close()

        for idx, (iT, lstItems) in enumerate(dctTimePoints.iteritems()):

            #print iT, lstItems
            imgXY = self._getImage(strPathIn, iT)

            for strStartId, lstObjIds, iCnt, strPathOutEvent, tplBoundingBox in lstItems:

                x1, y1, x2, y2 = tplBoundingBox
                x1Corr = 0 if x1 < 0 else x1
                y1Corr = 0 if y1 < 0 else y1
                x2Corr = imgXY.width-1 if x2 >= imgXY.width else x2
                y2Corr = imgXY.height-1 if y2 >= imgXY.height else y2

                imgSub = ccore.subImage(imgXY,
                                        ccore.Diff2D(x1Corr, y1Corr),
                                        ccore.Diff2D(x2Corr-x1Corr+1, y2Corr-y1Corr+1))

                if (x1 < 0 or y1 < 0 or
                    x2 >= imgXY.width or y2 >= imgXY.height):
                    imgSub2 = self.IMAGE_CLASS(size[0], size[1])
                    ccore.copySubImage(imgSub, imgSub2, ccore.Diff2D(x1Corr-x1, y1Corr-y1))
                    imgSub = imgSub2

                assert imgSub.width == size[0]
                assert imgSub.width == x2-x1+1
                assert imgSub.height == size[1]
                assert imgSub.height == y2-y1+1

                if self.PROCESS_LABEL:
                    lstImages = []
                    for iObjId in lstObjIds:
                        lstImages.append(ccore.copyImageIfLabel(imgSub, imgSub, iObjId))
                    imgSub = ccore.projectImage(lstImages, ccore.ProjectionType.MaxProjection)

                strFilenameImage = os.path.join(strPathOutEvent, "P%s__T%05d%s" % (oP, iT, imageSuffix))
                ccore.writeImage(imgSub, strFilenameImage)

        if oneFilePerTrack and os.path.isdir(strPathOut):
            self.convertToOneFilePerTrack(strPathOut, imageCompression)
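The x1Corr/y1Corr block clamps each requested bounding box to the image bounds before ccore.subImage cuts it out; when the original box ran past an edge, the clipped cut is pasted into a blank image of the requested size so every exported gallery frame keeps identical dimensions. A standalone sketch of the clamping step (the helper name is illustrative; the inclusive-coordinate convention follows the code above):

def clamp_bbox(x1, y1, x2, y2, width, height):
    # Clip an inclusive bounding box to [0, width-1] x [0, height-1],
    # mirroring the corrections applied before ccore.subImage() above.
    x1c = 0 if x1 < 0 else x1
    y1c = 0 if y1 < 0 else y1
    x2c = width - 1 if x2 >= width else x2
    y2c = height - 1 if y2 >= height else y2
    return x1c, y1c, x2c, y2c

# A box hanging over the top-left corner of a 100x80 image:
assert clamp_bbox(-10, -5, 39, 44, 100, 80) == (0, 0, 39, 44)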
Example 13
    def full_tracks(self, timeholder, visitor_data, position, outdir):
        shutil.rmtree(outdir, True)
        makedirs(outdir)

        for start_id, data in visitor_data.iteritems():
            for idx, track in enumerate(data['_full_tracks']):
                has_header = False
                line1 = []
                line2 = []
                line3 = []

                frame, obj_label = Tracker.split_nodeid(start_id)[:2]
                filename = 'P%s__T%05d__O%04d__B%02d.txt' \
                    %(position, frame, obj_label, idx+1)
                f = file(join(outdir, filename), 'w')

                for node_id in track:
                    frame, obj_id = Tracker.split_nodeid(node_id)

                    coordinate = Coordinate(position=position, time=frame)
                    prefix = [
                        frame,
                        self.meta_data.get_timestamp_relative(coordinate),
                        obj_id
                    ]
                    prefix_names = ['frame', 'time', 'objID']
                    items = []

                    for channel in timeholder[frame].values():
                        for region_id in channel.region_names():
                            region = channel.get_region(region_id)
                            if obj_id in region:
                                flkp = self._map_feature_names(
                                    region.feature_names)
                                if not has_header:
                                    keys = ['classLabel', 'className']
                                    if channel.NAME == 'Primary':
                                        keys += ['centerX', 'centerY']
                                    keys += flkp.keys()
                                    line1 += [channel.NAME.upper()] * len(keys)
                                    line2 += [str(region_id)] * len(keys)
                                    line3 += keys
                                obj = region[obj_id]
                                features = region.features_by_name(
                                    obj_id, flkp.values())
                                values = [
                                    x if x is not None else ''
                                    for x in [obj.iLabel, obj.strClassName]
                                ]
                                if channel.NAME == 'Primary':
                                    values += [
                                        obj.oCenterAbs[0], obj.oCenterAbs[1]
                                    ]
                                values += list(features)
                                items.extend(values)

                    if not has_header:
                        has_header = True
                        prefix_str = [''] * len(prefix)
                        line1 = prefix_str + line1
                        line2 = prefix_str + line2
                        line3 = prefix_names + line3
                        f.write('%s\n' % CSVParams.sep.join(line1))
                        f.write('%s\n' % CSVParams.sep.join(line2))
                        f.write('%s\n' % CSVParams.sep.join(line3))

                    f.write(
                        '%s\n' %
                        CSVParams.sep.join([str(i) for i in prefix + items]))
                f.close()
Example 14
    def _data_per_channel(self, timeholder, event_data, filename, channel_name,
                          region_name, feature_names, position):

        eventid = event_data['eventId']
        event_frame, _ = Tracker.split_nodeid(eventid)
        has_split = 'splitId' in event_data

        header_names = ['Frame', 'Timestamp', 'isEvent']
        if has_split:
            header_names.append('isSplit')
            if event_data['splitId'] is not None:
                split_frame, _ = Tracker.split_nodeid(event_data['splitId'])
            else:
                split_frame = None

        table = []
        # zip nodes with same time together
        for nodeids in zip(*event_data['tracks']):
            objids = []
            frame = None
            for nodeid in nodeids:
                node_frame, objid = Tracker.split_nodeid(nodeid)
                if frame is None:
                    frame = node_frame
                else:
                    assert frame == node_frame
                objids.append(objid)

            channel = timeholder[frame][channel_name]
            sample_holder = channel.get_region(region_name)

            if feature_names is None:
                feature_names = sample_holder.feature_names

            if CSVParams.objId not in header_names:
                # setup header line
                header_names.append(CSVParams.objId)
                header_names += [
                    CSVParams.class_ % x
                    for x in ['name', 'label', 'probability']
                ]
                # only feature_names scales according to settings
                header_names += [
                    CSVParams.feature % fn for fn in feature_names
                ]
                header_names += [
                    CSVParams.tracking % tf
                    for tf in CSVParams.tracking_features
                ]

            coordinate = Coordinate(position=position, time=frame)
            data = {
                'Frame': frame,
                'Timestamp': self.meta_data.get_timestamp_relative(coordinate),
                'isEvent': int(frame == event_frame)
            }

            if has_split:
                data['isSplit'] = int(frame == split_frame)

            #for iIdx, iObjId in enumerate(lstObjectIds):
            objid = objids[0]
            if objid in sample_holder:
                sample = sample_holder[objid]
                data[CSVParams.objId] = objid

                # classification data
                if sample.iLabel is not None:
                    data[CSVParams.class_ % 'label'] = sample.iLabel
                    data[CSVParams.class_ % 'name'] = sample.strClassName
                    data[CSVParams.class_ %'probability'] = \
                        ','.join(['%d:%.5f' % (int(x),y) for x,y in
                                  sample.dctProb.iteritems()])

                common_ftr = [
                    f for f in set(sample_holder.feature_names).intersection(
                        feature_names)
                ]
                features = sample_holder.features_by_name(objid, common_ftr)
                for feature, fname in zip(features, common_ftr):
                    data[CSVParams.feature % fname] = feature

                # features not calculated are exported as NAN
                diff_ftr = [
                    f for f in set(feature_names).difference(
                        sample_holder.feature_names)
                ]
                for df in diff_ftr:
                    data[CSVParams.feature % df] = float("NAN")

                # object tracking data (absolute center)
                data[CSVParams.tracking % 'center_x'] = sample.oCenterAbs[0]
                data[CSVParams.tracking % 'center_y'] = sample.oCenterAbs[1]
                data[CSVParams.tracking %
                     'upperleft_x'] = sample.oRoi.upperLeft[0]
                data[CSVParams.tracking %
                     'upperleft_y'] = sample.oRoi.upperLeft[1]
                data[CSVParams.tracking %
                     'lowerright_x'] = sample.oRoi.lowerRight[0]
                data[CSVParams.tracking %
                     'lowerright_y'] = sample.oRoi.lowerRight[1]
            else:
                # we rather skip the entire event in case the object ID is not valid
                return
            table.append(data)

        if len(table) > 0:
            with open(filename, 'wb') as fp:
                writer = csv.DictWriter(fp,
                                        fieldnames=header_names,
                                        delimiter=CSVParams.sep)
                writer.writeheader()
                writer.writerows(table)
Example 15
 def _split_nodeid(self, nodeid):
     ret = Tracker.split_nodeid(nodeid)
     if len(ret) == 2:
         ret = ret + (1, )
     return ret