Example #1
    def initOut(self):
        print '     Loading Data...'
        self.train_set = DatasetFromFolder(sequence_dir, '../MOT/MOT16/test/MOT16-%02d'%self.seq_index)

        detection_dir = self.out_dir +'res_training_det.txt'
        res_training = self.out_dir + 'res_training.txt'  # the result of the training data
        self.createTxt(detection_dir)
        self.createTxt(res_training)
        self.copyLines(self.seq_index, 1, detection_dir, self.tt, 1)

        self.evaluation(1, self.tt, detection_dir, res_training)
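
Each line that copyLines and evaluation read or write here is assumed to follow the MOTChallenge CSV layout (frame, id, bb_left, bb_top, bb_width, bb_height, conf, ...); the later, full examples index attrs[0] as the frame number, attrs[1] as the track id and attrs[6] as the confidence accordingly. A minimal parsing sketch with made-up values:

    # a hypothetical det.txt row; raw detections carry id = -1 until the tracker assigns one
    line = '1,-1,1359.1,413.3,120.3,362.8,2.3,-1,-1,-1'
    attrs = line.strip().split(',')
    frame, conf = int(attrs[0]), float(attrs[6])  # attrs[2:6] hold x, y, w, h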
Example #2
    def initOut(self):
        start = time.time()
        print '     Loading Data...'
        print '     Training'
        self.train_set = DatasetFromFolder(sequence_dir)

        gt_training = self.out_dir + 'gt_training.txt'  # the gt of the training data
        self.copyLines(self.seq_index, 1, gt_training, self.tt)

        detection_dir = self.out_dir + 'res_training_det.txt'
        res_training = self.out_dir + 'res_training.txt'  # the result of the training data
        self.createTxt(detection_dir)
        self.createTxt(res_training)
        self.copyLines(self.seq_index, 1, detection_dir, self.tt, 1)

        # Evaluating on the training data
        # motmetrics = open(metrics_dir, 'a')
        # print >> motmetrics, '*'*30, self.tt, '*'*30
        # print >> motmetrics, 'Training'
        self.evaluation(1, self.tt, detection_dir, res_training)
        print '     Time consuming:', (time.time() - start) / 60.0
        # cmd = 'python3 evaluation.py %s %s'%(gt_training, res_training)
        # (status, output) = commands.getstatusoutput(cmd)
        # print >> motmetrics, output
        # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
        # motmetrics.close()

        if self.tt < self.length:
            # Evaluating on the validation data
            start = time.time()
            print '     Validation'

            # The distant sequence
            head = self.length - self.tt + 1
            tail = self.length

            # The sequence nearby
            # head = self.tt
            # tail = 2*self.tt-1

            gt_validation = self.out_dir + 'gt_validation.txt'  # the gt of the validation data
            self.copyLines(self.seq_index, head, gt_validation, tail)

            detection_dir = self.out_dir + 'res_validation_det.txt'
            res_validation = self.out_dir + 'res_validation.txt'  # the result of the validation data
            self.createTxt(detection_dir)
            self.createTxt(res_validation)
            self.copyLines(self.seq_index, head, detection_dir, tail, 1)

            # motmetrics = open(metrics_dir, 'a')
            # print >> motmetrics, 'Validation'
            self.evaluation(head, tail, detection_dir, res_validation)
            print '     Time consuming:', (time.time() - start) / 60.0
            # cmd = 'python3 evaluation.py %s %s'%(gt_validation, res_validation)
            # (status, output) = commands.getstatusoutput(cmd)
            # print >> motmetrics, output
            # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
            # motmetrics.close()
        else:
            # Evaluating on the validation data
            for seq in seqs:
                if seq == self.seq_index:
                    continue
                print '     %02d_Validation' % seq
                start = time.time()
                seq_dir = 'MOT16/train/MOT%d-%02d' % (year, seq)
                self.train_set = DatasetFromFolder(seq_dir)
                gt_seq = self.out_dir + 'gt_%02d.txt' % seq
                seqL = self.copyLines(seq, 1, gt_seq)

                detection_dir = self.out_dir + 'res_%02d_det.txt' % seq
                c_validation = self.out_dir + 'res_%02d.txt' % seq
                self.createTxt(detection_dir)
                self.createTxt(c_validation)
                self.copyLines(seq, 1, detection_dir, tag=1)

                # motmetrics = open(metrics_dir, 'a')
                # print >> motmetrics, '%02d_validation'%seq
                self.evaluation(1, seqL, detection_dir, c_validation)
                print '     Time consuming:', (time.time() - start) / 60.0
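
Two validation strategies appear above: when tt is smaller than the total sequence length, a clip at the far end of the same sequence is used ('the distant sequence'); otherwise every other training sequence in seqs is evaluated in full. A small worked example with made-up numbers:

    # assuming length = 600 frames and tt = 200 training frames
    head = 600 - 200 + 1   # 401
    tail = 600             # validation covers frames 401-600, training used frames 1-200
    # the commented-out 'sequence nearby' variant would instead give head = 200, tail = 399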
Example #3
class GN():
    def __init__(self, seq_index, tt, length, cuda=True):
        '''
        Evaluating with the MotMetrics
        :param seq_index: the number of the sequence
        :param tt: the number of frames used for training (the train/test split point)
        :param length: the total number of frames of the sequence
        :param cuda: True - GPU, False - CPU
        '''
        self.seq_index = seq_index
        self.hungarian = Munkres()
        self.device = torch.device("cuda" if cuda else "cpu")
        self.tt = tt
        self.length = length
        self.missingCounter = 0
        self.sideConnection = 0

        print '     Loading the model...'
        self.loadModel()

        self.out_dir = t_dir + 'motmetrics/'
        if not os.path.exists(self.out_dir):
            os.mkdir(self.out_dir)
        else:
            deleteDir(self.out_dir)
            os.mkdir(self.out_dir)
        self.initOut()

    def initOut(self):
        start = time.time()
        print '     Loading Data...'
        print '     Training'
        self.train_set = DatasetFromFolder(sequence_dir)

        gt_training = self.out_dir + 'gt_training.txt'  # the gt of the training data
        self.copyLines(self.seq_index, 1, gt_training, self.tt)

        detection_dir = self.out_dir + 'res_training_det.txt'
        res_training = self.out_dir + 'res_training.txt'  # the result of the training data
        self.createTxt(detection_dir)
        self.createTxt(res_training)
        self.copyLines(self.seq_index, 1, detection_dir, self.tt, 1)

        # Evaluating on the training data
        # motmetrics = open(metrics_dir, 'a')
        # print >> motmetrics, '*'*30, self.tt, '*'*30
        # print >> motmetrics, 'Training'
        self.evaluation(1, self.tt, detection_dir, res_training)
        print '     Time consuming:', (time.time() - start) / 60.0
        # cmd = 'python3 evaluation.py %s %s'%(gt_training, res_training)
        # (status, output) = commands.getstatusoutput(cmd)
        # print >> motmetrics, output
        # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
        # motmetrics.close()

        if self.tt < self.length:
            # Evaluating on the validation data
            start = time.time()
            print '     Validation'

            # The distant sequence
            head = self.length - self.tt + 1
            tail = self.length

            # The sequence nearby
            # head = self.tt
            # tail = 2*self.tt-1

            gt_validation = self.out_dir + 'gt_validation.txt'  # the gt of the validation data
            self.copyLines(self.seq_index, head, gt_validation, tail)

            detection_dir = self.out_dir + 'res_validation_det.txt'
            res_validation = self.out_dir + 'res_validation.txt'  # the result of the validation data
            self.createTxt(detection_dir)
            self.createTxt(res_validation)
            self.copyLines(self.seq_index, head, detection_dir, tail, 1)

            # motmetrics = open(metrics_dir, 'a')
            # print >> motmetrics, 'Validation'
            self.evaluation(head, tail, detection_dir, res_validation)
            print '     Time consuming:', (time.time() - start) / 60.0
            # cmd = 'python3 evaluation.py %s %s'%(gt_validation, res_validation)
            # (status, output) = commands.getstatusoutput(cmd)
            # print >> motmetrics, output
            # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
            # motmetrics.close()
        else:
            # Evaluating on the validation data
            for seq in seqs:
                if seq == self.seq_index:
                    continue
                print '     %02d_Validation' % seq
                start = time.time()
                seq_dir = 'MOT16/train/MOT%d-%02d' % (year, seq)
                self.train_set = DatasetFromFolder(seq_dir)
                gt_seq = self.out_dir + 'gt_%02d.txt' % seq
                seqL = self.copyLines(seq, 1, gt_seq)

                detection_dir = self.out_dir + 'res_%02d_det.txt' % seq
                c_validation = self.out_dir + 'res_%02d.txt' % seq
                self.createTxt(detection_dir)
                self.createTxt(c_validation)
                self.copyLines(seq, 1, detection_dir, tag=1)

                # motmetrics = open(metrics_dir, 'a')
                # print >> motmetrics, '%02d_validation'%seq
                self.evaluation(1, seqL, detection_dir, c_validation)
                print '     Time consuming:', (time.time() - start) / 60.0
                # cmd = 'python3 evaluation.py %s %s'%(gt_seq, c_validation)
                # (status, output) = commands.getstatusoutput(cmd)
                # print >> motmetrics, output
                # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
                # motmetrics.close()

    def getSeqL(self, info):
        # get the length of the sequence
        f = open(info, 'r')
        f.readline()
        for line in f.readlines():
            line = line.strip().split('=')
            if line[0] == 'seqLength':
                seqL = int(line[1])
        f.close()
        return seqL
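
    # Note: getSeqL assumes the standard MOTChallenge seqinfo.ini layout, roughly
    #   [Sequence]
    #   name=MOT16-02
    #   seqLength=600
    #   ...
    # (values illustrative): readline() skips the [Sequence] header and each remaining
    # key=value line is split on '=' so that only seqLength is picked up.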

    def copyLines(self, seq, head, gt_seq, tail=-1, tag=0):
        '''
        Copy the ground truth (or detections) within [head, tail]
        :param seq: the number of the sequence
        :param head: the head frame number
        :param gt_seq: the path of the output file
        :param tail: the tail frame number; -1 means the whole sequence
        :param tag: 0 - copy from gt.txt, 1 - copy from the detection file
        :return: the tail frame number actually used (seqL)
        '''
        basic_dir = 'MOT16/train/MOT%d-%02d/' % (year, seq)
        seqL = tail if tail != -1 else self.getSeqL(basic_dir + 'seqinfo.ini')

        det_dir = 'gt/gt_det.txt' if test_gt_det else 'det/det.txt'
        seq_dir = basic_dir + ('gt/gt.txt' if tag == 0 else det_dir)
        inStream = open(seq_dir, 'r')

        outStream = open(gt_seq, 'w')
        for line in inStream.readlines():
            line = line.strip()
            attrs = line.split(',')
            f_num = int(attrs[0])
            if f_num >= head and f_num <= seqL:
                print >> outStream, line
        outStream.close()

        inStream.close()
        return seqL

    def createTxt(self, out_file):
        f = open(out_file, 'w')
        f.close()

    def loadModel(self):
        # self.Uphi = torch.load(t_dir+'uphi.pth').to(self.device)
        # self.Ephi = torch.load(t_dir+'ephi.pth').to(self.device)
        # self.u = torch.load(t_dir+'u.pth')
        # self.u = self.u.to(self.device)

        self.Uphi = torch.load('Results/MOT16/IoU/all/uphi_13.pth').to(
            self.device)
        self.Ephi = torch.load('Results/MOT16/IoU/all/ephi_13.pth').to(
            self.device)
        self.u = torch.load('Results/MOT16/IoU/all/u_13.pth')
        self.u = self.u.to(self.device)

    def swapFC(self):
        self.cur = self.cur ^ self.nxt
        self.nxt = self.cur ^ self.nxt
        self.cur = self.cur ^ self.nxt
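        # The three XOR assignments above just exchange the two buffer indices
        # (self.cur and self.nxt are always 0 and 1) without a temporary variable;
        # an equivalent, more readable form would be:
        #     self.cur, self.nxt = self.nxt, self.cur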

    def linearModel(self, out, attr1, attr2):
        # print 'I got you! *.*'
        t = attr1[-1]
        self.sideConnection += 1
        if t > f_gap:
            return
        frame = int(attr1[0])
        x1, y1, w1, h1 = float(attr1[2]), float(attr1[3]), float(
            attr1[4]), float(attr1[5])
        x2, y2, w2, h2 = float(attr2[2]), float(attr2[3]), float(
            attr2[4]), float(attr2[5])

        x_delta = (x2 - x1) / t
        y_delta = (y2 - y1) / t
        w_delta = (w2 - w1) / t
        h_delta = (h2 - h1) / t

        for i in xrange(1, t):
            frame += 1
            x1 += x_delta
            y1 += y_delta
            w1 += w_delta
            h1 += h_delta
            attr1[0] = str(frame)
            attr1[2] = str(x1)
            attr1[3] = str(y1)
            attr1[4] = str(w1)
            attr1[5] = str(h1)
            line = ''
            for attr in attr1[:-1]:
                line += attr + ','
            line = line[:-1]
            print >> out, line
        self.missingCounter += t - 1
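        # linearModel fills the t - 1 frames skipped while the target was missing by
        # interpolating the box linearly between the last written box (attr1) and the
        # re-detected one (attr2). E.g. if attr1 is at frame 10 with x = 100 and attr2
        # is found t = 3 frames later with x = 130, frames 11 and 12 get x = 110 and
        # x = 120 (and the same per-frame step for y, w and h).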

    def evaluation(self, head, tail, gtFile, outFile):
        '''
        Evaluation on dets
        :param head: the head frame number
        :param tail: the tail frame number
        :param gtFile: the input detection file (the clipped detection file written by copyLines)
        :param outFile: the output file for the tracking results
        :return: None
        '''
        gtIn = open(gtFile, 'r')
        self.cur, self.nxt = 0, 1
        line_con = [[], []]
        id_con = [[], []]
        id_step = 1

        step = head + self.train_set.setBuffer(head)
        while step < tail:
            t_gap = self.train_set.loadNext()
            step += t_gap
            # print head+step, 'F',

            u_ = self.Uphi(self.train_set.E, self.train_set.V, self.u)

            # print 'Fo'
            m = self.train_set.m
            n = self.train_set.n
            if n == 0:
                print 'There is no detection in the rest of sequence!'
                break

            if id_step == 1:
                out = open(outFile, 'a')
                i = 0
                while i < m:
                    attrs = gtIn.readline().strip().split(',')
                    if float(attrs[6]) >= tau_conf_score:
                        attrs.append(1)
                        attrs[1] = str(id_step)
                        line = ''
                        for attr in attrs[:-1]:
                            line += attr + ','
                        line = line[:-1]
                        print >> out, line
                        line_con[self.cur].append(attrs)
                        id_con[self.cur].append(id_step)
                        id_step += 1
                        i += 1
                out.close()

            i = 0
            while i < n:
                attrs = gtIn.readline().strip().split(',')
                if float(attrs[6]) >= tau_conf_score:
                    attrs.append(1)
                    line_con[self.nxt].append(attrs)
                    id_con[self.nxt].append(-1)
                    i += 1

            # update the edges
            # print 'T',
            ret = self.train_set.getRet()
            for edge in self.train_set.candidates:
                e, vs_index, vr_index = edge
                if ret[vs_index][vr_index] == 1.0:
                    continue
                e = e.to(self.device).view(1, -1)
                v1 = self.train_set.getMotion(1, vs_index)
                v2 = self.train_set.getMotion(0, vr_index, vs_index)
                e_ = self.Ephi(e, v1, v2, u_)
                self.train_set.edges[vs_index][vr_index] = e_.data.view(-1)
                tmp = F.softmax(e_)
                tmp = tmp.cpu().data.numpy()[0]
                ret[vs_index][vr_index] = float(tmp[0])

            # self.train_set.showE(outFile)

            # for j in ret:
            #     print j
            results = self.hungarian.compute(ret)
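            # Munkres (Hungarian) returns the assignment of current to next detections
            # that minimises the summed costs in ret; pairs whose cost is still above
            # tau_threshold are rejected in the loop below and treated as unmatched.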

            out = open(outFile, 'a')
            for (i, j) in results:
                # print (i,j)
                if ret[i][j] >= tau_threshold:
                    continue
                id = id_con[self.cur][i]
                id_con[self.nxt][j] = id
                attr1 = line_con[self.cur][i]
                attr2 = line_con[self.nxt][j]
                # print attrs
                attr2[1] = str(id)
                if attr1[-1] > 1:
                    # for the missing detections
                    self.linearModel(out, attr1, attr2)
                line = ''
                for attr in attr2[:-1]:
                    line += attr + ','
                line = line[:-1]
                print >> out, line

            for i in xrange(n):
                if id_con[self.nxt][i] == -1:
                    id_con[self.nxt][i] = id_step
                    attrs = line_con[self.nxt][i]
                    attrs[1] = str(id_step)
                    line = ''
                    for attr in attrs[:-1]:
                        line += attr + ','
                    line = line[:-1]
                    print >> out, line
                    id_step += 1
            out.close()

            self.train_set.getVelocity(results)
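            # The two loops below carry over current detections that the Hungarian
            # assignment left without a match: as long as their age counter plus the
            # frame gap stays within 'gap', they are appended to the next frame's
            # buffers (and moved with moveMotion) so they can still be linked later
            # (missing-detection / occlusion handling).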

            index = 0
            for (i, j) in results:
                while i != index:
                    attrs = line_con[self.cur][index]
                    # print '*', attrs, '*'
                    if attrs[-1] + t_gap <= gap:
                        attrs[-1] += t_gap
                        line_con[self.nxt].append(attrs)
                        id_con[self.nxt].append(id_con[self.cur][index])
                        self.train_set.moveMotion(index)
                    index += 1
                index += 1
            while index < m:
                attrs = line_con[self.cur][index]
                # print '*', attrs, '*'
                if attrs[-1] + t_gap <= gap:
                    attrs[-1] += t_gap
                    line_con[self.nxt].append(attrs)
                    id_con[self.nxt].append(id_con[self.cur][index])
                    self.train_set.moveMotion(index)
                index += 1

            line_con[self.cur] = []
            id_con[self.cur] = []
            # print head+step, results
            self.train_set.swapFC()
            self.swapFC()
        gtIn.close()
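
Everything in this example is driven from the constructor, so running it reduces to instantiating GN once the module-level globals it relies on (sequence_dir, t_dir, seqs, year, the tau_* thresholds, f_gap, gap, ...) are defined. A minimal, hypothetical invocation sketch; the sequence number and frame counts are made-up values:

    # e.g. sequence MOT16-02, first 300 of 600 frames used for training
    gn = GN(seq_index=2, tt=300, length=600, cuda=True)
    # __init__ loads the trained Uphi / Ephi / u models, recreates the motmetrics/
    # output directory and calls initOut(), which writes the tracking results and
    # prints the time spent on the training and validation splits.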
Example #4
class GN():
    def __init__(self, seq_index, tt, cuda=True):
        '''
        Evaluating with the MotMetrics
        :param seq_index: the number of the sequence
        :param tt: the number of frames to track (used as the tail frame for copyLines and evaluation)
        :param cuda: True - GPU, False - CPU
        '''
        self.bbx_counter = 0
        self.seq_index = seq_index
        self.hungarian = Munkres()
        self.device = torch.device("cuda" if cuda else "cpu")
        self.tt = tt
        self.missingCounter = 0
        self.sideConnection = 0

        print '     Loading the model...'
        self.loadModel()

        self.out_dir = t_dir + 'motmetrics_%s/' % type

        if not os.path.exists(self.out_dir):
            os.mkdir(self.out_dir)
        else:
            deleteDir(self.out_dir)
            os.mkdir(self.out_dir)
        self.initOut()

    def initOut(self):
        print '     Loading Data...'
        self.train_set = DatasetFromFolder(
            sequence_dir, '../MOT/MOT16/test/MOT16-%02d' % self.seq_index)

        detection_dir = self.out_dir + 'res_training_det.txt'
        res_training = self.out_dir + 'res_training.txt'  # the result of the training data
        self.createTxt(detection_dir)
        self.createTxt(res_training)
        self.copyLines(self.seq_index, 1, detection_dir, self.tt, 1)

        self.evaluation(1, self.tt, detection_dir, res_training)

    def getSeqL(self, info):
        # get the length of the sequence
        f = open(info, 'r')
        f.readline()
        for line in f.readlines():
            line = line.strip().split('=')
            if line[0] == 'seqLength':
                seqL = int(line[1])
        f.close()
        return seqL

    def copyLines(self, seq, head, gt_seq, tail=-1, tag=0):
        '''
        Copy the ground truth (or detections) within [head, tail]
        :param seq: the number of the sequence
        :param head: the head frame number
        :param gt_seq: the path of the output file
        :param tail: the tail frame number; -1 means the whole sequence
        :param tag: 0 - copy from gt.txt, 1 - copy from the detection file
        :return: the tail frame number actually used (seqL)
        '''
        if tt_tag:
            basic_dir = '../MOT/MOT%d/test/MOT%d-%02d-%s/' % (year, year, seq,
                                                              type)
        else:
            basic_dir = '../MOT/MOT%d/train/MOT%d-%02d-%s/' % (year, year, seq,
                                                               type)
        print '     Testing on', basic_dir, 'Length:', self.tt
        seqL = tail if tail != -1 else self.getSeqL(basic_dir + 'seqinfo.ini')

        det_dir = 'gt/gt_det.txt' if test_gt_det else 'det/det.txt'
        seq_dir = basic_dir + ('gt/gt.txt' if tag == 0 else det_dir)
        inStream = open(seq_dir, 'r')

        outStream = open(gt_seq, 'w')
        for line in inStream.readlines():
            line = line.strip()
            attrs = line.split(',')
            f_num = int(attrs[0])
            if f_num >= head and f_num <= seqL:
                print >> outStream, line
        outStream.close()

        inStream.close()
        return seqL

    def createTxt(self, out_file):
        f = open(out_file, 'w')
        f.close()

    def loadModel(self):
        name = 'all_7'

        if edge_initial == 1:
            i_name = 'Random/'
        elif edge_initial == 0:
            i_name = 'IoU/'

        self.Uphi = torch.load('Results/MOT16/%s/%s/uphi_13.pth' %
                               (i_name, name)).to(self.device)
        self.Ephi = torch.load('Results/MOT16/%s/%s/ephi_13.pth' %
                               (i_name, name)).to(self.device)
        self.u = torch.load('Results/MOT16/%s/%s/u_13.pth' % (i_name, name))
        self.u = self.u.to(self.device)

    def swapFC(self):
        self.cur = self.cur ^ self.nxt
        self.nxt = self.cur ^ self.nxt
        self.cur = self.cur ^ self.nxt

    def linearModel(self, out, attr1, attr2):
        # print 'I got you! *.*'
        t = attr1[-1]
        self.sideConnection += 1
        if t > f_gap:
            return
        frame = int(attr1[0])
        x1, y1, w1, h1 = float(attr1[2]), float(attr1[3]), float(
            attr1[4]), float(attr1[5])
        x2, y2, w2, h2 = float(attr2[2]), float(attr2[3]), float(
            attr2[4]), float(attr2[5])

        x_delta = (x2 - x1) / t
        y_delta = (y2 - y1) / t
        w_delta = (w2 - w1) / t
        h_delta = (h2 - h1) / t

        for i in xrange(1, t):
            frame += 1
            x1 += x_delta
            y1 += y_delta
            w1 += w_delta
            h1 += h_delta
            attr1[0] = str(frame)
            attr1[2] = str(x1)
            attr1[3] = str(y1)
            attr1[4] = str(w1)
            attr1[5] = str(h1)
            line = ''
            for attr in attr1[:-1]:
                line += attr + ','
            if show_recovering:
                line += '1'
            else:
                line = line[:-1]
            print >> out, line
            self.bbx_counter += 1
        self.missingCounter += t - 1

    def evaluation(self, head, tail, gtFile, outFile):
        '''
        Evaluation on dets
        :param head: the head frame number
        :param tail: the tail frame number
        :param gtFile: the input detection file (the clipped detection file written by copyLines)
        :param outFile: the output file for the tracking results
        :return: None
        '''
        gtIn = open(gtFile, 'r')
        self.cur, self.nxt = 0, 1
        line_con = [[], []]
        id_con = [[], []]
        id_step = 1

        step = head + self.train_set.setBuffer(head)
        while step < tail:
            # print '*********************************'
            t_gap = self.train_set.loadNext()
            step += t_gap
            # print head+step, 'F',

            u_ = self.Uphi(self.train_set.E, self.train_set.V, self.u)

            # print 'Fo'
            m = self.train_set.m
            n = self.train_set.n
            # print 'm = %d, n = %d'%(m, n)
            if n == 0:
                print 'There is no detection in the rest of sequence!'
                break

            if id_step == 1:
                out = open(outFile, 'a')
                i = 0
                while i < m:
                    attrs = gtIn.readline().strip().split(',')
                    if float(attrs[6]) >= tau_conf_score:
                        attrs.append(1)
                        attrs[1] = str(id_step)
                        line = ''
                        for attr in attrs[:-1]:
                            line += attr + ','
                        if show_recovering:
                            line += '0'
                        else:
                            line = line[:-1]
                        print >> out, line
                        self.bbx_counter += 1
                        line_con[self.cur].append(attrs)
                        id_con[self.cur].append(id_step)
                        id_step += 1
                        i += 1
                out.close()

            i = 0
            while i < n:
                attrs = gtIn.readline().strip().split(',')
                if float(attrs[6]) >= tau_conf_score:
                    attrs.append(1)
                    line_con[self.nxt].append(attrs)
                    id_con[self.nxt].append(-1)
                    i += 1

            # update the edges
            # print 'T',
            ret = self.train_set.getRet()
            for edge in self.train_set.candidates:
                e, vs_index, vr_index = edge
                if ret[vs_index][vr_index] == 1.0:
                    continue
                e = e.to(self.device).view(1, -1)
                v1 = self.train_set.getMApp(1, vs_index).to(self.device)
                v2 = self.train_set.getMApp(
                    0, vr_index, vs_index,
                    line_con[self.cur][vs_index][-1]).to(self.device)
                e_ = self.Ephi(e, v1, v2, u_)
                self.train_set.edges[vs_index][vr_index] = e_.data.view(-1)
                tmp = F.softmax(e_)
                tmp = tmp.cpu().data.numpy()[0]
                ret[vs_index][vr_index] = float(tmp[0])

            # self.train_set.showE(outFile)

            # for j in ret:
            #     print j
            results = self.hungarian.compute(ret)

            out = open(outFile, 'a')
            look_up = set(j for j in xrange(n))
            for (i, j) in results:
                # print (i,j)
                if ret[i][j] >= tau_threshold:
                    continue
                look_up.remove(j)
                self.train_set.updateVelocity(i, j, line_con[self.cur][i][-1],
                                              False)

                id = id_con[self.cur][i]
                id_con[self.nxt][j] = id
                attr1 = line_con[self.cur][i]
                attr2 = line_con[self.nxt][j]
                # print attrs
                attr2[1] = str(id)
                if attr1[-1] > 1:
                    # for the missing detections
                    self.linearModel(out, attr1, attr2)
                line = ''
                for attr in attr2[:-1]:
                    line += attr + ','
                if show_recovering:
                    line += '0'
                else:
                    line = line[:-1]
                print >> out, line
                self.bbx_counter += 1

            for j in look_up:
                self.train_set.updateVelocity(-1, j, tag=False)

            for i in xrange(n):
                if id_con[self.nxt][i] == -1:
                    id_con[self.nxt][i] = id_step
                    attrs = line_con[self.nxt][i]
                    attrs[1] = str(id_step)
                    line = ''
                    for attr in attrs[:-1]:
                        line += attr + ','
                    if show_recovering:
                        line += '0'
                    else:
                        line = line[:-1]
                    print >> out, line
                    self.bbx_counter += 1
                    id_step += 1
            out.close()

            # For missing & Occlusion
            index = 0
            for (i, j) in results:
                while i != index:
                    attrs = line_con[self.cur][index]
                    # print '*', attrs, '*'
                    if attrs[-1] + t_gap <= gap:
                        attrs[-1] += t_gap
                        line_con[self.nxt].append(attrs)
                        id_con[self.nxt].append(id_con[self.cur][index])
                        self.train_set.moveMApp(index)
                    index += 1

                if ret[i][j] >= tau_threshold:
                    attrs = line_con[self.cur][index]
                    # print '*', attrs, '*'
                    if attrs[-1] + t_gap <= gap:
                        attrs[-1] += t_gap
                        line_con[self.nxt].append(attrs)
                        id_con[self.nxt].append(id_con[self.cur][index])
                        self.train_set.moveMApp(index)
                index += 1
            while index < m:
                attrs = line_con[self.cur][index]
                # print '*', attrs, '*'
                if attrs[-1] + t_gap <= gap:
                    attrs[-1] += t_gap
                    line_con[self.nxt].append(attrs)
                    id_con[self.nxt].append(id_con[self.cur][index])
                    self.train_set.moveMApp(index)
                index += 1

            # con = self.train_set.cleanEdge()
            # for i in xrange(len(con)-1, -1, -1):
            #     index = con[i]
            #     del line_con[self.nxt][index]
            #     del id_con[self.nxt][index]

            line_con[self.cur] = []
            id_con[self.cur] = []
            # print head+step, results
            self.train_set.swapFC()
            self.swapFC()
        gtIn.close()
        print '     The results:', id_step, self.bbx_counter
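
Compared with Example #3, this variant runs on the MOT16 test split, reads features through getMApp (presumably motion plus appearance) instead of getMotion, counts every written box in bbx_counter, and, when show_recovering is set, appends a trailing flag to each output row ('1' for boxes recovered by linearModel, '0' for real detections). A hypothetical invocation sketch with made-up values:

    # track a test sequence; tt is the tail frame passed on to copyLines and evaluation
    gn = GN(seq_index=1, tt=450, cuda=True)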
Example #5
class GN():
    def __init__(self, seq_index, tt, length, cuda=True):
        '''
        Evaluating with the MotMetrics
        :param seq_index: the number of the sequence
        :param tt: the number of frames used for training (the train/test split point)
        :param length: the total number of frames of the sequence
        :param cuda: True - GPU, False - CPU
        '''

        # top index: 0 - correct matching, 1 - false matching, second index: 0 - min, 1 - max, 2 - total, 3 - counter
        self.ctau = [[1.0, 0.0, 0.0, 0] for i in xrange(2)]  # get the threshold for matching cost

        self.seq_index = seq_index
        self.hungarian = Munkres()
        self.device = torch.device("cuda" if cuda else "cpu")
        self.tt = tt
        self.length = length
        self.missingCounter = 0

        print '     Loading the model...'
        self.loadModel()

        self.out_dir = t_dir + 'motmetrics/'
        if not os.path.exists(self.out_dir):
            os.mkdir(self.out_dir)
        else:
            deleteDir(self.out_dir)
            os.mkdir(self.out_dir)
        self.initOut()

    def initOut(self):
        start = time.time()
        print '     Loading Data...'
        print '     Evaluating'
        self.train_set = DatasetFromFolder(sequence_dir)

        gt_training = self.out_dir + 'gt_training.txt'  # the gt of the training data
        self.copyLines(self.seq_index, 1, gt_training, self.tt)

        detection_dir = self.out_dir +'res_training_det.txt'
        res_training = self.out_dir + 'res_training.txt'  # the result of the training data
        self.createTxt(detection_dir)
        self.createTxt(res_training)
        self.copyLines(self.seq_index, 1, detection_dir, self.tt, 1)

        # Evaluating on the training data
        # motmetrics = open(metrics_dir, 'a')
        # print >> motmetrics, '*'*30, self.tt, '*'*30
        # print >> motmetrics, 'Training'
        self.evaluation(1, self.tt, detection_dir)
        print '     Time consuming:', (time.time()-start)/60.0
        # cmd = 'python3 evaluation.py %s %s'%(gt_training, res_training)
        # (status, output) = commands.getstatusoutput(cmd)
        # print >> motmetrics, output
        # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
        # motmetrics.close()

        if self.tt < self.length:
            # Evaluating on the validation data
            start = time.time()
            print '     Validation'

            # The distant sequence
            head = self.length - self.tt + 1
            tail = self.length

            # The sequence nearby
            # head = self.tt
            # tail = 2*self.tt-1

            gt_validation = self.out_dir + 'gt_validation.txt'  # the gt of the validation data
            self.copyLines(self.seq_index, head, gt_validation, tail)

            detection_dir = self.out_dir + 'res_validation_det.txt'
            res_validation = self.out_dir + 'res_validation.txt'  # the result of the validation data
            self.createTxt(detection_dir)
            self.createTxt(res_validation)
            self.copyLines(self.seq_index, head, detection_dir, tail, 1)

            # motmetrics = open(metrics_dir, 'a')
            # print >> motmetrics, 'Validation'
            self.evaluation(head, tail, detection_dir)
            print '     Time consuming:', (time.time()-start)/60.0
            # cmd = 'python3 evaluation.py %s %s'%(gt_validation, res_validation)
            # (status, output) = commands.getstatusoutput(cmd)
            # print >> motmetrics, output
            # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
            # motmetrics.close()
        else:
            # Evaluating on the validation data
            for seq in seqs:
                if seq == self.seq_index:
                    continue
                print '     %02d_Validation'%seq
                start = time.time()
                seq_dir = '../MOT/MOT16/train/MOT%d-%02d' % (year, seq)
                self.train_set = DatasetFromFolder(seq_dir)
                gt_seq = self.out_dir + 'gt_%02d.txt' % seq
                seqL = self.copyLines(seq, 1, gt_seq)

                detection_dir = self.out_dir + 'res_%02d_det.txt' % seq
                c_validation = self.out_dir + 'res_%02d.txt' % seq
                self.createTxt(detection_dir)
                self.createTxt(c_validation)
                self.copyLines(seq, 1, detection_dir, tag=1)

                # motmetrics = open(metrics_dir, 'a')
                # print >> motmetrics, '%02d_validation'%seq
                self.evaluation(1, seqL, detection_dir)
                print '     Time consuming:', (time.time()-start)/60.0
                # cmd = 'python3 evaluation.py %s %s'%(gt_seq, c_validation)
                # (status, output) = commands.getstatusoutput(cmd)
                # print >> motmetrics, output
                # print >> motmetrics, 'The time consuming:{}\n\n'.format((time.time()-start)/60)
                # motmetrics.close()

    def getSeqL(self, info):
        # get the length of the sequence
        f = open(info, 'r')
        f.readline()
        for line in f.readlines():
            line = line.strip().split('=')
            if line[0] == 'seqLength':
                seqL = int(line[1])
        f.close()
        return seqL

    def copyLines(self, seq, head, gt_seq, tail=-1, tag=0):
        '''
        Copy the ground truth (or detections) within [head, tail]
        :param seq: the number of the sequence
        :param head: the head frame number
        :param gt_seq: the path of the output file
        :param tail: the tail frame number; -1 means the whole sequence
        :param tag: 0 - copy from gt.txt, 1 - copy from the detection file
        :return: the tail frame number actually used (seqL)
        '''
        basic_dir = '../MOT/MOT16/train/MOT%d-%02d/' % (year, seq)
        seqL = tail if tail != -1 else self.getSeqL(basic_dir + 'seqinfo.ini')

        det_dir = 'gt/gt_det.txt' if test_gt_det else 'det/det.txt'
        seq_dir = basic_dir + ('gt/gt.txt' if tag == 0 else det_dir)
        inStream = open(seq_dir, 'r')

        outStream = open(gt_seq, 'w')
        for line in inStream.readlines():
            line = line.strip()
            attrs = line.split(',')
            f_num = int(attrs[0])
            if f_num >= head and f_num <= seqL:
                print >> outStream, line
        outStream.close()

        inStream.close()
        return seqL

    def createTxt(self, out_file):
        f = open(out_file, 'w')
        f.close()

    def loadModel(self):
        name = 'all_4'
        self.Uphi = torch.load('Results/MOT16/IoU/%s/uphi_13.pth'%name).to(self.device)
        self.Ephi = torch.load('Results/MOT16/IoU/%s/ephi_13.pth'%name).to(self.device)
        self.u = torch.load('Results/MOT16/IoU/%s/u_13.pth'%name)
        self.u = self.u.to(self.device)

    def swapFC(self):
        self.cur = self.cur ^ self.nxt
        self.nxt = self.cur ^ self.nxt
        self.cur = self.cur ^ self.nxt

    def showCTau(self):
        for i in xrange(2):
            print '     Min:', self.ctau[i][0], 'Max:', self.ctau[i][1],
            if self.ctau[i][3]:
                print 'Mean:', self.ctau[i][2]/self.ctau[i][3]
            else:
                print 'Total:', self.ctau[i][2], 'Counter:', self.ctau[i][3]
        print '     Overall mean:', (self.ctau[0][2]+self.ctau[1][2])/(self.ctau[0][3]+self.ctau[1][3])

    def evaluation(self, head, tail, gtFile):
        '''
        Evaluation on dets
        :param head: the head frame number
        :param tail: the tail frame number
        :param gtFile: the input detection file (the clipped detection file written by copyLines)
        :return: None
        '''
        gtIn = open(gtFile, 'r')
        self.cur, self.nxt = 0, 1
        line_con = [[], []]
        id_step = 1

        step = head + self.train_set.setBuffer(head)
        while step < tail:
            step += self.train_set.loadNext()
            # print head+step, 'F',

            u_ = self.Uphi(self.train_set.E, self.train_set.V, self.u)

            # print 'Fo'
            m = self.train_set.m
            n = self.train_set.n
            if n==0:
                print 'There is no detection in the rest of sequence!'
                break

            if id_step == 1:
                i = 0
                while i < m:
                    attrs = gtIn.readline().strip().split(',')
                    if float(attrs[6]) >= tau_conf_score:
                        line_con[self.cur].append(attrs)
                        id_step += 1
                        i += 1

            i = 0
            while i < n:
                attrs = gtIn.readline().strip().split(',')
                if float(attrs[6]) >= tau_conf_score:
                    attrs.append(1)
                    line_con[self.nxt].append(attrs)
                    i += 1

            # update the edges
            # print 'T',
            ret = self.train_set.getRet()
            for edge in self.train_set.candidates:
                e, vs_index, vr_index = edge
                e = e.to(self.device).view(1,-1)
                v1 = self.train_set.getMotion(1, vs_index).to(self.device)
                v2 = self.train_set.getMotion(0, vr_index, vs_index, 1).to(self.device)
                e_ = self.Ephi(e, v1, v2, u_)
                self.train_set.edges[vs_index][vr_index] = e_.data.view(-1)
                tmp = F.softmax(e_)
                tmp = tmp.cpu().data.numpy()[0]
                ret[vs_index][vr_index] = float(tmp[0])

            # self.train_set.showE(outFile)

            # for j in ret:
            #     print j

            results = set(j for j in xrange(n))
            for i in xrange(m):
                a_attrs = line_con[self.cur][i]
                for j in xrange(n):
                    index = 1
                    cost = ret[i][j]
                    # print a_attrs[1], line_con[self.nxt][j][1]
                    if a_attrs[1] == line_con[self.nxt][j][1]:
                        self.train_set.updateVelocity(i, j, 1)
                        results.remove(j)
                        index = 0
                    self.ctau[index][0] = min(self.ctau[index][0], cost)
                    self.ctau[index][1] = max(self.ctau[index][1], cost)
                    self.ctau[index][2] += cost
                    self.ctau[index][3] += 1

            for j in results:
                self.train_set.updateVelocity(-1, j)

            line_con[self.cur] = []
            # print head+step, results
            self.train_set.swapFC()
            self.swapFC()
        gtIn.close()

        # tra_tst = 'training sets' if head == 1 else 'validation sets'
        # out = open(outFile, 'a')
        # print >> out, tra_tst
        # out.close()
        self.showCTau()
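
Unlike the previous examples, this variant does not write tracking results: evaluation walks pairs whose ground-truth ids are known, splits the predicted edge costs into correct matches (same id in consecutive frames) and false matches, and showCTau prints their min/max/mean, presumably so a suitable tau_threshold for the tracking examples above can be read off. A rough illustration with made-up costs:

    # ctau[i] = [min, max, total, counter]; after two correct-match costs 0.1 and 0.2,
    # ctau[0] holds roughly [0.1, 0.2, 0.3, 2] and showCTau reports a mean of 0.15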