Example #1
File: PyFlow.py — Project: haeusser/caffe
    def plot(self):
        """Build one EPE plot per dataset, then display all figures at once."""
        for dataset in self._datasets:
            epe_plot = OpticalFlow.EpePlot(dataset)
            epe_plot.addMethods(self._methods, self._args.type)
            if self._args.diff:
                epe_plot.makeDiff()
            epe_plot.setColorDef(self._args.colordef)
            epe_plot.plot()

        # Show every figure created in the loop above.
        plt.show()
Example #2
File: Dataset.py — Project: haeusser/caffe
 def data(self, downsample=1):
     """Load this entry as a channels-first numpy array.

     Images are returned as uint8 BGR arrays of shape (3, H, W); flow and
     disparity data are fixed-point encoded: values are multiplied by 32
     and stored as int16, with NaNs replaced by the int16 maximum as a
     sentinel.

     Args:
         downsample: integer downsampling factor; 1 means no downsampling.
             Only implemented for images and flow.

     Raises:
         Exception: for disparity types with downsample != 1, or for an
             unknown self._type.
     """
     def quantize(values, nan_mask):
         # Fixed-point encode at 1/32 resolution; NaN -> int16 max sentinel.
         encoded = (values * 32.0).astype(np.int16)
         encoded[nan_mask] = np.iinfo(np.int16).max
         return encoded

     if self._type == "image":
         image = misc.imread(self._path)[:, :, 0:3]
         if downsample != 1:
             image = tb.downsampleImage(image, downsample)
         # Reverse channel order (RGB -> BGR), then HWC -> CHW.
         return image[..., np.r_[2, 1, 0]].transpose((2, 0, 1))

     if self._type == "flow":
         flow = tb.readFlow(self._path)[:, :, 0:2]
         if downsample != 1:
             flow = OpticalFlow.downsampleMedian(flow, downsample)
         flow = flow.transpose((2, 0, 1))
         return quantize(flow, np.isnan(flow))

     if self._type in ("leftdisparity", "rightdisparity",
                       "leftdisparitychange", "rightdisparitychange"):
         disparity = tb.readDisparity(self._path)
         nan_mask = np.isnan(disparity)
         if self._type.startswith("left"):
             # Left views carry the opposite sign convention.
             disparity = disparity * -1
         if downsample != 1:
             raise Exception("no downsampling implemented for disparity")
         return quantize(disparity, nan_mask)

     raise Exception('unhandled data type')
Example #3
    def computeJobImplementation(self, job):
        """Assemble the epicflow command line for this entity and enqueue it."""
        # Pick the forward or backward boundary file depending on direction.
        boundary_index = 0 if self.spec().direction() == '+' else 1
        command = [
            'epicflow',
            self.img1Path(),
            self.img2Path(),
            self._ent.inFile(OpticalFlow.Specification('sed'),
                             'boundary.bin')[boundary_index],
            self._ent.bind(self.spec().inputs()[0]).file('flow.match'),
            'flow.flo',
        ]

        # Dataset-specific tuning flags understood by the epicflow binary.
        dataset_name = self._ent.dsName()
        for prefix, flag in (('sintel', '-sintel'),
                             ('kitti', '-kitti'),
                             ('middlebury', '-middlebury')):
            if dataset_name.startswith(prefix):
                command.append(flag)

        job.addCommand(self.path(), ' '.join(command))
Example #4
File: PyFlow.py — Project: haeusser/caffe
    def __init__(self):
        """Define the PyFlow command-line interface, parse it, and expand
        the dataset and method selections given on the command line."""
        parser = argparse.ArgumentParser()

        # Global options shared by every subcommand.
        parser.add_argument('--format', help='id formatting', default='%04d')
        parser.add_argument('--verbose', help='verbose', action='store_true')
        parser.add_argument('--label', help='restrict to label', type=int, default=-1)
        parser.add_argument('--limit', help='limit number of entities', type=str, default='')
        parser.add_argument('--id', help='restrict to id', type=int)

        subparsers = parser.add_subparsers(dest='command')

        # datasets: takes no further arguments.
        subparsers.add_parser('datasets')

        # uents / bents: operate on a dataset selection only.
        uents_parser = subparsers.add_parser('uents')
        uents_parser.add_argument('datasets', help='datasets')
        bents_parser = subparsers.add_parser('bents')
        bents_parser.add_argument('datasets', help='datasets')

        # compute / update: run methods, optionally locally and multi-core.
        for command in ('compute', 'update'):
            runner_parser = subparsers.add_parser(command)
            runner_parser.add_argument('methods', help='method definitions')
            runner_parser.add_argument('datasets', help='datasets')
            runner_parser.add_argument('--local', help='do not compute on cluster', action='store_true')
            runner_parser.add_argument('--cores', help='run on multiple cores', default=1, type=int)

        # check / clean: method + dataset selection, no extra options.
        for command in ('check', 'clean'):
            maintenance_parser = subparsers.add_parser(command)
            maintenance_parser.add_argument('methods', help='method definition')
            maintenance_parser.add_argument('datasets', help='datasets')

        # NOTE: a former 'integrate' subcommand was removed and stays disabled.

        # epe: end-point-error evaluation.
        epe_parser = subparsers.add_parser('epe')
        epe_parser.add_argument('methods', help='method definition')
        epe_parser.add_argument('datasets', help='datasets')
        epe_parser.add_argument('--type', help='type of epe', default='all')
        epe_parser.add_argument('--make-epe', help='compute epe', action='store_true')
        epe_parser.add_argument('--make-stat', help='compute stat', action='store_true')
        epe_parser.add_argument('--refresh', help='refresh', action='store_true')
        epe_parser.add_argument('--no-output', help='no output', action='store_true')
        epe_parser.add_argument('--list', help='list errors', action='store_true')
        epe_parser.add_argument('--plain', help='plain output', action='store_true')

        # epe-stat: statistics over the end-point error.
        epe_stat_parser = subparsers.add_parser('epe-stat')
        epe_stat_parser.add_argument('methods', help='method definition')
        epe_stat_parser.add_argument('datasets', help='datasets')
        epe_stat_parser.add_argument('--type', help='type of epe', default='all')
        epe_stat_parser.add_argument('--make-epe', help='compute epe', action='store_true')
        epe_stat_parser.add_argument('--make-stat', help='compute stat', action='store_true')
        epe_stat_parser.add_argument('--refresh', help='refresh', action='store_true')

        # plot: graphical comparison of methods.
        plot_parser = subparsers.add_parser('plot')
        plot_parser.add_argument('methods', help='method definition')
        plot_parser.add_argument('datasets', help='datasets')
        plot_parser.add_argument('colordef', help='color definition', default='', nargs='?')
        plot_parser.add_argument('--type', help='type of epe', default='all')
        plot_parser.add_argument('--diff', help='plot difference', action='store_true')

        self._args = parser.parse_args()

        # Propagate global flags to the OpticalFlow entity classes and toolbox.
        OpticalFlow.UnaryEntity.format = self._args.format
        OpticalFlow.BinaryEntity.format = self._args.format
        tb.verbose = self._args.verbose

        # Only subcommands that declared these positionals provide them.
        self._datasets = []
        if 'datasets' in self._args:
            self._datasets = OpticalFlow.expandDataset(self._args.datasets, self._args.label, self._args.limit)

        self._methods = []
        if 'methods' in self._args:
            self._methods = OpticalFlow.Specification.expand(self._args.methods)
Example #5
File: PyFlow.py — Project: haeusser/caffe
    def __init__(self):
        """Build the argparse CLI, parse it, and expand the requested
        dataset and method specifications."""
        parser = argparse.ArgumentParser()
        parser.add_argument("--format", help="id formatting", default="%04d")
        parser.add_argument("--verbose", help="verbose", action="store_true")
        parser.add_argument("--label", help="restrict to label", type=int, default=-1)
        parser.add_argument("--limit", help="limit number of entities", type=str, default="")
        parser.add_argument("--id", help="restrict to id", type=int)

        subparsers = parser.add_subparsers(dest="command")

        def add_selection(sub, methods_help=None):
            # Attach the shared positional arguments, in parsing order.
            if methods_help is not None:
                sub.add_argument("methods", help=methods_help)
            sub.add_argument("datasets", help="datasets")
            return sub

        def add_cluster_options(sub):
            # Options common to the job-running subcommands.
            sub.add_argument("--local", help="do not compute on cluster", action="store_true")
            sub.add_argument("--cores", help="run on multiple cores", default=1, type=int)
            return sub

        subparsers.add_parser("datasets")
        add_selection(subparsers.add_parser("uents"))
        add_selection(subparsers.add_parser("bents"))
        add_cluster_options(add_selection(subparsers.add_parser("compute"), "method definitions"))
        add_cluster_options(add_selection(subparsers.add_parser("update"), "method definitions"))
        add_selection(subparsers.add_parser("check"), "method definition")
        add_selection(subparsers.add_parser("clean"), "method definition")

        # NOTE: an 'integrate' subcommand existed here but remains disabled.

        epe = add_selection(subparsers.add_parser("epe"), "method definition")
        epe.add_argument("--type", help="type of epe", default="all")
        for flag, description in (("--make-epe", "compute epe"),
                                  ("--make-stat", "compute stat"),
                                  ("--refresh", "refresh"),
                                  ("--no-output", "no output"),
                                  ("--list", "list errors"),
                                  ("--plain", "plain output")):
            epe.add_argument(flag, help=description, action="store_true")

        epe_stat = add_selection(subparsers.add_parser("epe-stat"), "method definition")
        epe_stat.add_argument("--type", help="type of epe", default="all")
        for flag, description in (("--make-epe", "compute epe"),
                                  ("--make-stat", "compute stat"),
                                  ("--refresh", "refresh")):
            epe_stat.add_argument(flag, help=description, action="store_true")

        plot = add_selection(subparsers.add_parser("plot"), "method definition")
        plot.add_argument("colordef", help="color definition", default="", nargs="?")
        plot.add_argument("--type", help="type of epe", default="all")
        plot.add_argument("--diff", help="plot difference", action="store_true")

        self._args = parser.parse_args()

        # Mirror the global flags into the OpticalFlow classes and toolbox.
        OpticalFlow.UnaryEntity.format = self._args.format
        OpticalFlow.BinaryEntity.format = self._args.format
        tb.verbose = self._args.verbose

        # Positionals exist only for the subcommands that declared them.
        self._datasets = []
        if "datasets" in self._args:
            self._datasets = OpticalFlow.expandDataset(self._args.datasets, self._args.label, self._args.limit)

        self._methods = []
        if "methods" in self._args:
            self._methods = OpticalFlow.Specification.expand(self._args.methods)