def run(self):
    """Run cross-validation: train/evaluate one Worker per fold and
    aggregate the per-sample angular errors into summary statistics.

    Returns:
        The summary produced by ``summary_angular_errors`` (mean,
        median, best 25%, etc...) over the errors of all folds.
    """
    # per-sample results accumulated across all folds
    ae_res = []

    # Cross Validation loop: one Worker per fold
    for i, fold in enumerate(self.folds):
        # point args at this fold's train / validation / test splits
        self.args.trainfiles = fold.train
        self.args.valfile = fold.validation
        self.args.testfile = fold.test

        # all training logic is inside Worker (core/worker.py)
        worker = Worker(i, self.conf, self.data_conf, self.cache, self.args,
                        verbose=self.verbose, inference=self.inference)
        res, _ = worker.run()

        # print results for each fold
        if self.verbose:
            partial_res = summary_angular_errors([r.error for r in res])
            self._print_results(partial_res, 'fold '+str(i))

        # accumulate results into the global list
        ae_res += res

    # summary_angular_errors: computes mean, median, best 25%, etc...
    results = summary_angular_errors([r.error for r in ae_res])
    if self.verbose:
        self._print_results(results, 'total')

    return results
# ---- Example #2 ----
def main():
    """Entry point: parse args, set up the output directory and logging,
    run a single Worker (fold #0), and print test statistics if available.
    """
    args = parser.parse_args()

    if args.seed is not None:
        # deterministic CUDNN is required for reproducible runs but is slower
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # load configuration file: epochs, loss function, etc... for this experiment
    with open(args.configurationfile, 'r') as f:
        conf = json.load(f)

    # load datapath file: paths specific to the current machine
    with open(args.datapath, 'r') as f:
        data_conf = json.load(f)

    # remove previous results unless we are evaluating or resuming,
    # in which case the old outputs must be kept
    output_dir = os.path.join(args.outputfolder, args.dataset, args.subdataset,
                              conf['name'])
    if not args.evaluate and not args.resume:
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)

    # create output folder
    os.makedirs(output_dir, exist_ok=True)
    args.outputfolder = output_dir

    # copy configuration file to output folder so the run is reproducible
    shutil.copy(
        args.configurationfile,
        os.path.join(output_dir, os.path.basename(args.configurationfile)))

    # we overwrite the stdout and stderr (standard output and error) to
    # files in the output directory
    sys.stdout = PrintLogger(os.path.join(output_dir, 'stdout.txt'),
                             sys.stdout)
    sys.stderr = PrintLogger(os.path.join(output_dir, 'stderr.txt'),
                             sys.stderr)

    fold = 0  # no folds, but we always use fold #0 for these experiments
    cache = CacheManager(conf)
    worker = Worker(fold, conf, data_conf, cache, args)
    res, _ = worker.run()

    # some datasets have no validation GT, in which case res is empty
    if res:
        # print angular errors statistics (mean, median, etc...)
        generate_results(res, 'test')
# ---- Example #3 ----
def main():
    """Entry point: load configs, prepare the output folder and log files,
    then run a single inference Worker (fold #0) and report its results.
    """
    args = parser.parse_args()

    # experiment configuration (epochs, loss function, ...)
    with open(args.configurationfile, 'r') as f:
        conf = json.load(f)

    # machine-specific dataset paths
    with open(args.datapath, 'r') as f:
        data_conf = json.load(f)

    # make sure the output folder exists
    os.makedirs(args.outputfolder, exist_ok=True)

    # keep a copy of the configuration next to the results
    conf_copy = os.path.join(args.outputfolder,
                             os.path.basename(args.configurationfile))
    shutil.copy(args.configurationfile, conf_copy)

    # mirror stdout and stderr into log files inside the output directory
    sys.stdout = PrintLogger(os.path.join(args.outputfolder, 'stdout.txt'),
                             sys.stdout)
    sys.stderr = PrintLogger(os.path.join(args.outputfolder, 'stderr.txt'),
                             sys.stderr)

    # flags consumed by core/worker.py to decide what to do
    args.evaluate = True
    args.resume = False
    args.valfile = None
    args.trainfiles = None

    # cache manager normally avoids reading images more than once;
    # caching is disabled for this run
    cache = CacheManager(conf, no_cache=True)

    # no folds here, but fold #0 is always used for these experiments
    fold = 0
    worker = Worker(fold, conf, data_conf, cache, args, inference=True)
    res, _ = worker.run()

    # report angular error statistics (mean, median, etc...)
    generate_results(res)
# ---- Example #4 ----
def main():
    """Create a Worker and execute it."""
    job = Worker()
    job.run()
def runWorker(url):
    """Log the worker URL, then create a Worker and run it against *url*."""
    # lazy %-style args: the message is only formatted if INFO is enabled
    log.info("Worker URL: %s", url)
    worker = Worker()
    worker.run(url)
# ---- Example #6 ----
def runWorker(url):
    """Log the worker URL, then create a Worker and run it against *url*."""
    # PEP 8: 4-space indentation (original used tabs, inconsistent with
    # the rest of the file); lazy %-style args avoid eager formatting
    log.info("Worker URL: %s", url)
    worker = Worker()
    worker.run(url)