def __init__(self, command):
    Test.__init__(self)
    self.command = command
    self.env = {}

    # Accept either an argv-style list or a single command string;
    # strings are split into an argument list.
    if isinstance(self.command, basestring):
        self.command = shlex.split(str(self.command))
def __init__(self, filepath, runConcurrent=True):
    """
    :filepath: Must end in one of '.vert', '.geom', or '.frag'.
    """
    Test.__init__(self, runConcurrent)
    self.__config = None
    self.__command = None
    self.__filepath = filepath
    self.result = None
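For illustration, here is a small, hypothetical helper that enforces the extension rule stated in the docstring above ('.vert', '.geom', or '.frag'); it is a sketch for this document only, not part of the original class.

import os

def check_shader_extension(filepath):
    # The constructor's docstring requires one of these suffixes.
    ext = os.path.splitext(filepath)[1]
    if ext not in ('.vert', '.geom', '.frag'):
        raise ValueError("expected a .vert, .geom, or .frag file, got %r" % filepath)
    return ext

check_shader_extension('shaders/glsl-color.frag')  # returns '.frag'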
def __init__(self, command):
    Test.__init__(self)
    self.command = command
    self.env = {}

    # Normalize a command string into an argument list before anything
    # indexes into it; self.command[0] must be the executable, not the
    # first character of a string.
    if isinstance(self.command, basestring):
        self.command = shlex.split(str(self.command))

    # Executable name without its directory, used for the skip check.
    self.split_command = os.path.split(self.command[0])[1]
    self.skip_test = self.check_for_skip_scenario(command)
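A minimal, self-contained sketch of the command-normalization pattern shared by the two command-taking constructors above, written for Python 3 (where basestring is simply str); the ExecTest class name is a hypothetical stand-in used only for this example.

import os
import shlex

class ExecTest:  # hypothetical stand-in for the Test subclass above
    def __init__(self, command):
        # Accept either an argv-style list or a single shell-like string.
        if isinstance(command, str):
            command = shlex.split(command)
        self.command = command
        # Executable name without its directory, e.g. 'glxinfo' from '/usr/bin/glxinfo'.
        self.split_command = os.path.split(command[0])[1]
        self.env = {}

# Both spellings yield the same argument list.
assert ExecTest('glxinfo -l').command == ExecTest(['glxinfo', '-l']).command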
def __init__(self, shader_runner_args, run_standalone=False):
    """run_standalone: Run the test outside the Python framework."""
    Test.__init__(self, runConcurrent=True)

    assert isinstance(shader_runner_args, list)
    assert isinstance(shader_runner_args[0], (str, unicode))

    self.__run_standalone = run_standalone
    self.__shader_runner_args = shader_runner_args
    self.__test_filepath = shader_runner_args[0]
    self.__result = None
    self.__command = None
    self.__gl_api = None
    self.env = {}
def load_tests(self, tests_dir, name):
    core.log_open_sec(name.capitalize() + " tests")
    tests_subdir = os.path.join(tests_dir, name)
    tests = []
    for td in os.listdir(tests_subdir):
        tests.append(Test(os.path.join(tests_subdir, td), td))
    core.log_end_sec()
    return tests
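As a usage illustration only, the sketch below reduces the loader above to a standalone function (dropping the core.log_* calls and replacing Test with a (path, name) tuple) and runs it over a throwaway directory tree; the group and case names are invented for the example.

import os
import tempfile

def load_tests(tests_dir, name):
    tests_subdir = os.path.join(tests_dir, name)
    return [(os.path.join(tests_subdir, td), td) for td in os.listdir(tests_subdir)]

# Build a temporary tests/smoke/ tree with two entries and load it.
with tempfile.TemporaryDirectory() as tests_dir:
    os.makedirs(os.path.join(tests_dir, 'smoke', 'case-a'))
    os.makedirs(os.path.join(tests_dir, 'smoke', 'case-b'))
    for path, name in load_tests(tests_dir, 'smoke'):
        print(name, '->', path)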
def main(args):
    if args.training_type == 'Train':
        savefilename = osp.join(args.dataset1 + args.dataset2 + args.dataset3 + '1')
    elif args.training_type == 'Test':
        savefilename = osp.join(args.tstfile,
                                args.tstdataset + 'to' + args.dataset_target + args.snapshotnum)

    ##################### load seed #####################
    args.seed = init_random_seed(args.manual_seed)

    if args.training_type in ['Train', 'Test']:
        summary_writer = SummaryWriter(osp.join(args.results_path, 'log', savefilename))
        saver = Saver(args, savefilename)
        saver.print_config()

    ##################### load datasets #####################
    if args.training_type == 'Train':
        data_loader1_real = get_dataset_loader(name=args.dataset1, getreal=True, batch_size=args.batchsize)
        data_loader1_fake = get_dataset_loader(name=args.dataset1, getreal=False, batch_size=args.batchsize)
        data_loader2_real = get_dataset_loader(name=args.dataset2, getreal=True, batch_size=args.batchsize)
        data_loader2_fake = get_dataset_loader(name=args.dataset2, getreal=False, batch_size=args.batchsize)
        data_loader3_real = get_dataset_loader(name=args.dataset3, getreal=True, batch_size=args.batchsize)
        data_loader3_fake = get_dataset_loader(name=args.dataset3, getreal=False, batch_size=args.batchsize)
        data_loader_target = get_tgtdataset_loader(name=args.dataset_target, batch_size=args.batchsize)
    elif args.training_type == 'Test':
        data_loader_target = get_tgtdataset_loader(name=args.dataset_target, batch_size=args.batchsize)

    ##################### load models #####################
    FeatExtmodel = models.create(args.arch_FeatExt)
    DepthEstmodel = models.create(args.arch_DepthEst)
    FeatEmbdmodel = models.create(args.arch_FeatEmbd, momentum=args.bn_momentum)

    if args.training_type == 'Train':
        FeatExt_restore = None
        DepthEst_restore = None
        FeatEmbd_restore = None
    elif args.training_type == 'Test':
        FeatExt_restore = osp.join('results', args.tstfile, 'snapshots', args.tstdataset,
                                   'FeatExtor-' + args.snapshotnum + '.pt')
        FeatEmbd_restore = osp.join('results', args.tstfile, 'snapshots', args.tstdataset,
                                    'FeatEmbder-' + args.snapshotnum + '.pt')
        DepthEst_restore = None
    else:
        raise NotImplementedError('method type [%s] is not implemented' % args.training_type)

    FeatExtor = init_model(net=FeatExtmodel, init_type=args.init_type,
                           restore=FeatExt_restore, parallel_reload=True)
    DepthEstor = init_model(net=DepthEstmodel, init_type=args.init_type,
                            restore=DepthEst_restore, parallel_reload=True)
    FeatEmbder = init_model(net=FeatEmbdmodel, init_type=args.init_type,
                            restore=FeatEmbd_restore, parallel_reload=False)

    print(">>> FeatExtor <<<")
    print(FeatExtor)
    print(">>> DepthEstor <<<")
    print(DepthEstor)
    print(">>> FeatEmbder <<<")
    print(FeatEmbder)

    ##################### training models #####################
    if args.training_type == 'Train':
        Train(args, FeatExtor, DepthEstor, FeatEmbder,
              data_loader1_real, data_loader1_fake,
              data_loader2_real, data_loader2_fake,
              data_loader3_real, data_loader3_fake,
              data_loader_target,
              summary_writer, saver, savefilename)
    elif args.training_type in ['Test']:
        Test(args, FeatExtor, FeatEmbder, data_loader_target, savefilename)
    else:
        raise NotImplementedError('method type [%s] is not implemented' % args.training_type)
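To show how main() might be driven, here is a hedged sketch of the argument wiring it appears to expect; the option names mirror the attributes the function reads, but the defaults and choices below are assumptions for illustration, not values taken from the original project.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--training_type', choices=['Train', 'Test'], default='Train')
parser.add_argument('--dataset1')
parser.add_argument('--dataset2')
parser.add_argument('--dataset3')
parser.add_argument('--dataset_target')
parser.add_argument('--tstfile')
parser.add_argument('--tstdataset')
parser.add_argument('--snapshotnum')
parser.add_argument('--manual_seed', type=int, default=None)
parser.add_argument('--results_path', default='results')
parser.add_argument('--batchsize', type=int, default=10)       # assumed default
parser.add_argument('--arch_FeatExt')
parser.add_argument('--arch_DepthEst')
parser.add_argument('--arch_FeatEmbd')
parser.add_argument('--bn_momentum', type=float, default=0.1)  # assumed default
parser.add_argument('--init_type', default='xavier')           # assumed default

if __name__ == '__main__':
    main(parser.parse_args())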