def run_test(dataset=None, epoch=-1, phase="test"):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    if dataset is None:
        dataset = CreateDataset(opt)
        # be consistent with training
        dataset = torch.utils.data.Subset(dataset, range(len(dataset)))
        dataset = DataLoader(dataset, opt)
    else:
        opt.nclasses = len(dataset.dataset.dataset.classes)
        opt.input_nc = dataset.dataset.dataset.opt.input_nc
        dataset.dataset.dataset.opt.num_aug = 1
        # dataset.dataset.dataset.opt.is_train = False
    model = ClassifierModel(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data, epoch)
        loss, (prec1, prec5), y_pred, y_true = model.test()
        writer.update_counter(loss, prec1, prec5, y_pred, y_true)
    if epoch == -1:
        writer.plot_summary("val", dataset.dataset.dataset.classes)
    else:
        writer.plot(epoch, phase, dataset.dataset.dataset.classes)
    return writer.statistics.top1.avg
def test_load_from_path(self, mocked_fileops, mocked_writer_get_datafile,
                        mocked_solution_is_solution_file, mocked_solution_load_path,
                        mocked_writer_add_sol, mocked_lang_is_prevalent_ext):
    """
    Ensure Writer.load_from_path properly assembles a Writer object
    """
    # If the writer dir does not exist, the writer should be None
    mocked_fileops.exists.return_value = False
    mocked_fileops.FileType = mock.MagicMock(DIRECTORY='lol')
    self.assertEqual(Writer.load_from_path('badPath'), None)
    mocked_fileops.exists.assert_called_with('badPath', 'lol')

    # If the writer dir does exist, a writer should be created
    mocked_fileops.exists.return_value = True
    mocked_writer_get_datafile.return_value = 'dataPath'
    mocked_fileops.get_json_dict.return_value = {'name': 'brandon',
                                                 'email': '*****@*****.**'}
    mocked_fileops.get_files_in_dir.return_value = ['file1', 'file2']
    mocked_solution_is_solution_file.return_value = True
    mocked_lang_is_prevalent_ext.return_value = True
    mocked_solution_load_path.return_value = 'sol'

    newWriter = Writer.load_from_path('path')

    mocked_fileops.exists.assert_called_with('path', 'lol')
    mocked_writer_get_datafile.assert_called_with()
    mocked_fileops.get_json_dict.assert_called_with('dataPath')
    mocked_fileops.get_files_in_dir.assert_called_with('path')
    mocked_solution_is_solution_file.assert_any_call('file1')
    mocked_solution_is_solution_file.assert_any_call('file2')
    mocked_solution_load_path.assert_any_call('file1')
    mocked_solution_load_path.assert_any_call('file2')
    mocked_writer_add_sol.assert_called_with('sol')
    self.assertEqual(newWriter.name, 'brandon')
    self.assertEqual(newWriter.email, '*****@*****.**')
def test_get_datafile_path(self, mock_writer_fileops):
    """
    Ensure Writer._get_datafile_path properly delegates to fileops.join_path
    """
    testWriter = Writer(writerPath='path')
    mock_writer_fileops.join_path.return_value = 'delegation'
    self.assertEqual(testWriter._get_datafile_path(), 'delegation')
    mock_writer_fileops.join_path.assert_called_with('path', Writer.DATAFILE_PATH)
def run_test(epoch=-1, is_val=True):
    print('Running Test')
    opt = TestOptions().parse()
    # No shuffling for the test set
    opt.serial_batches = True
    opt.which_epoch = epoch
    # Evaluate one mesh at a time
    opt.batch_size = 1
    # If we are running on the test set, switch the phase so the loader reads
    # from the folder where the test meshes are stored
    if not is_val:
        opt.phase = "test"
    dataset = DataLoader(opt)
    if opt.verbose:
        print("DEBUG testpath: ", opt.dataroot)
        print("DEBUG dataset length ", len(dataset))
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples = model.test(epoch, is_val)
        if opt.verbose:
            print("DEBUG test ncorrect, nexamples ", ncorrect, nexamples)
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
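# A hypothetical call site for the run_test above (not from the source): a
# training script might validate every epoch and switch to the held-out test
# split once at the end via is_val=False, which flips opt.phase to "test".
val_acc = run_test(epoch=5, is_val=True)    # evaluate on the validation split
test_acc = run_test(epoch=5, is_val=False)  # evaluate on the test split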
def test_get_solutions(self):
    """
    Ensure Writer.get_solutions returns the solution list for the provided
    problem number
    """
    testWriter = Writer()
    testWriter._solutions = {
        1: ['sol1', 'sol2', 'sol3'],
        2: ['sol4', 'sol5', 'sol6'],
        3: ['sol7', 'sol8', 'sol9']
    }

    # Ensure the correct list is returned
    self.assertEqual(testWriter.get_solutions(2), ['sol4', 'sol5', 'sol6'])

    # Ensure an invalid number returns an empty list
    self.assertEqual(testWriter.get_solutions(5), [])
def edit_writer_information(writerFolderPath: str, writerName: str,
                            writerEmail: str, writerLanguageList: list):
    """
    Changes the details of the writer located in the provided folder to match
    the specified details, overwriting the corresponding existing values. Any
    parameter that is None leaves that detail unchanged.

    Arguments:
    writerFolderPath: str - The path to the writer whose details need to be changed
    writerName: str - The new name of the writer
    writerEmail: str - The new email of the writer
    writerLanguageList: list - The new list of language names (str) that the writer knows
    """
    writerObject = Writer.load_from_folder(writerFolderPath)
    if writerObject is None:
        raise PyCException('Error: Writer {} does not exist'.format(writerFolderPath))

    if writerName is not None:
        writerObject.name = writerName

    if writerEmail is not None:
        writerObject.email = writerEmail

    if writerLanguageList is not None:
        writerObject.clear_known_languages()
        writerObject.add_known_language_from_list(writerLanguageList)

    writerObject.save_changes()
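# Hypothetical usage of edit_writer_information (the folder path and values
# here are illustrative assumptions, not from the source): update only the
# email and language list, leaving the name untouched by passing None.
edit_writer_information(
    writerFolderPath='writers/jdoe',        # assumed writer folder
    writerName=None,                        # None -> name stays unchanged
    writerEmail='jdoe@example.com',
    writerLanguageList=['Python', 'Java'])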
def test_get_all_solutions(self):
    """
    Ensure Writer.get_all_solutions returns a list of all solutions
    """
    testWriter = Writer()

    # Ensure an empty list is returned when there are no solutions
    self.assertEqual(testWriter.get_all_solutions(), [])

    testWriter._solutions = {
        1: ['sol1', 'sol2', 'sol3'],
        2: ['sol4', 'sol5', 'sol6'],
        3: ['sol7', 'sol8', 'sol9']
    }

    # Ensure all solutions are provided
    self.assertEqual(testWriter.get_all_solutions(),
                     ['sol1', 'sol2', 'sol3', 'sol4', 'sol5', 'sol6',
                      'sol7', 'sol8', 'sol9'])
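# A minimal sketch of what the two accessors exercised above might look like,
# inferred only from the assertions in these tests; the project's real Writer
# implementation may differ.
class WriterSketch:
    def __init__(self):
        self._solutions = {}

    def get_solutions(self, problemNumber):
        # An unknown problem number yields an empty list, not a KeyError
        return self._solutions.get(problemNumber, [])

    def get_all_solutions(self):
        # Flatten the per-problem lists into one list, in key order
        return [sol for number in sorted(self._solutions)
                for sol in self._solutions[number]]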
def add_user_quick(userName: str):
    """
    Adds a user by simply creating their folder and adding their entry to the
    writers.json dict. If the writer already exists, an exception is raised.

    Arguments:
    userName: str - The writer name to create
    """
    if Writers.writer_exists(userName):
        raise PyCException('Error: Writer {} already exists'.format(userName))

    mappedWriterPath = PathMapper.get_mapped_path(userName)
    newWriter = Writer(writerPath=mappedWriterPath)
    try:
        newWriter.create()
    except Exception:
        raise PyCException('Error: Could not create writer {}'.format(userName))
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter(opt)
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples, mean_iou, iou = model.test()
        writer.update_counter(ncorrect, nexamples, mean_iou, iou)
    writer.print_acc(epoch, writer.acc)
    writer.print_iou(epoch, writer.mean_iou, writer.seg_iou)
    return writer.acc, writer.mean_iou, writer.iou
def test_add_solution(self):
    """
    Ensure Writer._add_solution works as expected
    """
    # Multiple solutions with the same problem number should be appended
    mockedSolution = mock.MagicMock()
    mockedSolution.problemNumber = 5

    testWriter = Writer()
    testWriter._add_solution(mockedSolution)
    testWriter._add_solution(mockedSolution)
    self.assertEqual(testWriter._solutions, {5: [mockedSolution, mockedSolution]})

    # Different problem numbers should go to different dict sections
    mockedSecondSolution = mock.MagicMock()
    mockedSecondSolution.problemNumber = 7
    testWriter._add_solution(mockedSecondSolution)
    self.assertEqual(testWriter._solutions,
                     {5: [mockedSolution, mockedSolution],
                      7: [mockedSecondSolution]})
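# A sketch of the appending behavior this test verifies, using dict.setdefault
# so the first solution for a problem number creates its list. Inferred from
# the assertions only; the project's actual _add_solution may differ.
def _add_solution(self, solution):
    self._solutions.setdefault(solution.problemNumber, []).append(solution)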
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    dice_sum = 0
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples, dice = model.test()
        dice_sum += dice
        writer.update_counter(ncorrect, nexamples)
        writer.save_test_acc(data, ncorrect, nexamples, dice)
    # Average the Dice score over all batches
    dice_sum /= len(dataset)
    writer.print_acc(epoch, writer.acc, dice_sum)
    return writer.acc
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    dice_sum = 0
    writer.reset_counter()
    loss_mat = []
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples, dice, loss = model.test(loss_bool=True)
        loss_mat.append(loss.cpu().data.numpy())
        dice_sum += dice
        writer.update_counter(ncorrect, nexamples)
    dice_sum /= len(dataset)
    writer.print_acc(epoch, writer.acc, dice_sum)
    writer.save_val_loss(loss_mat, epoch)
    return writer.acc
def _create_writer_from_list(datalist: list):
    # TODO: Change CSV implementation to use DictReader for better handling of
    # missing information
    """
    Private function. Creates a single writer from a list following the format:
    ["folder", "name", "email", "language1,language2"]

    Arguments:
    datalist: list - The list of data to load into the writer
    """
    # First, verify that the data list is OK
    if datalist is None or len(datalist) != 4:
        raise PyCException('Cannot create writer from datalist {}. Malformed'
                           .format(str(datalist)))

    newWriter = Writer(writerPath=datalist[0], writerName=datalist[1],
                       writerEmail=datalist[2])
    newWriter.create()
    splitLanguageList = [languageName.strip()
                         for languageName in datalist[3].split(',')]
    newWriter.add_known_language_from_list(splitLanguageList)
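# A hypothetical CSV row for _create_writer_from_list (all values here are
# illustrative, not from the source): the fourth field is split on commas and
# stripped before being handed to add_known_language_from_list.
_create_writer_from_list(['writers/jdoe', 'John Doe',
                          'jdoe@example.com', 'Python, C++'])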
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples, out = model.test()
        # Dump the raw network output for each batch to a text file
        np.savetxt('results/' + str(i) + '.txt', out[0], delimiter=' ')
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = MSDSurfTrainDataset(opt)
    dataloader = DataLoader(dataset)
    model = RegresserModel(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    for i, data in enumerate(dataloader):
        model.set_input(data)
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    recon_test_loss = 0.0
    for i, data in enumerate(dataset):
        model.set_input(data)
        if opt.dataset_mode == 'reconstruction':
            # Reconstruction mode reports a loss instead of a correct count
            recon_test_loss += model.test()
            continue
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    recon_test_loss /= len(dataset)
    if opt.dataset_mode == 'reconstruction':
        writer.print_acc(epoch, recon_test_loss)
        return recon_test_loss
    writer.print_acc(epoch, writer.acc)
    return writer.acc
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    heappop_error_test = 0
    pred_classes = []
    label_classes = []
    for i, data in enumerate(dataset):
        model.set_input(data)
        if opt.dataset_mode == 'classification':
            try:
                ncorrect, nexamples, pred_class, label_class = model.test()
                pred_classes.append(pred_class.cpu().numpy())
                label_classes.append(label_class.cpu().numpy())
                writer.update_counter(ncorrect, nexamples)
            except IndexError:
                heappop_error_test += 1
                print('(%d) IndexError occurred, passed to next data'
                      % heappop_error_test)
        else:
            ncorrect, nexamples, pred_class, label_class = model.test()
            writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    if opt.dataset_mode == 'classification':
        print(sklearn.metrics.classification_report(
            np.concatenate(label_classes, axis=None),
            np.concatenate(pred_classes, axis=None)))
    return writer.acc
def run_test(epoch=-1):
    print('Running Test')
    opt = test_options().parse()
    dataset = ModelNet(root=opt.datasets, name='40_graph', train=False,
                       pre_transform=FaceToGraph(remove_faces=True))
    loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for i, data in enumerate(loader):
        # Skip batches whose label count is not a multiple of 64
        if data.y.size(0) % 64 != 0:
            continue
        model.set_input_data(data)
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
def delete_users(writerNames: list):
    """
    Deletes each user whose name is given in the provided list of writer names.
    If any of the users do not exist, a joint error message is raised.

    Arguments:
    writerNames: list - The list of writer names to delete
    """
    invalidWriters = []
    for writer in writerNames:
        writerToDelete = Writer.load_from_folder(writer)
        if writerToDelete is None:
            invalidWriters.append(writer)
            continue
        writerToDelete.delete()

    if len(invalidWriters) > 0:
        raise PyCException('Error: {} are invalid writers'.format(str(invalidWriters)))
def _get_loaded_writers(writerNames: list = None) -> list:
    """
    Loads the writers with the provided names and returns them in a list.

    Arguments:
    writerNames: list - The list of names to load writers for. If None, all
    writers are loaded.
    """
    if writerNames is None or len(writerNames) == 0:
        return Writers.get_all_writers()

    loadedWriters = []
    for writerName in writerNames:
        loadedWriter = Writer.load_from_folder(writerName)
        if loadedWriter is None:
            raise PyCException('Error: {} is an invalid writer'.format(writerName))
        loadedWriters.append(loadedWriter)

    return loadedWriters
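# Hypothetical calls to _get_loaded_writers (writer names are illustrative):
# an empty or missing name list falls back to loading every known writer,
# while a bad name raises PyCException.
everyone = _get_loaded_writers()                # all writers
some = _get_loaded_writers(['jdoe', 'asmith'])  # just the named writers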
def operate(args):
    """
    Takes the passed-in args and delegates to the proper functionality. This is
    set as the executable function when the `writers assign` subparser is used.

    Arguments:
    args: Namespace - The arguments passed via CLI
    """
    # If the user specified a problem list, use that as the list of problems;
    # otherwise assign across every problem from 1 upward
    problemParser = NumberParse()
    if args.problems is not None:
        specifiedProblems = problemParser.str_list_to_uniq_range(args.problems)
    else:
        specifiedProblems = problemParser.str_list_to_uniq_range(['1+'])

    specifiedLanguages = []
    if args.language is not None:
        for languageName in args.language:
            loadedLanguage = Languages.get_language_by_name(languageName)
            if loadedLanguage is None:
                raise PyCException('Error: {} is an invalid language'.format(languageName))
            specifiedLanguages.append(loadedLanguage)
    else:
        specifiedLanguages = [Languages.get_language_by_name(name)
                              for name in Languages.get_all_language_names()]

    specifiedWriters = []
    if not args.full and args.writer_names is not None and len(args.writer_names) > 0:
        for writerName in args.writer_names:
            loadedWriter = Writer.load_from_folder(writerName)
            if loadedWriter is None:
                raise PyCException('Error: {} is an invalid writer'.format(writerName))
            specifiedWriters.append(loadedWriter)
    else:
        specifiedWriters = Writers.get_all_writers()

    allocator = AssignmentAllocator(specifiedProblems, specifiedLanguages,
                                    specifiedWriters, fromScratch=args.full)
    allocator.do_assignment(Definitions.get_value('complete_threshold'),
                            overflow=args.overallocate)
import time

from torch_geometric.data import DataLoader
# ModelNet ships with torch_geometric; train_options and FaceToGraph are
# project-local and their import paths are not shown in the source
from torch_geometric.datasets import ModelNet
from models import create_model
from util.writer import Writer
from test import run_test

if __name__ == '__main__':
    opt = train_options().parse()
    # load dataset
    dataset = ModelNet(root=opt.datasets, name=str(opt.name),
                       pre_transform=FaceToGraph(remove_faces=True))
    print('# training meshes = %d' % len(dataset))
    loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=True)
    model = create_model(opt)
    writer = Writer(opt)
    total_steps = 0

    for epoch in range(1, opt.epoch):
        start_time = time.time()
        count = 0
        running_loss = 0.0
        for i, data in enumerate(loader):
            # Skip batches whose label count is not a multiple of 64
            if data.y.size(0) % 64 != 0:
                continue
            total_steps += opt.batch_size
            count += opt.batch_size
            model.set_input_data(data)
            model.optimize()
            running_loss += model.loss_val
import time

from options.train_options import TrainOptions
from data import DataLoader
from model import create_model
from util.writer import Writer

if __name__ == '__main__':
    opt = TrainOptions().parse()  # base + train options
    dataset = DataLoader(opt)     # input data
    dataset_size = len(dataset)
    print('#training data = %d' % dataset_size)

    model = create_model(opt)
    writer = Writer(opt)
    total_steps = 0

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)
            model.optimize_parameters()

            if total_steps % opt.print_freq == 0:
import os
import time

import numpy as np

from options.train_options import TrainOptions
from data import DataLoader
from models import create_model
from util.writer import Writer
from test_script import run_test

if __name__ == '__main__':
    opt = TrainOptions().parse()
    dataset = DataLoader(opt)
    dataset_size = len(dataset)
    print('#training meshes = %d' % dataset_size)

    model = create_model(opt)
    writer = Writer(opt)
    total_steps = 0

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        loss_mat = []
        CE_mat = []
        prior_mat = []

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            total_steps += opt.batch_size
dataset = DataLoader(opt)
dataset_size = len(dataset)
print('#training meshes = %d' % dataset_size)
logger.info('#training meshes = %d', dataset_size)

model = create_model(opt)
num_total_params = sum(p.numel() for p in model.net.parameters())
num_trainable_params = sum(p.numel() for p in model.net.parameters()
                           if p.requires_grad)
print('Number of total parameters: %d, number of trainable parameters: %d'
      % (num_total_params, num_trainable_params))
logger.info('Number of total parameters: %d, number of trainable parameters: %d',
            num_total_params, num_trainable_params)

writer = Writer(opt)
total_steps = 0
train_start_time = time.time()
best_tst_acc = 0.0

# Fix the seed and make cuDNN deterministic for reproducible runs
torch.manual_seed(1)
cudnn.benchmark = False
cudnn.deterministic = True

for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    iter_data_time = time.time()
    epoch_iter = 0
    heappop_error_train = 0
    logger.info('Epoch %d started ...', epoch)
    writer.reset_counter()
__license__ = "MIT" __maintainer__ = "Andy Wang" """ Modifications made to: __main__ Functionality: saving and using best model to run test at end of training, running on val and test split plotting to tensorboard """ if __name__ == '__main__': opt = TrainOptions().parse() dataset = DataLoader(opt) dataset_size = len(dataset) print('#training meshes = %d' % dataset_size) model = create_model(opt) writer = Writer(opt) total_steps = 0 best_val_cls_acc = -1 best_val_reg_acc = 10000 best_epoch = None for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + opt.epoch_count): epoch_start_time = time.time() iter_data_time = time.time() epoch_iter = 0 train_loss_epoch = 0 for i, data in enumerate(dataset):