# NOTE(review): this chunk arrived whitespace-collapsed onto one line; it is
# reflowed here with every code token unchanged. The leading `self.…` assignment
# looks like the tail of a Config-class __init__ whose header is outside this
# view — confirm against the full file.
self.test_results_dir = "/out"  # directory where test results are written — presumably a mounted output path; verify

if __name__ == "__main__":
    # Get configuration
    # TASK: Fill in parameters of the Config class and specify directory where the data is stored and
    # directory where results will go
    c = Config()

    # Load data
    print("Loading data...")
    # TASK: LoadHippocampusData is not complete. Go to the implementation and complete it.
    # c.patch_size is passed as both the y and z target shape for the loaded volumes.
    data = LoadHippocampusData(c.root_dir, y_shape=c.patch_size, z_shape=c.patch_size)

    # Create test-train-val split
    # In a real world scenario you would probably do multiple splits for
    # multi-fold training to improve your model quality
    keys = range(len(data))  # one integer key per loaded volume

    # Here, random permutation of keys array would be useful in case if we do something like
    # a k-fold training and combining the results.
    split = dict()

    # TASK: create three keys in the dictionary: "train", "val" and "test". In each key, store
    # the array with indices of training volumes to be used for training, validation
    # NOTE(review): the chunk is truncated here — the comment presumably continues
    # "...and testing respectively", and the split-filling code follows outside this view.
# NOTE(review): this chunk arrived whitespace-collapsed onto one line; it is
# reflowed here with every code token unchanged. It appears to be the interior
# of the script's __main__ section (parsed CLI options in `args`, a Config
# instance in `c`) — confirm indentation context against the full file.
c.set_model_name(args.modelname)  # model name taken from the -modelname CLI option — presumably; verify parser

if args.testonly:
    # need to also provide a weights filename if we're only testing
    print("Testing mode.")
    c.set_test(True)
    if not args.weights:
        # abort early: test-only mode is meaningless without saved weights
        print("Please also provide a weights filename through -w")
        sys.exit()

# Load data
print("Loading data...")
# TASK: LoadHippocampusData is not complete. Go to the implementation and complete it.
# NOTE(review): the string concatenation assumes c.root_dir already ends with a
# path separator — consider os.path.join in the full file; TODO confirm.
data = LoadHippocampusData(c.root_dir + "TrainingSet/", y_shape=c.patch_size, z_shape=c.patch_size)

# Create test-train-val split
# In a real world scenario you would probably do multiple splits for
# multi-fold training to improve your model quality
data_len = len(data)        # number of loaded volumes
keys = range(data_len)      # one integer key per volume

# Here, random permutation of keys array would be useful in case if we do something like
# a k-fold training and combining the results.
# TASK: create three keys in the dictionary: "train", "val" and "test". In each key, store
# the array with indices of training volumes to be used for training, validation
# and testing respectively.
# NOTE(review): the chunk is truncated here — the split dictionary itself is
# built outside this view.