import os

import numpy as np

import load_data
import realtime_augmentation as ra  # module name for the "ra" alias assumed from the calls below

DO_VALID = True  # disable this to not bother with the validation set evaluation
DO_TEST = True  # disable this to not generate predictions on the testset

target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)

print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)

input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45),
]

num_input_representations = len(ds_transforms)

# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10  # integer division
num_train -= num_valid

num_test = load_data.num_test

valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
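# The ".npy.gz" targets above imply gzip-compressed NumPy arrays. A minimal
# sketch of a matching save/load pair (hypothetical helpers; load_data may
# already provide equivalents):
import gzip

def save_gz(path, arr):
    # serialize the array in .npy format straight into a gzip stream
    with gzip.open(path, "wb") as f:
        np.save(f, arr)

def load_gz(path):
    # inverse of save_gz: decompress, then deserialize
    with gzip.open(path, "rb") as f:
        return np.load(f)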
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"

if continueAnalysis:
    print "Loading model data"
    analysis = np.load(ANALYSIS_PATH)

print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime

input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45),
]

num_input_representations = len(ds_transforms)

augmentation_params = {
    'zoom_range': (1.0 / 1.3, 1.3),
    'rotation_range': (0, 360),
    'shear_range': (0, 0),
    'translation_range': (-4, 4),
    'do_flip': True,
}

augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
                                                    augmentation_params=augmentation_params,
                                                    ds_transforms=ds_transforms,
                                                    target_sizes=input_sizes)  # target_sizes kwarg is an assumption; the original snippet breaks off after ds_transforms
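# For illustration, one random draw from the ranges configured above could be
# sampled like this (hypothetical helper, not the actual ra implementation;
# the log-uniform zoom is an assumption):
def sample_augmentation(params, rng=np.random):
    zoom = np.exp(rng.uniform(*np.log(params['zoom_range'])))  # log-uniform: zoom-in and zoom-out equally likely
    rotation = rng.uniform(*params['rotation_range'])  # degrees
    shear = rng.uniform(*params['shear_range'])  # degrees
    translation = tuple(rng.uniform(*params['translation_range']) for _ in range(2))  # pixels (x, y)
    flip = params['do_flip'] and bool(rng.randint(2))
    return {'zoom': zoom, 'rotation': rotation, 'shear': shear,
            'translation': translation, 'flip': flip}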
DO_TEST = True  # disable this to not generate predictions on the testset

target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)

print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)

input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45),
]

num_input_representations = len(ds_transforms)

# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10  # integer division
num_train -= num_valid

num_test = load_data.num_test

valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids

train_indices = np.arange(num_train)
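# Quick sanity checks on the 90/10 split above (illustrative only):
assert num_train + num_valid == load_data.num_train
assert len(train_ids) == num_train and len(valid_ids) == num_valid
assert set(train_ids).isdisjoint(valid_ids)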
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"

TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"

print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime

input_sizes = [(69, 69), (69, 69)]
ds_transforms = [ra.build_ds_transform(3.0, target_size=input_sizes[0])]
num_input_representations = len(ds_transforms)

augmentation_params = {
    "zoom_range": (1.0 / 1.3, 1.3),
    "rotation_range": (0, 360),
    "shear_range": (0, 0),
    "translation_range": (-4, 4),
    "do_flip": False,
}

augmented_data_gen = ra.realtime_augmented_data_gen(
    num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
    augmentation_params=augmentation_params,
    ds_transforms=ds_transforms)  # remaining kwargs are an assumption; the original snippet breaks off after augmentation_params
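# TARGET_PATH above is a CSV of per-galaxy predictions. A sketch of writing it,
# assuming `predictions` is an array with one row per test galaxy, `test_ids`
# the matching ids, and `header` the column names (all hypothetical here):
import csv

def write_predictions_csv(path, ids, predictions, header):
    with open(path, "wb") as f:  # "wb" for the Python 2 csv module
        writer = csv.writer(f)
        writer.writerow(header)
        for gid, row in zip(ids, predictions):
            writer.writerow([gid] + list(row))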