# TRAINING PARAMS
num_images_train = len(filenames_train)
num_images_valid = len(filenames_valid)
batch_size = 512
num_epochs = 25
num_steps_per_epoch = int(np.floor(num_images_train / batch_size))  # One pass over the training set per epoch; round down so only full batches are used (any final partial batch is dropped)
num_steps_per_epoch_valid = int(np.floor(num_images_valid / batch_size))  # As above
seed_train = 587
seed_valid = seed_train + 1

# Now create the training & validation datasets
dataset_train = utils.create_dataset(filenames=filenames_train,
                                     labels=labels_train_onehot,
                                     num_channels=image_depth,
                                     batch_size=batch_size,
                                     shuffle_and_repeat=True,
                                     repeat_count=num_epochs,
                                     seed=seed_train)
dataset_valid = utils.create_dataset(filenames=filenames_valid,
                                     labels=labels_valid_onehot,
                                     num_channels=image_depth,
                                     batch_size=batch_size,
                                     shuffle_and_repeat=True,
                                     repeat_count=num_epochs,
                                     seed=seed_valid)
print("DATASETS CREATED")

### BUILD MODEL
# Big: initial_filters=256, size_final_dense=100
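# NOTE (illustrative): utils.create_dataset, used above to build dataset_train and
# dataset_valid, is defined elsewhere. The sketch below shows what such a helper
# might look like with the TF 1.x tf.data API; it assumes `import tensorflow as tf`,
# that the filenames point to same-sized PNG images, and that labels are already
# one-hot encoded -- none of these details are confirmed by this script.
def _image_dataset_sketch(filenames, labels, num_channels, batch_size,
                          shuffle_and_repeat=True, repeat_count=1, seed=None):
    def _parse(filename, label):
        image = tf.image.decode_png(tf.read_file(filename), channels=num_channels)
        image = tf.cast(image, tf.float32) / 255.0  # scale pixel values to [0, 1]
        return image, label

    ds = tf.data.Dataset.from_tensor_slices((filenames, labels))
    if shuffle_and_repeat:
        ds = ds.shuffle(buffer_size=len(filenames), seed=seed).repeat(repeat_count)
    return ds.map(_parse).batch(batch_size).prefetch(1)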
is_cuda = not cfg.NO_CUDA and torch.cuda.is_available()

torch.manual_seed(cfg.TRAIN.SEED)
if is_cuda:
    torch.cuda.manual_seed(cfg.TRAIN.SEED)

# TODO: Transforms for the images
transform = transforms.Compose([])
print(transform)

# Define trainset
trainset = create_dataset(cfg, cfg.DATASET.SPLIT, 'train', length=cfg.DATASET.LENGTH, transform=transform)

# Load train data
train_loader = DataLoader(trainset,
                          batch_size=cfg.TRAIN.BATCH_SIZE,
                          shuffle=True,
                          pin_memory=False,
                          num_workers=cfg.DATASET.WORKERS)

# Start epoch for the model
start_epoch = cfg.TRAIN.START_EPOCH

if cfg.DATASET.VAL:
    # Define validation dataset
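# Side note on the empty Compose([]) marked TODO near the top of this snippet:
# a typical torchvision pipeline for image training might look like the example
# below. The 224x224 size and the ImageNet normalisation statistics are
# illustrative assumptions, not values taken from cfg.
example_transform = transforms.Compose([
    transforms.Resize((224, 224)),                      # fixed input size (assumed)
    transforms.RandomHorizontalFlip(),                  # light train-time augmentation
    transforms.ToTensor(),                              # PIL image -> CxHxW float tensor in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],    # ImageNet statistics (assumed)
                         std=[0.229, 0.224, 0.225]),
])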
def conduct_experiment(n_trials, n_samples, func, transition_period, tolerance_delay, random_seed=0):
    # fix seed for reproducibility
    np.random.seed(random_seed)

    mu_max = 1000
    div_min = 1e-8
    div_max = 1e8

    df_result = pd.DataFrame()

    print("Create Dataset")
    dataset, changepoints = create_dataset(n_samples, func, transition_period)

    print("ChangeFinder")
    row = conduct_CF(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                     changepoints=changepoints, tolerance_delay=tolerance_delay)
    df_result = pd.concat([df_result, row], axis=0)

    print("BOCPD")
    row = conduct_BOCPD(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                        changepoints=changepoints, tolerance_delay=tolerance_delay)
    df_result = pd.concat([df_result, row], axis=0)

    print("Hierarchical 0th")
    row = conduct_Hierarchical(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                               changepoints=changepoints, tolerance_delay=tolerance_delay, order=0)
    df_result = pd.concat([df_result, row], axis=0)

    print("Hierarchical 1st")
    row = conduct_Hierarchical(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                               changepoints=changepoints, tolerance_delay=tolerance_delay, order=1)
    df_result = pd.concat([df_result, row], axis=0)

    print("Hierarchical 2nd")
    row = conduct_Hierarchical(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                               changepoints=changepoints, tolerance_delay=tolerance_delay, order=2)
    df_result = pd.concat([df_result, row], axis=0)

    print("AW2S_MDL")
    row = conduct_AW2S_MDL(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                           changepoints=changepoints, tolerance_delay=tolerance_delay)
    df_result = pd.concat([df_result, row], axis=0)

    return df_result
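# create_dataset, used by conduct_experiment above, is defined elsewhere. A minimal
# sketch of such a synthetic generator follows, under two assumptions not taken from
# this code: `func(t)` returns the underlying mean at time t, and observations are
# Gaussian around a mean smoothed over `transition_period` samples.
def _synthetic_dataset_sketch(n_samples, func, transition_period, noise_std=1.0):
    means = np.array([func(t) for t in range(n_samples)], dtype=float)
    # smooth abrupt level shifts over the transition period
    kernel = np.ones(transition_period) / transition_period
    smoothed_means = np.convolve(means, kernel, mode="same")
    data = smoothed_means + np.random.normal(0.0, noise_std, size=n_samples)
    changepoints = np.where(np.diff(means) != 0)[0] + 1  # indices where the true mean changes
    return data, changepoints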
# TRAINING PARAMS
num_images_valid = len(filenames_valid)
print("Num Images Valid:", num_images_valid)
batch_size = 32
num_epochs = 1
num_steps_per_epoch_valid = int(np.ceil(num_images_valid / batch_size))  # use np.ceil to ensure model predictions cover the whole validation set
seed_train = None  # 587
seed_valid = None  # seed_train+1

# Now create the validation dataset (no shuffle, so predictions keep the original order)
dataset_valid_noshuffle = utils.create_dataset(filenames=filenames_valid,
                                               labels=labels_valid_onehot,
                                               num_channels=image_depth,
                                               batch_size=batch_size,
                                               shuffle_and_repeat=False)
print("DATASETS CREATED")

### PREDICTIONS
# Open tensorflow session
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
config = tf.ConfigProto()
# config.log_device_placement = True
with tf.Session(config=config) as sess:
    sess.run(init)
    print("TF SESSION OPEN")
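    # Illustrative continuation (not part of the original snippet): pull one batch from
    # the no-shuffle validation dataset to confirm shapes before running predictions.
    # This assumes utils.create_dataset returns a TF 1.x tf.data.Dataset; the actual
    # prediction ops are built elsewhere in this script.
    _it = dataset_valid_noshuffle.make_one_shot_iterator()
    _images, _labels = _it.get_next()
    batch_images, batch_labels = sess.run([_images, _labels])
    print("Batch shapes:", batch_images.shape, batch_labels.shape)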
def conduct_experiment(n_trials, n_samples, func, transition_period, tolerance_delay, random_seed=0):
    # fix seed for reproducibility
    np.random.seed(random_seed)

    mu_max = 1000
    div_min = 1e-8
    div_max = 1e8

    df_result = pd.DataFrame()

    print("Create Dataset")
    dataset, changepoints = create_dataset(n_samples, func, transition_period)

    print("ChangeFinder")
    row = conduct_CF(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                     changepoints=changepoints, tolerance_delay=tolerance_delay)
    df_result = pd.concat([df_result, row], axis=0)

    print("BOCPD")
    row = conduct_BOCPD(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                        changepoints=changepoints, tolerance_delay=tolerance_delay)
    df_result = pd.concat([df_result, row], axis=0)

    print("S-MDL 0th")
    params = {"mu_max": mu_max, "div_min": div_min, "div_max": div_max, "order": 0}
    row = conduct_SDMDL(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                        changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
    df_result = pd.concat([df_result, row], axis=0)

    print("S-MDL 1st")
    params = {"mu_max": mu_max, "div_min": div_min, "div_max": div_max, "order": 1}
    row = conduct_SDMDL(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                        changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
    df_result = pd.concat([df_result, row], axis=0)

    print("S-MDL 2nd")
    params = {"mu_max": mu_max, "div_min": div_min, "div_max": div_max, "order": 2}
    row = conduct_SDMDL(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                        changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
    df_result = pd.concat([df_result, row], axis=0)

    print("FW2S-MDL")
    params = {"mu_max": mu_max, "div_min": div_min, "div_max": div_max}
    row = conduct_FW2S_MDL(n_trials=n_trials, n_samples=n_samples, dataset=dataset,
                           changepoints=changepoints, tolerance_delay=tolerance_delay, params=params)
    df_result = pd.concat([df_result, row], axis=0)

    return df_result
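# Example driver (illustrative only): `mean_shift` is a hypothetical signal function,
# and the argument values below are not taken from the original experiments; the exact
# form `func` must take depends on create_dataset, which is defined elsewhere.
if __name__ == "__main__":
    def mean_shift(t):
        return 0.0 if t < 5000 else 5.0  # single level shift halfway through the series

    results = conduct_experiment(n_trials=5, n_samples=10000, func=mean_shift,
                                 transition_period=200, tolerance_delay=100,
                                 random_seed=0)
    print(results)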