def get_class_weights(class_frequencies, exponent=0.5):
    """Compute per-class loss weights from raw class frequencies.

    Rarer classes get larger weights: each class i is weighted by the sum of
    all *other* classes' (dampened) frequencies divided by its own, and the
    result is normalized so the weights sum to the number of classes (i.e.
    uniform frequencies yield all-ones weights).

    Parameters
    ----------
    class_frequencies : sequence of numbers
        Raw voxel/sample counts per class; must contain at least two classes
        and no zero entries (zero frequency would divide by zero).
    exponent : float, optional
        Dampening exponent applied to the frequencies before weighting
        (0.5 = square root, softening the imbalance; 1.0 = full inverse
        frequency weighting).

    Returns
    -------
    np.ndarray of float32, shape (num_classes,)
        Weights summing to ``len(class_frequencies)``.
    """
    freqs = np.array(class_frequencies).astype(np.float32) ** exponent
    # weight_i = (sum of all other classes' frequencies) / freq_i.
    # NOTE: the original indexed with `range(len(...)) != i`, which compares a
    # range object to an int (always True) instead of building a mask — the
    # subtraction below is the vectorized form of the intended computation.
    weights = (np.sum(freqs) - freqs) / freqs
    # Normalize so the weights sum to the number of classes.
    weights /= np.sum(weights)
    weights *= len(freqs)
    return weights

# Validation data pipeline: raw patch generator -> crop the segmentation
# targets to the network's output size -> background prefetching.
data_gen_validation = DavidSegDataGenerator(patients_validation, BATCH_SIZE, PATCH_SIZE=CROP_PATCHES_TO_THIS, num_batches=None, seed=None)
# Crop only the seg maps to OUTPUT_PATCH_SIZE — presumably because the net is
# built with pad=0 (valid convolutions), so its output is smaller than its
# input patch; confirm against build_UNet3D.
data_gen_validation = segDataAugm.center_crop_seg_generator(data_gen_validation, OUTPUT_PATCH_SIZE)
# 1 worker thread, 1 queued batch — no augmentation here, so little to overlap.
data_gen_validation = MultiThreadedGenerator(data_gen_validation, 1, 1)
data_gen_validation._start()

# Training data pipeline: sample oversized patches, augment, then crop down.
# Larger INPUT_PATCH_SIZE gives the elastic deformation valid context at the
# borders before the final center crop — TODO confirm sizes against config.
data_gen_train = DavidSegDataGenerator(patients_train, BATCH_SIZE, PATCH_SIZE=INPUT_PATCH_SIZE, num_batches=None, seed=None)
# NOTE(review): hard-coded intermediate crop of (260, 260, 260) — presumably
# chosen to bound the cost of the elastic transform; verify.
data_gen_train = segDataAugm.center_crop_generator(data_gen_train, (260, 260, 260))
# Elastic deformation (alpha=900, sigma=12) followed by random axis mirroring.
# ("elastric" is a typo in the project API name and must be kept as-is.)
data_gen_train = segDataAugm.elastric_transform_generator(data_gen_train, 900, 12)
data_gen_train = segDataAugm.mirror_axis_generator(data_gen_train)
# Crop images to the network input size, then crop the seg targets further to
# the (smaller) network output size — the net uses pad=0 below.
data_gen_train = segDataAugm.center_crop_generator(data_gen_train, CROP_PATCHES_TO_THIS)
data_gen_train = segDataAugm.center_crop_seg_generator(data_gen_train, OUTPUT_PATCH_SIZE)
# 8 worker threads / 8 queued batches to hide the augmentation cost.
data_gen_train = MultiThreadedGenerator(data_gen_train, 8, 8)
data_gen_train._start()

# Build the 3D U-Net. First positional arg (5) is presumably the number of
# input channels (or network depth) — confirm against build_UNet3D's signature.
# pad=0 means valid convolutions: the output patch is smaller than the input,
# which is why the target segmentations are cropped to OUTPUT_PATCH_SIZE above.
net = build_UNet3D(5, BATCH_SIZE, num_output_classes=num_classes, base_n_filters=16, input_dim=CROP_PATCHES_TO_THIS, pad=0)
# Flattened (voxel-wise) output layer used as the prediction for the loss.
output_layer_for_loss = net["output_flattened"]

# Fixed number of batches per "epoch" — the generators sample patches
# endlessly (num_batches=None), so an epoch is defined by count, not by a
# pass over the data. The commented alternative would derive it from the
# dataset size instead:
n_batches_per_epoch = 300
# n_batches_per_epoch = np.floor(n_training_samples/float(BATCH_SIZE))
# Fixed number of validation batches per evaluation, same rationale:
n_test_batches = 30
# n_test_batches = np.floor(n_val_samples/float(BATCH_SIZE))