def count_variables(self):
    if self.model is not None:
        trainable = count_params(self.model.trainable_weights)
        non_trainable = count_params(self.model.non_trainable_weights)
        return {"trainable": trainable, "non_trainable": non_trainable}
    else:
        return None
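All of these snippets lean on Keras' count_params utility, which sums the number of scalar entries in a list of weight tensors. A minimal sketch of that usage, assuming the import path used in the examples below (it has moved between Keras/TensorFlow versions) and a purely illustrative toy model:

# Minimal usage sketch; the toy model is illustrative only.
from keras.utils.layer_utils import count_params
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization

toy = Sequential([
    Dense(8, activation='relu', input_shape=(4,)),
    BatchNormalization(),  # contributes non-trainable moving mean/variance
    Dense(1),
])

print(count_params(toy.trainable_weights))      # 65 trainable parameters
print(count_params(toy.non_trainable_weights))  # 16 non-trainable parameters

Keras models also expose model.count_params() for the total; the utility used here is what lets these examples split trainable from non-trainable counts.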
Example #2
def freeze_model(model, num=-1, trainable=False):
    """Freeze all layers (num=-1) or only the first `num` layers of a Keras model.

    If `trainable` is True, every layer is unfrozen first, so only the
    requested layers end up frozen.
    """
    from keras.utils.layer_utils import count_params

    if trainable:
        for layer in model.layers:
            layer.trainable = True

    if num == -1:
        for layer in model.layers:
            layer.trainable = False
    else:
        for layer in model.layers[:num]:
            layer.trainable = False

    # recompile so the new trainable flags take effect
    model.compile(loss=model.loss, optimizer=model.optimizer, metrics=model.metrics)

    trainable_count = count_params(model.trainable_weights)
    non_trainable_count = count_params(model.non_trainable_weights)

    print(f'Trainable Params: {trainable_count:,d} || Non-Trainable Params: {non_trainable_count:,d}')

    return model
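A hedged usage sketch of freeze_model for fine-tuning; the MobileNetV2 backbone and the "last three layers" split are illustrative assumptions, and the model is compiled first because freeze_model re-reads its loss and optimizer:

# Hypothetical fine-tuning setup: unfreeze everything, then freeze all but the last 3 layers.
from keras.applications import MobileNetV2

base = MobileNetV2(weights='imagenet', include_top=False, pooling='avg')
base.compile(loss='categorical_crossentropy', optimizer='adam')
base = freeze_model(base, num=len(base.layers) - 3, trainable=True)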
Example #3
def count_params(model):
    # The local import shadows this wrapper's own name, so the calls below
    # use Keras' count_params on the collected weight lists, not recursion.
    from keras.utils.layer_utils import count_params
    model._check_trainable_weights_consistency()
    if hasattr(model, '_collected_trainable_weights'):
        trainable_count = count_params(model._collected_trainable_weights)
    else:
        trainable_count = count_params(model.trainable_weights)
    return trainable_count
Example #4
    def print_weights(self):
        no_trainable = count_params(self.SimCLR_classifier.trainable_weights)
        no_nontrainable = count_params(self.SimCLR_classifier.non_trainable_weights)

        print(f'trainable counts (1): {round(no_trainable/1e6, 2)} M.')
        print(f'trainable counts (2): {len(self.SimCLR_classifier.trainable_weights)}')
        print(f'non-trainable counts (1): {round(no_nontrainable/1e6, 2)} M.')
        print(f'non-trainable counts (2): {len(self.SimCLR_classifier.non_trainable_weights)}')
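Example #5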
    def print_weights(self):
        """ Function to print (non)-learnable weights
            Helps checking unfreezing process
        """
        trainable_count = count_params(self.SimCLR_model.trainable_weights)
        non_trainable_count = count_params(
            self.SimCLR_model.non_trainable_weights)

        print(f"trainable parameters: {round(trainable_count/1e6,2)} M.")
        print(
            f"non-trainable parameters: {round(non_trainable_count/1e6,2)} M.")
Example #6
def get_structure_info(model: tf.keras.Model) -> StructureInfo:
    trainable_count = count_params(model.trainable_weights)
    non_trainable_count = count_params(model.non_trainable_weights)

    info = StructureInfo()
    info.variable_count = trainable_count
    info.weights_size = (trainable_count + non_trainable_count) * 4  # assumes 4 bytes (float32) per parameter
    info.svg = model_to_dot(model,
                            show_layer_names=True,
                            show_shapes=True,
                            dpi=None).create(prog='dot', format='svg')
    info.flops = get_flops(model, batch_size=1)
    return info
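Example #7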
  def datadefreezer(self, num_of_unfrozen_layers, prout=False):
    for layer in range(0, len(self.classification.layers), 1):
      if layer < num_of_unfrozen_layers:
        self.classification.layers[layer].trainable = False
      else:
        self.classification.layers[layer].trainable = True

    if prout:
      no_trainable = count_params(self.classification.trainable_weights)
      no_nontrainable = count_params(self.classification.non_trainable_weights)

      print(f'trainable counts (1): {round(no_trainable/1e6, 2)} M.')
      print(f'trainable counts (2): {len(self.classification.trainable_weights)}')
      print(f'non-trainable counts (1): {round(no_nontrainable/1e6, 2)} M.')
      print(f'non-trainable counts (2): {len(self.classification.non_trainable_weights)}')
Example #8
    def get_de_loss(self, net_model, train_hist, last_n_perc=0.1):
        """
            Make the differential evolution loss.

            :return:
        """
        nb_params = count_params(net_model.trainable_weights)
        res_loss_fct = self.de_loss['Res_Loss']
        param_loss_fct = self.de_loss['Param_Loss']

        last_n = int(last_n_perc * self.epochs)
        last_n_losses = train_hist.history['loss'][-last_n - 1:]

        if self.loss_mode == 'last_n':
            res_loss_val = np.mean(last_n_losses)
        else:

            res_loss_val = res_loss_fct(
                self.train_dict['y'],
                net_model.predict(self.train_dict['x']).flatten()).numpy()

        p_loss_val = param_loss_fct(nb_params)
        return res_loss_val, p_loss_val, {
            'Residual Loss': res_loss_val,
            'Nb Params Loss': p_loss_val
        }
Example #9
    def __init__(self,
                 output_directory,
                 input_shape,
                 nb_classes,
                 verbose=False,
                 build=True,
                 batch_size=64,
                 lr=0.001,
                 nb_filters=32,
                 use_residual=True,
                 use_bottleneck=True,
                 depth=6,
                 kernel_size=41,
                 nb_epochs=2000,
                 bottleneck_size=32,
                 class_weight=None):

        self.output_directory = output_directory

        self.loss = coral.OrdinalCrossEntropy(num_classes=nb_classes,
                                              importance_weights=class_weight)

        self.nb_filters = nb_filters
        self.use_residual = use_residual
        self.use_bottleneck = use_bottleneck
        self.depth = depth
        self.kernel_size = kernel_size - 1
        self.callbacks = None
        self.batch_size = batch_size
        self.bottleneck_size = bottleneck_size
        self.nb_epochs = nb_epochs
        self.lr = lr
        self.verbose = verbose

        if build == True:
            self.model = self.build_model(input_shape, nb_classes)
            if (verbose == True):
                self.model.summary()
            self.model.save_weights(self.output_directory + 'model_init.hdf5')

        trainable_count = count_params(self.model.trainable_weights)

        model_hyper = {
            'model': 'masked-inception',
            'filters': nb_filters,
            'residuals': use_residual,
            'bottleneck': use_bottleneck,
            'depth': depth,
            'kernel_size': kernel_size,
            'batch_size': batch_size,
            'epochs': nb_epochs,
            'bottleneck_size': self.bottleneck_size,
            'classes': nb_classes,
            'input_shape': input_shape,
            'trainable_params': trainable_count
        }

        f = open(os.path.join(self.output_directory, 'hyperparams.txt'), "w")
        f.write(str(model_hyper))
        f.close()
Example #10
    def __init__(self, output_directory, input_shape, nb_classes, lr=0.001,
                 batch_size=16, verbose=2, nb_epochs=2000, depth=1,
                 filters=16, window=21, decay=False):

        input_shape = (None, None, input_shape[-1])
        self.output_directory = output_directory

        self.batch_size = batch_size
        self.verbose = verbose
        self.lr = lr
        self.nb_epochs = nb_epochs
        self.input_shape = input_shape
        self.nb_classes = nb_classes
        self.depth = depth
        self.filters = filters
        self.window = window
        self.decay = decay

        self.model = self.build_model()

        trainable_count = count_params(self.model.trainable_weights)
        model_hyper = {'model': 'masked-xcm', 'filters': filters,
                       'depth': depth, 'window_size': window, 'decay': decay,
                       'batch_size': batch_size, 'classes': nb_classes,
                       'input_shape': input_shape, 'epochs': nb_epochs,
                       'trainable_params': trainable_count}

        f = open(os.path.join(self.output_directory, 'hyperparams.txt'), "w")
        f.write(str(model_hyper))
        f.close()

        return
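Example #11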
    def on_epoch_end(self, epoch, logs=None):
        super().on_epoch_end(epoch, logs)

        trainable_count = layer_utils.count_params(
            self.model.trainable_weights)
        non_trainable_count = layer_utils.count_params(
            self.model.non_trainable_weights)

        self._params_logger.log_scalar("trainable_parameters", trainable_count,
                                       epoch)
        self._params_logger.log_scalar("non_trainable_parameters",
                                       non_trainable_count, epoch)

        if self._verbose > 0:
            print("Trainable PARAMS at epoch {0}: {1:,}".format(
                epoch, trainable_count))
            print("Non trainable PARAMS at epoch {0}: {1:,}".format(
                epoch, non_trainable_count))
Example #12
    def construct(self):
        inputs = [Input(input_shape) for input_shape in self.input_shapes]
        outputs = self.construct_graph_fn(inputs)
        self.model = Model(inputs=inputs, outputs=outputs)
        self._compile()

        if mlflow.active_run():
            mlflow.log_metric('num_of_parameters',
                              count_params(self.model.trainable_weights))
        return self.model
Example #13
    def update_from_model(self):
        from keras.models import load_model
        from keras.utils import plot_model
        from keras.utils.layer_utils import count_params

        model = load_model(
            os.path.join(settings.MEDIA_ROOT, self.model_file.name))
        model._check_trainable_weights_consistency()
        self.inputs = [str(i) for i in model.inputs]
        self.outputs = [str(o) for o in model.outputs]
        self.config = model.get_config()
        self.nb_non_trainable = count_params(model.non_trainable_weights)
        if hasattr(model, '_collected_trainable_weights'):
            self.nb_trainable = count_params(
                model._collected_trainable_weights)
        else:
            self.nb_trainable = count_params(model.trainable_weights)

        img_path = os.path.join(settings.MEDIA_ROOT, self.plot_file())
        plot_model(model, to_file=img_path)
Example #14
    def __init__(self, output_directory, input_shape, nb_classes, lr=0.001,
                 batch_size=16, verbose=2, nb_epochs=2000, depth=1, mask=True,
                 filters=16, window=21, decay=False, metric='val_accuracy',
                 reduce_lr_metric='train_loss'):

        input_shape = (None, None, input_shape[-1])
        self.output_directory = output_directory

        self.batch_size = batch_size
        self.verbose = verbose
        self.lr = lr
        self.nb_epochs = nb_epochs
        self.input_shape = input_shape
        self.nb_classes = nb_classes
        self.depth = depth
        self.filters = filters
        self.window = window
        self.decay = decay
        self.mask = mask

        if metric == 'train_loss':
            self.metric = 0
        elif metric == 'train_accuracy':
            self.metric = 1
        elif metric == 'val_loss':
            self.metric = 2
        else:
            self.metric = 3

        if reduce_lr_metric == 'train_accuracy':
            self.reduce_lr_metric = 1
        elif reduce_lr_metric == 'val_loss':
            self.reduce_lr_metric = 2
        elif reduce_lr_metric == 'val_accuracy':
            self.reduce_lr_metric = 3
        else:
            self.reduce_lr_metric = 0

        self.model = self.build_model()

        trainable_count = count_params(self.model.trainable_weights)
        model_hyper = {'model': 'masked-xcm', 'filters': filters, 'mask': mask,
                       'depth': depth, 'window_size': window, 'decay': decay,
                       'batch_size': batch_size, 'classes': nb_classes,
                       'input_shape': input_shape, 'epochs': nb_epochs,
                       'trainable_params': trainable_count, 'metric': metric,
                       'lr_metric': reduce_lr_metric}

        f = open(os.path.join(self.output_directory, 'hyperparams.txt'), "w")
        f.write(str(model_hyper))
        f.close()

        return
Example #15
    def __init__(self,
                 output_directory,
                 input_shape,
                 nb_classes,
                 verbose=False,
                 build=True,
                 load_weights=False,
                 n_feature_maps=64,
                 depth=3,
                 nb_epochs=1500,
                 batch_size=64):
        self.output_directory = output_directory
        self.n_feature_maps = n_feature_maps
        self.depth = depth
        self.nb_epochs = nb_epochs
        self.batch_size = batch_size

        if build == True:
            self.model = self.build_model(input_shape, nb_classes)
            if (verbose == True):
                self.model.summary()
            self.verbose = verbose
            if load_weights == True:
                self.model.load_weights(
                    self.output_directory.replace('resnet_augment', 'resnet').
                    replace('TSC_itr_augment_x_10', 'TSC_itr_10') +
                    '/model_init.hdf5')
            else:
                self.model.save_weights(self.output_directory +
                                        'model_init.hdf5')

        trainable_count = count_params(self.model.trainable_weights)
        model_hyper = {
            'model': 'masked-resnet',
            'classes': nb_classes,
            'input_shape': input_shape,
            'depth': depth,
            'feature_maps': n_feature_maps,
            'epochs': nb_epochs,
            'trainable_params': trainable_count,
            'batch_size': batch_size
        }

        f = open(os.path.join(self.output_directory, 'hyperparams.txt'), "w")
        f.write(str(model_hyper))
        f.close()

        return
Example #16
def build_and_fit_model(x, *args):
    train_data_gen, val_data_gen = args

    layer_codes = np.array(x).reshape(-1, 2).astype(int)
    print(layer_codes)

    try:
        model = build_model(layer_codes)
    except ValueError:
        return 0.0

    model.compile(loss="categorical_crossentropy",
                  optimizer=Adadelta(),
                  metrics=["accuracy"])

    print(model.summary())
    trainable_count = count_params(model.trainable_weights)
    print("trainable_count", trainable_count)
    if trainable_count > 0:
        try:
            history = model.fit_generator(
                generator=train_data_gen,
                steps_per_epoch=train_data_gen.n // train_data_gen.batch_size,
                epochs=10,
                validation_data=val_data_gen,
                validation_steps=val_data_gen.n // val_data_gen.batch_size,
                verbose=1)
        except ValueError:
            return 0.0

        val_acc = history.history["val_acc"][-1]
        with open(_RESULTS_FILE_NAME, "a") as f:
            f.write("{}\n".format(
                json_tricks.dumps(
                    {
                        "model_config": model.to_json(),
                        "val_acc": val_acc,
                    },
                    indent=4,
                    sort_keys=True)))

    else:
        val_acc = 0.0

    return -val_acc
Example #17
    def __init__(self,
                 output_directory,
                 input_shape,
                 nb_classes,
                 depth=4,
                 kernel_size=21,
                 filters=64,
                 verbose=2,
                 build=True,
                 nb_epochs=2000,
                 batch_size=64):
        self.output_directory = output_directory
        self.depth = depth
        self.kernel_size = kernel_size
        self.filters = filters
        self.nb_epochs = nb_epochs
        self.batch_size = batch_size

        if build == True:
            self.model = self.build_model(input_shape, nb_classes)
            if (verbose == True):
                self.model.summary()
            self.verbose = verbose
            self.model.save_weights(self.output_directory + 'model_init.hdf5')

        trainable_count = count_params(self.model.trainable_weights)
        model_hyper = {
            'model': 'masked-fcn',
            'filters': filters,
            'depth': depth,
            'kernel_size': kernel_size,
            'batch_size': batch_size,
            'classes': nb_classes,
            'input_shape': input_shape,
            'epochs': nb_epochs,
            'trainable_params': trainable_count
        }

        f = open(os.path.join(self.output_directory, 'hyperparams.txt'), "w")
        f.write(str(model_hyper))
        f.close()
        return
Example #18
def test_model_implementation_should_look_like(raw_data, simple_transformer):
    transformed_model_data = simple_transformer.transform(raw_data)
    train_data, test_data = Splitter.train_test_split(transformed_model_data)

    backend.clear_session()
    convnet = ChunkyCNN(data_shape=(72, 63),
                        model_config_dir='./tests/model_dir',
                        component_name='global')
    assert count_params(convnet.model.trainable_weights) == 32414

    convnet.train(train_data, n_epochs=1)
    layer_0_weights = convnet.model.layers[0].get_weights()
    convnet.load_model_weights('global_modelparams.h5')
    convnet.train(train_data, n_epochs=1)
    assert not np.array_equal(layer_0_weights,
                              convnet.model.layers[0].get_weights())

    convnet.load_model_weights('global_modelparams.h5')
    prediction = convnet.predict(test_data)
    assert all(np.round(prediction.probabilities) == prediction.classes)

    evaluation_stats = convnet.evaluate(test_data, prediction)
    assert np.sum(evaluation_stats.support) == len(test_data.labels)
Example #19
def get_number_of_weights(model):
    return count_params(model.trainable_weights) + count_params(
        model.non_trainable_weights)
Example #20
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        self.conv_layers = {c: [] for c in ['i', 'f', 'c', 'o', 'a', 'ahat']}

        for l in range(self.nb_layers):
            for c in ['i', 'f', 'c', 'o']:
                act = self.LSTM_activation if c == 'c' else self.LSTM_inner_activation
                self.conv_layers[c].append(
                    Conv2D(self.R_stack_sizes[l],
                           self.R_filt_sizes[l],
                           padding='same',
                           activation=act,
                           data_format=self.data_format))

            act = 'relu' if l == 0 else self.A_activation
            self.conv_layers['ahat'].append(
                Conv2D(self.stack_sizes[l],
                       self.Ahat_filt_sizes[l],
                       padding='same',
                       activation=act,
                       data_format=self.data_format))

            if l < self.nb_layers - 1:
                self.conv_layers['a'].append(
                    Conv2D(self.stack_sizes[l + 1],
                           self.A_filt_sizes[l],
                           padding='same',
                           activation=self.A_activation,
                           data_format=self.data_format))

        self.upsample = UpSampling2D(data_format=self.data_format)
        self.pool = MaxPooling2D(data_format=self.data_format)

        self.trainable_weights = []
        print('PredNet trainable layers:', self.trainable_layers)
        print('PredNet trainable units:', self.trainable_units)

        nb_row, nb_col = (
            input_shape[-2],
            input_shape[-1]) if self.data_format == 'channels_first' else (
                input_shape[-3], input_shape[-2])
        trainable_param_count = 0
        non_trainable_param_count = 0

        for c in sorted(self.conv_layers.keys()):
            for l in range(len(self.conv_layers[c])):
                ds_factor = 2**l
                if c == 'ahat':
                    nb_channels = self.R_stack_sizes[l]
                elif c == 'a':
                    nb_channels = 2 * self.R_stack_sizes[l]
                else:
                    nb_channels = self.stack_sizes[l] * 2 + self.R_stack_sizes[
                        l]
                    if l < self.nb_layers - 1:
                        nb_channels += self.R_stack_sizes[l + 1]
                in_shape = (input_shape[0], nb_channels, nb_row // ds_factor,
                            nb_col // ds_factor)
                if self.data_format == 'channels_last':
                    in_shape = (in_shape[0], in_shape[2], in_shape[3],
                                in_shape[1])

                trainable = (self.trainable_layers is None or l in self.trainable_layers) and \
                            (self.trainable_units is None or c in self.trainable_units)

                with K.name_scope('layer_' + c + '_' + str(l)):
                    self.conv_layers[c][l].trainable = trainable
                    self.conv_layers[c][l].build(in_shape)

                if trainable:
                    layer_param_count = layer_utils.count_params(
                        self.conv_layers[c][l].trainable_weights)
                    print(
                        f'Layer {c}_{l} trainable params: {layer_param_count}')
                    trainable_param_count += layer_param_count
                    self.trainable_weights += self.conv_layers[c][
                        l].trainable_weights
                else:
                    layer_param_count = layer_utils.count_params(
                        self.conv_layers[c][l].non_trainable_weights)
                    print(
                        f'Layer {c}_{l} non-trainable params: {layer_param_count}'
                    )
                    non_trainable_param_count += layer_param_count
                    self.non_trainable_weights += self.conv_layers[c][
                        l].non_trainable_weights

        print('PredNet trainable params:', trainable_param_count)
        print('PredNet non-trainable params:', non_trainable_param_count)

        self.states = [None] * self.nb_layers * 3

        if self.extrap_start_time is not None:
            self.t_extrap = K.variable(
                self.extrap_start_time,
                int if K.backend() != 'tensorflow' else 'int32')
            self.states += [None] * 2  # [previous frame prediction, timestep]

        if self.stateful:
            self.reset_states()
Example #21
def wrap_train_test(gene):
    global x_train, y_train, x_test, y_test
    global x_train_shapes, x_test_shapes
    runid = "N/A"
    print(gene)

    with open("tested.log", "a") as f:
        f.write(genestr(gene))
        f.write("\n")

    print("\nWrapping...\n")
    strategy = tf.distribute.MirroredStrategy()
    print('Number of devices: {}'.format(strategy.num_replicas_in_sync))

    # reshaping of the training data
    if gene[-1][0] == 2:  # reshaping is enabled
        desired_size = gene[0][1]
        if desired_size not in x_train_shapes:
            x_train_shapes[desired_size] = resize(x_train, desired_size)
        if desired_size not in x_test_shapes:
            x_test_shapes[desired_size] = resize(x_test, desired_size)

        x_train_current = x_train_shapes[desired_size]
        x_test_current = x_test_shapes[desired_size]
    elif gene[-1][0] == 1:  # no reshaping
        x_train_current = x_train
        x_test_current = x_test
    else:
        print("#### INVALID GENE - last value is not 1 nor 2", gene[-1][0])
        return runid, 0

    # define model
    try:
        print("x_train shape: " + str(x_train_current.shape[1:]))
        model, eval_model, manipulate_model = CapsNet(
            gene=gene,
            input_shape=x_train_current.shape[1:],
            n_class=len(np.unique(np.argmax(y_train, 1))),
            routings=args.routings)
    except ValueError as e:  # some bug in the chromosome ....
        print("#### VALUE error desc ", e)
        print("#### VALUE error gene ", gene)
        tf.keras.backend.clear_session()
        K.clear_session()
        return runid, 0
    except tf.errors.ResourceExhaustedError as e:  # some bug in the chromosome ....
        print("#### Out of resources error desc ", e)
        print("#### Out of resources error gene ", gene)
        tf.keras.backend.clear_session()
        K.clear_session()
        return runid, 0

    model.summary()

    trainable_count = count_params(model.trainable_weights)
    if args.max_params > 0 and trainable_count > args.max_params:
        print(
            f"## ERR: number of trainable params {trainable_count} exceeded limit {args.max_params}"
        )
        tf.keras.backend.clear_session()
        K.clear_session()
        return runid, 0

    # train or test
    if args.weights is not None:  # init the model weights with provided one
        model.load_weights(args.weights)
    if not args.testing:
        # if gene[len(gene)-1][0]==2:
        #     x_train = resize(x_train, gene[0][1]) #64
        #     x_test = resize(x_test, gene[0][1])
        #     train(model=model, data=((x_train, y_train), (x_test, y_test)), args=args)
        # elif gene[len(gene)-1][0]==1:
        print("Train shapes:", x_train.shape, y_train.shape)
        runid, _ = train(model=model,
                         data=((x_train_current, y_train), (x_test_current,
                                                            y_test)),
                         args=args)
    else:  # as long as weights are given, will run testing
        if args.weights is None:
            print(
                'No weights are provided. Will test using random initialized weights.'
            )
    test_acc = test(model=eval_model, data=(x_test_current, y_test), args=args)

    tf.keras.backend.clear_session()
    K.clear_session()
    return runid, test_acc
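Example #22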
def train_iitnet_crossvalid():
    """
    Train the iitnet using cross validation. Using only training and validation data
    for parameter tuning. Best model will then be evaluated in a separate program.
    """

    # log timestamps of relevant stages
    start_processing = datetime.datetime.now()
    timestamps = {'processstart': start_processing}

    # print used devices
    print("Using GPU:", K.tensorflow_backend._get_available_gpus())

    # get parameters
    params = Params()
    # get additional parameters for iitnet
    plx: dict = pp3.get_parameters()
    params.plx.update(plx)

    # adjust winslow parameters
    if 'WINSLOW_PIPELINE_NAME' in os.environ:
        winslow_params(params)

    params.plx['subject_batch'] = 1  # !
    # NOTE: mdl_architecture has to be set to 'iitnet_cnn_bilstm'

    # set local parameters for the cross validation
    k = params.plx.get('k_crossval')
    train_total = params.plx.get('train_count') + params.plx.get('val_count')
    count_per_fold = train_total // k

    data_int = DataInt(save_path=params.plx["save_path"],
                       perform_save_raw=params.plx["save_raw_data"],
                       key_labels=params.plx["key_labels"],
                       uuid=params.plx["experiment_uuid"])

    # Process data, if not already processed
    if not params.plx.get("data_already_processed"):
        # Process Data
        process_data(params, data_int, params.plx["data_count"])
    else:
        # recover self.experiment.data_objects_list = List of the subject names
        preprocessed_data_path = params.plx["save_path"] + params.plx[
            "experiment_uuid"]  # "D:/PhysioNet/processed/sa6pr7/"
        pickle_object = params.plx["experiment_uuid"] + ".pckl"
        subject_folders = [
            name for name in os.listdir(preprocessed_data_path)
            if not name == pickle_object
        ]

        relevant_subjects = subject_folders[:train_total]
        data_int.experiment.recover_data_objectlist(relevant_subjects)

        print("Data already processed. Recover", str(len(relevant_subjects)),
              "Subjects from", preprocessed_data_path)

    num_epochs = params.plx.get('epochs')
    apply_oversampling = params.plx.get(
        'apply_oversampling')  # !only on training data

    timestamps['modelstart'] = datetime.datetime.now()
    # build model
    model, callbacks = choose_model(params, compile=False)
    timestamps['modelend'] = datetime.datetime.now()
    # save untrained model
    if k > 1:
        print("Save untrained model ... ", end=" ")
        model.save(params.file_path_raw_mdl)
        print("done")

    timestamps_trainingstart = []
    timestamps_trainingend = []
    all_val_accs = []
    all_val_loss = []
    timestamps['crossval_start'] = datetime.datetime.now()

    for i in range(k):
        print("\n=============================================")
        print("=======> Cross Validation - Fold #", i + 1, "<=======")
        print("=============================================")

        # get raw model
        if k > 1:
            print("Load untrained model ... ", end=" ")
            model = load_model(params.file_path_raw_mdl)
            print("done")
        # compile model
        model = compile_model_iitnet(params=params, model=model)

        # set indices for the data to be loaded in this fold
        if k == 1:
            train_start = 0
            train_end = int(train_total * 0.8)
            val_start = train_end
            train_count = train_end
            val_count = train_total - train_count
        else:
            train_start = i * count_per_fold
            train_end = train_start + (count_per_fold * (k - 1))
            if train_end >= train_total:
                train_end -= train_total
            val_start = train_end
            if val_start >= train_total:
                val_start = 0

            # configure the data generators for training and validation
            train_count = train_total - count_per_fold
            val_count = count_per_fold

        train_generator = InterIntraEpochGenerator(
            data_int,
            params,
            train_count,
            start_val=train_start,
            shuffle=True,
            oversampling=apply_oversampling,
            crossval_samples=train_total)
        validation_generator = InterIntraEpochGenerator(
            data_int,
            params,
            val_count,
            start_val=val_start,
            crossval_samples=train_total)

        # model training
        print("####\n\n\nTraining###\n\n")
        timestamps_trainingstart.append(datetime.datetime.now())

        history = model.fit_generator(generator=train_generator,
                                      epochs=num_epochs,
                                      callbacks=callbacks,
                                      workers=0,
                                      validation_data=validation_generator,
                                      use_multiprocessing=False)

        timestamps_trainingend.append(datetime.datetime.now())
        print("Model Training done. Save Performance to Log ... ", end=" ")

        # log the performance of this fold
        val_acc_history = history.history[
            'val_accuracy']  # val_accuracy for Winslow, val_acc local
        val_loss_history = history.history['val_loss']

        all_val_accs.append(val_acc_history)
        all_val_loss.append(val_loss_history)
        print("done.")

    print("=======> Cross Validation - Performance Evaluation <=======")
    timestamps['crossval_end'] = datetime.datetime.now()
    timestamps['trainstarts'] = timestamps_trainingstart
    timestamps['trainends'] = timestamps_trainingend
    train_parameters = count_params(model.trainable_weights)
    record_performance(all_val_accs, all_val_loss, params, timestamps,
                       train_parameters)