def trainModel(model=None):
    # Trains a model
    #   model: optional; a new model is created if not passed, otherwise training continues
    # Returns:
    #   model: trained Keras model

    crop_range = 12  # number of pixels to crop image (if size is 235, crops are 0-223, 1-224, ... 11-234)
    crop_size = 224
    target_size = crop_size + crop_range - 1  #235

    dataGen = dg_v1.prepDataGen(target_size=target_size, batch_size=64)

    if model is None:
        input_shape = (224, 224, 3)
        model = m_v7.prepModel(
            input_shape=input_shape,
            L1_size_stride_filters=(7, 2, 96), L1MaxPool_size_stride=(3, 2),
            L2_size_stride_filters=(5, 2, 256), L2MaxPool_size_stride=(3, 2),
            L3_size_stride_filters=(3, 1, 384),
            L4_size_stride_filters=(3, 1, 384), L4_dropout=0.0,
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2),
            D1_size=4096,
            D2_size=4096)

    full_epochs = 3  # 1 full epoch is a full pass of the data over all variants of 12x12 shifts
    #  12x12 = 144 passes through the original images in 1 full epoch

    # a full epoch is 12x12 = 144 passes over the data: one pass for each subframe
    for full_epoch in range(full_epochs):

        # for each subframe within an image...
        for epoch_single_subframe in range(crop_range * crop_range):

            #Pick a starting pixel
            h_ind = int(epoch_single_subframe / crop_range)
            w_ind = epoch_single_subframe % crop_range
            size = target_size - crop_range + 1

            #shuffle data upon reset
            dataGen.reset()
            iter_in_epoch = 0

            for X, Y in dataGen:
                #print ("len(dataGen):",str(len(dataGen)))
                X_subframe = X[:, h_ind:h_ind + size, w_ind:w_ind + size, :]
                #print ("X_subframe.shape,X.shape,h_ind,w_ind,size",X_subframe.shape,X.shape,h_ind,w_ind,size)
                model.fit(X_subframe, Y, verbose=0)

                iter_in_epoch += 1
                if iter_in_epoch >= len(dataGen):
                    break

            print("full_epoch, epoch_single_subframe:",
                  time.strftime("%H:%M:%S"), full_epoch, epoch_single_subframe)
        #e_v1.eval(model)
        e_v2.eval(model, target_size=crop_size)
        e_v2.eval(model, target_size=crop_size, test=True)

    return model
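# A minimal illustrative sketch (not from the original repo) of the subframe
# enumeration used in the loop above: with crop_range=12 and a 235-pixel image,
# every (h_ind, w_ind) start pixel yields one 224x224 crop, 144 crops in total.
# The function name iter_subframes is hypothetical.
import numpy as np

def iter_subframes(X, crop_range=12):
    # X: batch of images shaped (N, target_size, target_size, C)
    size = X.shape[1] - crop_range + 1  # e.g. 235 - 12 + 1 = 224
    for idx in range(crop_range * crop_range):
        h_ind, w_ind = divmod(idx, crop_range)
        yield X[:, h_ind:h_ind + size, w_ind:w_ind + size, :]

# usage sketch: for X_subframe in iter_subframes(X): model.fit(X_subframe, Y, verbose=0)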
def trainModel(model=None):
    # Trains a model
    #   model: optional; a new model is created if not passed, otherwise training continues
    # Returns:
    #   model: trained Keras model

    crop_range = 32  # number of pixels to crop image (if size is 255, crops are 0-223, 1-224, ... 31-254)
    target_size = 224
    datasrc = "ilsvrc14"

    #dataGen = dg_v1.prepDataGen(target_size = target_size, batch_size = 64 )
    dataGen = as_v3.AugSequence(target_size=target_size,
                                crop_range=crop_range,
                                batch_size=128,
                                datasrc=datasrc,
                                test=False,
                                debug=True)

    if model is None:
        input_shape = (target_size, target_size, 3)
        model = m_v7.prepModel(
            input_shape=input_shape,
            L1_size_stride_filters=(7, 2, 96), L1MaxPool_size_stride=(3, 2),
            L2_size_stride_filters=(5, 2, 256), L2MaxPool_size_stride=(3, 2),
            L3_size_stride_filters=(3, 1, 384),
            L4_size_stride_filters=(3, 1, 384),
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2),
            D1_size=4096,
            D2_size=4096)

    full_epochs = 200

    #prepare a validation data generator, used for early stopping
    vldDataGen = dg_v1.prepDataGen(target_size=target_size,
                                   test=True,
                                   batch_size=128,
                                   datasrc=datasrc)
    callback_earlystop = EarlyStopping(monitor='val_acc',
                                       min_delta=0.01,
                                       patience=5,
                                       verbose=1,
                                       mode='max',
                                       restore_best_weights=True)

    # train with early stopping on validation accuracy (AugSequence supplies the crop augmentation)
    model.fit_generator(dataGen,
                        steps_per_epoch=len(dataGen),
                        epochs=full_epochs,
                        verbose=2,
                        validation_data=vldDataGen,
                        validation_steps=len(vldDataGen),
                        callbacks=[callback_earlystop])

    e_v3.eval(model, target_size=target_size, datasrc=datasrc)
    e_v3.eval(model, target_size=target_size, datasrc=datasrc, test=True)

    return model
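# Hedged sketch of the role as_v3.AugSequence plays above; its real implementation
# is not shown in this file, so everything below is an assumption written for
# illustration: a keras.utils.Sequence that serves batches cropped at a random
# offset within crop_range, so each pass sees a different target_size x target_size
# subframe. The class name RandomCropSequence is hypothetical.
import numpy as np
from keras.utils import Sequence

class RandomCropSequence(Sequence):
    def __init__(self, images, labels, crop_range=32, batch_size=128):
        # images are assumed pre-resized to (target_size + crop_range - 1) pixels per side
        self.images, self.labels = images, labels
        self.crop_range, self.batch_size = crop_range, batch_size

    def __len__(self):
        return int(np.ceil(len(self.images) / self.batch_size))

    def __getitem__(self, idx):
        X = self.images[idx * self.batch_size:(idx + 1) * self.batch_size]
        y = self.labels[idx * self.batch_size:(idx + 1) * self.batch_size]
        size = X.shape[1] - self.crop_range + 1
        h, w = np.random.randint(0, self.crop_range, 2)  # one random offset per batch
        return X[:, h:h + size, w:w + size, :], y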
def eval(model, target_size=150, datasrc="selfCreatedGoogle", eval_test=True, eval_train=True):
    # Evaluates a given model against the train and test sets; prints results on screen
    #
    #   model: trained Keras model
    #
    # Returns:
    #   (ev_train, ev_test): [loss, accuracy] for the train and validation sets
    #   (None is returned for a set that was not evaluated)

    ev_test = None
    if eval_test:
        testDataGen = dg_v1.prepDataGen(target_size=target_size, datasrc=datasrc, test=True)
        ev_test = model.evaluate_generator(testDataGen, verbose=1, steps=len(testDataGen))

    ev_train = None
    if eval_train:
        trainDataGen = dg_v1.prepDataGen(target_size=target_size, datasrc=datasrc)
        ev_train = model.evaluate_generator(trainDataGen, verbose=1, steps=len(trainDataGen))
        #ev_train = model.evaluate_generator(trainDataGen, verbose=1, steps=1)

    return (ev_train, ev_test)
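# Illustrative usage of eval above: evaluate only the validation split and
# unpack the Keras [loss, accuracy] pair (argument values here are placeholders).
#   (ev_train, ev_test) = eval(model, target_size=224, datasrc="ilsvrc14", eval_train=False)
#   val_loss, val_acc = ev_test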
def trainModel():
    # Trains a model
    #
    # Returns: 
    #   model: trained Keras model

    dataGen = dg_v1.prepDataGen()

    model = m_v3.prepModel()

    epochs = 50

    model.fit_generator(dataGen, steps_per_epoch=len(dataGen), epochs=epochs, verbose=1)

    return model
def trainModel(model=None):
    # Trains a model
    #   model: optional; a new model is created if not passed, otherwise training continues
    # Returns:
    #   model: trained Keras model

    target_size = 161
    dataGen = dg_v1.prepDataGen(target_size=target_size)

    if model is None:
        model = m_v1.prepModel()

    full_epochs = 1  # 1 full epoch is a full pass of the data over all variants of 12x12 shifts
    #  12x12 = 144 passes through original images in 1 full epoch
    crop_range = 12  # number of pixels to crop image (if size is 161, crops are 0-149, 1-150, ... 11-160)

    # a full epoch is 12x12 = 144 passes over the data: one pass for each subframe
    for full_epoch in range(full_epochs):

        # for each subframe within an image...
        for epoch_single_subframe in range(crop_range * crop_range):

            #Pick a starting pixel
            h_ind = int(epoch_single_subframe / crop_range)
            w_ind = epoch_single_subframe % crop_range
            size = target_size - crop_range + 1

            #shuffle data upon reset
            dataGen.reset()
            iter_in_epoch = 0

            for X, Y in dataGen:
                #print ("len(dataGen):",str(len(dataGen)))
                X_subframe = X[:, h_ind:h_ind + size, w_ind:w_ind + size, :]
                #print ("X_subframe.shape,X.shape,h_ind,w_ind,size",X_subframe.shape,X.shape,h_ind,w_ind,size)
                model.fit(X_subframe, Y, verbose=0)

                iter_in_epoch += 1
                if iter_in_epoch >= len(dataGen):
                    break

            print("full_epoch, epoch_single_subframe:",
                  time.strftime("%H:%M:%S"), full_epoch, epoch_single_subframe)
        #e_v1.eval(model)
        #e_v2.eval(model)
        #e_v2.eval(model, test=True)

    return model
def trainModel(model=None, epochs=1):
    # Trains a model
    #   model: optional; a new model is created if not passed, otherwise training continues
    #   epochs - maximum number of epochs to train (subject to early stopping)
    # Returns:
    #   model: trained Keras model

    crop_range = 32  # number of pixels to crop image (if size is 255, crops are 0-223, 1-224, ... 31-254)
    target_size = 224
    datasrc = "ilsvrc14_50classes"
    #datasrc = "ilsvrc14"

    #dataGen = dg_v1.prepDataGen(target_size = target_size, batch_size = 64 )
    dataGen = as_v3.AugSequence(target_size=target_size, crop_range=crop_range, batch_size=128, datasrc=datasrc, test=False, debug=True)

    if model is None:
        input_shape = (target_size, target_size, 3)
        model = m_v8.prepModel(
            input_shape=input_shape,
            L1_size_stride_filters=(11, 4, 96), L1MaxPool_size_stride=(3, 2), L1_dropout=0.0,
            L2_size_stride_filters=(5, 1, 256), L2MaxPool_size_stride=(3, 2), L2_dropout=0.0,
            L3_size_stride_filters=(3, 1, 384), L3_dropout=0.0,
            L4_size_stride_filters=(3, 1, 384), L4_dropout=0.0,
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2), L5_dropout=0.0,
            D1_size=4096, D1_dropout=0.2,
            D2_size=4096, D2_dropout=0.3,
            Softmax_size=50,
            Conv_padding="same")

    # prepare a validation data generator, used for early stopping
    vldDataGen = dg_v1.prepDataGen(target_size=target_size, test=True, batch_size=128, datasrc=datasrc)
    callback_earlystop = EarlyStopping(monitor='val_acc', min_delta=0.001, patience=20, verbose=1, mode='max', restore_best_weights=True)

    # train with early stopping on validation accuracy (AugSequence supplies the crop augmentation)
    model.fit_generator(dataGen, steps_per_epoch=len(dataGen), epochs=epochs, verbose=2, validation_data=vldDataGen, validation_steps=len(vldDataGen), callbacks=[callback_earlystop])
    #model.fit_generator ( dataGen, steps_per_epoch=1, epochs=epochs, verbose=2, validation_data=vldDataGen, validation_steps=len(vldDataGen), callbacks=[callback_earlystop] )
    #model.fit_generator ( dataGen, steps_per_epoch=len(dataGen), epochs=epochs, verbose=2 )
    #model.fit_generator ( dataGen, steps_per_epoch=1, epochs=epochs, verbose=2 )

    print ("Evaluation on train set (1 frame)")
    e_v2.eval(model, target_size=target_size, datasrc=datasrc)
    print ("Evaluation on validation set (1 frame)")
    e_v2.eval(model, target_size=target_size, datasrc=datasrc, test=True)
    print ("Evaluation on validation set (5 frames)")
    e_v3.eval(model, target_size=target_size, datasrc=datasrc, test=True)
    print ("Evaluation on validation set (10 frames)")
    e_v4.eval(model, target_size=target_size, datasrc=datasrc, test=True)

    return model
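# Hedged sketch of what the 5-frame evaluation (e_v3 above) plausibly does;
# e_v3's source is not shown here, so this is an assumption for illustration:
# average the model's predictions over the four corner crops plus the center
# crop. The function name predict_5crop is hypothetical.
import numpy as np

def predict_5crop(model, X, crop_size=224):
    # X: batch of images larger than crop_size on both spatial axes
    n = X.shape[1]
    offsets = [(0, 0), (0, n - crop_size), (n - crop_size, 0),
               (n - crop_size, n - crop_size),
               ((n - crop_size) // 2, (n - crop_size) // 2)]
    preds = [model.predict(X[:, h:h + crop_size, w:w + crop_size, :])
             for h, w in offsets]
    return np.mean(preds, axis=0)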
def trainModel():
    # Trains a model
    #
    # Returns: 
    #   model: trained Keras model

    dataGen = dg_v1.prepDataGen()

    model = m_v1.prepModel()

    epochs = 50

    model.fit_generator(dataGen, steps_per_epoch=len(dataGen), epochs=epochs, verbose=1)

    # model.save("Train_v1_simple1.h5")
    # model = load_model("Train_v1_simple1.h5")

    return model
def getHighestActivations(model, layer_index, map_to_image_patch_multiplier, map_to_image_patch_size,
                          cnt_activations=90, cnt_images_per_activation=16, debug=False):
    # Picks cnt_activations random activations (features) and, for each, returns the input
    # patches of the cnt_images_per_activation images that activate it most strongly
    #
    #   model - already trained Keras model
    #   layer_index - index of the desired activation layer
    #   map_to_image_patch_multiplier, map_to_image_patch_size - map an activation back to its input patch
    #   cnt_activations - number of random activations (features) to collect max-activating images for
    #   cnt_images_per_activation - number of images to return per feature
    # Returns: tuple of
    #   high_activation_values - highest activation values, np.array[cnt_activations, cnt_images_per_activation]
    #   high_activation_imgs - corresponding input patches for display, list [cnt_activations][cnt_images_per_activation]

    # Prepare data generator
    dataGen = dg_v1.prepDataGen(target_size=224, test=False, batch_size=64, datasrc="ilsvrc14")

    # Activation layer's dimensions (0th dim is #samples)
    activation_dim = (int(model.layers[layer_index].output.shape[1]),
                      int(model.layers[layer_index].output.shape[2]),
                      int(model.layers[layer_index].output.shape[3]))

    if debug:
        # Print basic info about the activation:
        print("Activation layer:", model.layers[layer_index].name, activation_dim)

    ## For reproducibility
    #np.random.seed(111)

    # Get a function to calculate the output of the layer
    func_activation = function([model.input], [model.layers[layer_index].output])

    # 1. Randomly pick cnt_activations features from the layer's shape - will later display 3x3 activations in a single figure, 10 times
    h_activation = np.random.randint(0, activation_dim[0], cnt_activations)
    w_activation = np.random.randint(0, activation_dim[1], cnt_activations)
    c_activation = np.random.randint(0, activation_dim[2], cnt_activations)

    # 2. Calculate image patch locations based on the randomly selected features (the formulas below assume no padding)
    h_img_patch_start = h_activation * map_to_image_patch_multiplier
    h_img_patch_end = h_img_patch_start + map_to_image_patch_size
    w_img_patch_start = w_activation * map_to_image_patch_multiplier
    w_img_patch_end = w_img_patch_start + map_to_image_patch_size

    if debug:
        # Print a sample feature and its corresponding image patch:
        print("Activation[0]: (", h_activation[0], w_activation[0], c_activation[0],
              "); Image patch[0]: [", h_img_patch_start[0], ":", h_img_patch_end[0], "],[",
              w_img_patch_start[0], ":", w_img_patch_end[0], "]")

    # 3. Initialize structures to hold max activation values and the image patches having those activations (for display later)
    #    list of cnt_images_per_activation max activation values (for each of the cnt_activations randomly selected features)
    high_activation_values = np.zeros((cnt_activations, cnt_images_per_activation))
    #    list of image patches (with max activations) to be displayed later
    high_activation_imgs = [[np.random.rand(map_to_image_patch_size, map_to_image_patch_size, 3)
                             for i in range(cnt_images_per_activation)] for i in range(cnt_activations)]
    #    how many images activated the given feature?
    high_activation_cnts = np.zeros(cnt_activations, int)

    # 4. Loop through the data to get the highest activations
    iter_in_epoch = 0
    for X, y in dataGen:

        # 4A. Calculate the activation layer's output
        output_activation = func_activation([X])[0]

        # 4B. Accumulate the highest activation values and corresponding picture patches into arrays
        for activation_index in range(cnt_activations):
            for img_index in range(X.shape[0]):
                high_activation_value_min_index = np.argmin(high_activation_values[activation_index, :])
                activation_value = output_activation[
                    img_index,
                    h_activation[activation_index],
                    w_activation[activation_index],
                    c_activation[activation_index]]

                # Replace if the current image's activation value is higher
                if activation_value > high_activation_values[activation_index, high_activation_value_min_index]:
                    # Print if a higher activation is found
                    #if debug and activation_index == 0:
                    #    print("img_index, iter_in_epoch, old_value, new_value:",
                    #          img_index, iter_in_epoch,
                    #          high_activation_values[activation_index, high_activation_value_min_index],
                    #          activation_value)
                    # Replace activation value
                    high_activation_values[activation_index, high_activation_value_min_index] = activation_value
                    # Replace image patch
                    high_activation_imgs[activation_index][high_activation_value_min_index] = np.copy(
                        X[img_index,
                          h_img_patch_start[activation_index]:h_img_patch_end[activation_index],
                          w_img_patch_start[activation_index]:w_img_patch_end[activation_index],
                          :])
                    # Increase the counter of images
                    high_activation_cnts[activation_index] += 1
                    
        iter_in_epoch += 1
        if debug and iter_in_epoch % 50 == 0:
            print ("iter_in_epoch:",iter_in_epoch)
        if iter_in_epoch >= len(dataGen):
            break

    # Eliminate activations which don't have at least cnt_images_per_activation images activating them
    #print("high_activation_cnts:", high_activation_cnts)
    activation_whereimagesexist_indexes = np.where(high_activation_cnts >= cnt_images_per_activation)[0]
    high_activation_values = high_activation_values[activation_whereimagesexist_indexes, :]
    high_activation_imgs = [np.copy(high_activation_imgs[i]) for i in activation_whereimagesexist_indexes]

    return (high_activation_values, high_activation_imgs)
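# A small worked example of the patch-location math in step 2 above: with no
# padding, an activation at row h in a layer whose cumulative input stride is
# map_to_image_patch_multiplier and whose receptive field is
# map_to_image_patch_size maps back to an input patch starting at h * multiplier.
# The numbers below are illustrative assumptions, not values from a trained model.
h_activation_example, multiplier_example, patch_size_example = 3, 8, 51
h_start_example = h_activation_example * multiplier_example  # 24
h_end_example = h_start_example + patch_size_example         # 75 -> patch rows [24, 75)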
def trainModel(epochs=2, cntExperiments=2):
    # Trains models with randomly sampled dropout rates and logs the results to a CSV file
    #   epochs - maximum number of epochs to train per experiment
    #   cntExperiments - number of experiments to run using various random dropout rates
    # Returns:
    #   model: the Keras model from the last experiment

    crop_range = 32  # number of pixels to crop image (if size is 255, crops are 0-223, 1-224, ... 31-254)
    target_size = 224
    datasrc = "ilsvrc14_50classes"
    #datasrc = "ilsvrc14"

    #dataGen = dg_v1.prepDataGen(target_size = target_size, batch_size = 64 )
    dataGen = as_v3.AugSequence(target_size=target_size,
                                crop_range=crop_range,
                                batch_size=128,
                                datasrc=datasrc,
                                test=False,
                                debug=True)

    #prepare a validation data generator, used for early stopping
    vldDataGen = dg_v1.prepDataGen(target_size=target_size,
                                   test=True,
                                   batch_size=128,
                                   datasrc=datasrc)

    input_shape = (target_size, target_size, 3)

    # open a file for writing the results
    res_file = open("train_v50.dropout.results.csv", "w")
    res_file.write(
        "Experiment No,cnn1,cnn2,cnn3,cnn4,cnn5,d1,d2,train accuracy,train top5,test accuracy, test top5\n"
    )

    for exper in range(cntExperiments):

        cnn_dropout = np.array([0., 0., 0., 0., 0.])
        # note: np.random.rand() > 0.0 is effectively always True; raise the threshold (e.g. > 0.3) to use CNN dropout only part of the time
        incl_cnn_dropout = np.random.rand() > 0.0

        # Each dropout value has a 30% chance to be 0; then an equal 10% chance of each of 0.1, 0.2, ... 0.7
        if incl_cnn_dropout:
            cnn_dropout = np.maximum(0, np.random.randint(0, 10, 5) - 2) * 0.1

        dense_dropout = np.maximum(0, np.random.randint(0, 10, 2) - 2) * 0.1

        model = m_v8.prepModel(
            input_shape=input_shape,
            L1_size_stride_filters=(11, 4, 96), L1MaxPool_size_stride=(3, 2), L1_dropout=cnn_dropout[0],
            L2_size_stride_filters=(5, 1, 256), L2MaxPool_size_stride=(3, 2), L2_dropout=cnn_dropout[1],
            L3_size_stride_filters=(3, 1, 384), L3_dropout=cnn_dropout[2],
            L4_size_stride_filters=(3, 1, 384), L4_dropout=cnn_dropout[3],
            L5_size_stride_filters=(3, 1, 256), L5MaxPool_size_stride=(3, 2), L5_dropout=cnn_dropout[4],
            D1_size=4096, D1_dropout=dense_dropout[0],
            D2_size=4096, D2_dropout=dense_dropout[1],
            Softmax_size=50,
            Conv_padding="same")

        #callback_earlystop = EarlyStopping ( monitor='val_acc', min_delta=0.001, patience=20, verbose=1, mode='max', restore_best_weights=True )

        # train for the requested number of epochs (early-stopping variant kept above for reference)
        #model.fit_generator ( dataGen, steps_per_epoch=len(dataGen), epochs=epochs, verbose=2, validation_data=vldDataGen, validation_steps=len(vldDataGen), callbacks=[callback_earlystop] )
        #model.fit_generator ( dataGen, steps_per_epoch=1, epochs=epochs, verbose=2, validation_data=vldDataGen, validation_steps=len(vldDataGen), callbacks=[callback_earlystop] )
        model.fit_generator(dataGen,
                            steps_per_epoch=len(dataGen),
                            epochs=epochs,
                            verbose=2,
                            validation_data=vldDataGen,
                            validation_steps=len(vldDataGen))
        #model.fit_generator ( dataGen, steps_per_epoch=len(dataGen), epochs=epochs, verbose=2 )
        #model.fit_generator ( dataGen, steps_per_epoch=1, epochs=epochs, verbose=2 )

        #print ("Evaluation on train set (1 frame)")
        (ev_train, ev_test) = e_v1.eval(model,
                                        target_size=target_size,
                                        datasrc=datasrc)

        res_line = ",".join(map(str, [exper,
            cnn_dropout[0], cnn_dropout[1], cnn_dropout[2], cnn_dropout[3], cnn_dropout[4],
            dense_dropout[0], dense_dropout[1],
            ev_train[1], ev_train[2], ev_test[1], ev_test[2]])) + "\n"
        res_file.write(res_line)
        res_file.flush()

    res_file.close()
    #print ("Evaluation on validation set (1 frame)")
    #e_v2.eval(model, target_size=target_size, datasrc=datasrc, test=True)
    #print ("Evaluation on validation set (5 frames)")
    #e_v3.eval(model, target_size=target_size, datasrc=datasrc, test=True)
    #print ("Evaluation on validation set (10 frames)")
    #e_v4.eval(model, target_size=target_size, datasrc=datasrc, test=True)

    return model
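# Quick empirical check (illustrative, not part of the original training code)
# of the dropout sampling rule used in the loop above: each rate is 0.0 with
# probability 0.3 and each of 0.1 ... 0.7 with probability 0.1.
import numpy as np
samples = np.maximum(0, np.random.randint(0, 10, 100000) - 2) * 0.1
print(round(float(np.mean(samples == 0.0)), 2))  # ~0.3
print(round(float(np.mean(samples == 0.5)), 2))  # ~0.1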
def visualInit():
    # Visualizes random pictures; waits for a key press between visualizations
    #
    model = load_model("C:\\labs\\models\\model_v22.h5")
    #model = load_model("D:\\ILSVRC14\\models\\model_v59.h5", custom_objects={'top_5': m_v11.top_5})

    dataGen = dg_v1.prepDataGen()

    # New plot
    rows = 8
    columns = 4
    fig = plt.figure(figsize=(columns * 2, rows))
    #fig.patch.set_visible(False)
    ims = []
    ims_data = {}

    #Remove margins
    plt.subplots_adjust(left=0.,
                        bottom=0.,
                        right=1.,
                        top=1.,
                        wspace=0.,
                        hspace=0.)

    #Plot in interactive mode - does not block command line
    plt.ion()

    for imgind in range(rows * columns):
        subplot = fig.add_subplot(rows, columns * 2, imgind * 2 + 1)
        #No labels and markings on axis
        _ = subplot.set_xticklabels([])
        _ = subplot.set_yticklabels([])
        _ = subplot.set_xticks([])
        _ = subplot.set_yticks([])
        # add image to the list (its data will later be updated in place for speed rather than replacing the image)
        im = subplot.imshow(np.random.rand(150, 150, 3))  #, cmap='gray')
        ims = np.append(ims, im)

        subplot_lbl = fig.add_subplot(rows, columns * 2, imgind * 2 + 2)
        #No labels and markings on axis
        _ = subplot_lbl.set_xticklabels([])
        _ = subplot_lbl.set_yticklabels([])
        _ = subplot_lbl.set_xticks([])
        _ = subplot_lbl.set_yticks([])

        # Predicted labels and percentages
        #pred1 = subplot_lbl.text(x=0.5,y=0.8,s="pred1", ha="center", va="center", fontsize=8, color="red")
        #ims_data[ "pred1." + str(imgind) ] = pred1
        values = np.random.rand(5)
        values_lbl = [("%.2f" % value) for value in values]
        pred_bars = subplot_lbl.barh(
            y=np.arange(5), width=values)  #, tick_label = values_lbl )
        for bar_ind in np.arange(len(pred_bars)):
            bar = pred_bars[bar_ind]
            width = bar.get_width()
            xloc = 0.98 * width
            #clr = 'red'
            #align =
            yloc = bar.get_y() + bar.get_height() / 2.0
            #print("xloc, yloc, values_lbl[bar_ind]",xloc, yloc, values_lbl[bar_ind])
            label = subplot_lbl.text(xloc,
                                     yloc,
                                     values_lbl[bar_ind],
                                     horizontalalignment='right',
                                     verticalalignment='center',
                                     color='black',
                                     fontsize=6,
                                     clip_on=True)

        # Correct label
        #sw = subplot_lbl.get_width()
        #sh = subplot_lbl.get_height()
        #print ("sw, sh:",sw, sh)
        lbl = subplot_lbl.text(x=0.2,
                               y=0,
                               s="actual",
                               ha="left",
                               va="center",
                               fontsize=8)
        ims_data["lbl." + str(imgind)] = lbl
        ims_data["lbl_sub." + str(imgind)] = subplot_lbl

    plt.show()

    cache = {}
    cache["ims"] = ims
    cache["ims_data"] = ims_data
    cache["fig"] = fig
    cache["rows"] = rows
    cache["columns"] = columns
    cache["dataGen"] = dataGen
    cache["model"] = model

    return cache
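# Illustrative follow-up (assumed usage, not shown in this file): a companion
# refresh step would reuse the cached AxesImage objects and push new pixels
# with set_data instead of recreating the subplots.
#   cache = visualInit()
#   im = cache["ims"][0]
#   im.set_data(np.random.rand(150, 150, 3))
#   cache["fig"].canvas.draw_idle()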