Example no. 1
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune
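    # e.g., a hedged sketch -- hypothetical checkpoint path, assuming an
    # earlier train_model run saved weights there:
    # model.load_weights('./weights/toe_segmentation2_weights.h5')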

    return model


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='toe_segmentation2', epochs=2000, batch_size=128)

    evaluate_model(model, DATASET_INDEX, dataset_prefix='toe_segmentation2', batch_size=32)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='toe_segmentation2', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='toe_segmentation2', class_id=0)
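Every excerpt in this collection ends with the same tail: a Conv1D/BatchNormalization/ReLU stack, global average pooling, concatenation of an `x` and a `y` branch, and a softmax head. For orientation, here is a minimal sketch of the LSTM-FCN-style `generate_model_2()` these tails presumably belong to; the LSTM branch, the earlier conv blocks, and the two constants are assumptions inferred from the fragments, not code taken from any one example.

from keras.layers import (Input, LSTM, Dropout, Permute, Conv1D,
                          BatchNormalization, Activation,
                          GlobalAveragePooling1D, concatenate, Dense)
from keras.models import Model

MAX_SEQUENCE_LENGTH = 128  # assumption: set per dataset
NB_CLASS = 2               # assumption: set per dataset


def generate_model_2():
    ip = Input(shape=(1, MAX_SEQUENCE_LENGTH))

    # Recurrent branch: the series enters as a single timestep with
    # MAX_SEQUENCE_LENGTH features (the LSTM-FCN "dimension shuffle").
    x = LSTM(8)(ip)
    x = Dropout(0.8)(x)

    # Fully convolutional branch: Permute gives Conv1D its expected
    # (timesteps, channels) layout.
    y = Permute((2, 1))(ip)
    y = Conv1D(128, 8, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    # ...from here on, the excerpts above and below pick up verbatim:
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)
    model.summary()
    return model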
Example no. 2
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='proximal_phalanx_tw', epochs=2000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='proximal_phalanx_tw',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='proximal_phalanx_tw', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='proximal_phalanx_tw', class_id=0)
Example no. 3
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='ozone',
                epochs=600,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='ozone',
                   batch_size=128)
Example no. 4
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='medical_images', epochs=2000, batch_size=64)

    evaluate_model(model, DATASET_INDEX, dataset_prefix='medical_images', batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='medical_images', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='medical_images', class_id=0)
Example no. 5
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='computers', epochs=2000, batch_size=128)

    evaluate_model(model, DATASET_INDEX, dataset_prefix='computers', batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='computers', visualize_sequence=True,
    #                         visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='computers', class_id=0)
Example no. 6
    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='cricket_z', epochs=2000, batch_size=64,
    #            cutoff=None)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='cricket_z',
                   batch_size=128,
                   cutoff=None)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='cricket_z', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='cricket_z', class_id=0)
Example no. 7
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='fifty_words', epochs=2000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='fifty_words',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='fifty_words', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='fifty_words', class_id=0)
Example no. 8
        filters: number of output filters
        k: width factor

    Returns: a keras tensor
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='cmu_subject_16_', epochs=1000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='cmu_subject_16_',
                   batch_size=128)
Example no. 9
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='floodPrediction_',
                epochs=200,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='floodPrediction_',
                   batch_size=128)
Example no. 10
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='hand_outlines', epochs=2000, batch_size=64)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='hand_outlines',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='hand_outlines', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='hand_outlines', class_id=0)
Example no. 11
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='shapelet_sim', epochs=2000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='shapelet_sim',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='shapelet_sim', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='shapelet_sim', class_id=0)
Example no. 12
    model.summary()

    return model


if __name__ == "__main__":

    # generate model
    model5 = generate_model_5()
    print("GRU-FCN")
    history = train_model(model5,
                          DATASET_INDEX,
                          dataset_prefix='adiac',
                          epochs=4000,
                          batch_size=128)
    accuracy5, loss5 = evaluate_model(model5,
                                      DATASET_INDEX,
                                      dataset_prefix='adiac',
                                      batch_size=128)

    print("--- Run Time = %s seconds ---" % ((time.time() - start_time)))
    print("--- Run Time = %s minutes ---" %
          ((time.time() - start_time) / 60.0))
    text_file = open("training_time.txt", "w")
    text_file.write("--- Run Time =" + str(((time.time() - start_time))) +
                    " seconds ---" + "\n" + "--- Run Time = " +
                    str(((time.time() - start_time) / 60.0)) + " minutes ---" +
                    "\n")
    print(history.history.keys()
          )  #dict_keys(['val_loss', 'val_acc', 'loss', 'acc', 'lr'])
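The keys printed above imply the script goes on to plot learning curves. A minimal sketch of that step, assuming matplotlib is available and that train_model returns a Keras History object, with the key names taken from the comment above (newer Keras spells them 'accuracy'/'val_accuracy'):

import matplotlib.pyplot as plt

# Plot training/validation accuracy per epoch from the History dict.
plt.plot(history.history['acc'], label='train acc')
plt.plot(history.history['val_acc'], label='val acc')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.savefig('adiac_accuracy.png')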
Example no. 13
        filters: number of output filters
        k: width factor

    Returns: a keras tensor
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='arabic_voice', epochs=600, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='arabic_voice',
                   batch_size=128)
Example no. 14
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='ecg200', epochs=8000, batch_size=64)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='ecg200',
                   batch_size=64)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='ecg200', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='ecg200', class_id=0)
Example no. 15
        filters: number of output filters
        k: width factor

    Returns: a keras tensor
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='action_3d', epochs=600, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='action_3d',
                   batch_size=128)
Example no. 16
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='phalanx_outline_timesequence', epochs=2000, batch_size=128)

    evaluate_model(model, DATASET_INDEX, dataset_prefix='phalanx_outline_timesequence', batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='phalanx_outline_timesequence', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='phalanx_outline_timesequence', class_id=0)
Example no. 17
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='beetle_fly', epochs=8000, batch_size=64)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='beetle_fly',
                   batch_size=64)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='beetle_fly', visualize_sequence=True,
    #                         visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='beetle_fly', class_id=0)
Example no. 18
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='two_lead_ecg', epochs=2000, batch_size=64)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='two_lead_ecg',
                   batch_size=64)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='two_lead_ecg', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='two_lead_ecg', class_id=0)
Example no. 19
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='face_all', epochs=2000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='face_all',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='face_all', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='face_all', class_id=0)
Example no. 20
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='earthquakes', epochs=2000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='earthquakes',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='earthquakes', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='earthquakes', class_id=0)
Example no. 21
    y = Conv1D(128, 3, padding='same', kernel_initializer='he_uniform')(y)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='plane', epochs=200, batch_size=16)

    evaluate_model(model, DATASET_INDEX, dataset_prefix='plane', batch_size=16)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='plane', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='plane', class_id=0)
Example no. 22
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model()

    train_model(model,
                DATASET_INDEX,
                dataset_prefix='daily_sport_no_attention',
                epochs=500,
                batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='daily_sport_no_attention',
                   batch_size=128)
Example no. 23
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='large_kitchen_appliances', epochs=2000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='large_kitchen_appliances',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='large_kitchen_appliances', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='large_kitchen_appliances', class_id=0)
Example no. 24
    # add load model code here to fine-tune

    return model

def squeeze_excite_block(input):
    ''' Create a squeeze-excite block
    Args:
        input: input tensor

    Returns: a Keras tensor
    '''
    # `_keras_shape` is a private Keras 2 attribute; on newer Keras use
    # K.int_shape(input)[-1] instead. channel_axis = -1 for TF ordering.
    filters = input._keras_shape[-1]

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)
    se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)
    se = multiply([input, se])
    return se
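
# Usage sketch (hypothetical, not from the source): the block is typically
# dropped in after a conv stack, so the sigmoid-gated `se` tensor re-weights
# that stack's channels before the next block, e.g.:
#
#     y = Conv1D(256, 5, padding='same', kernel_initializer='he_uniform')(y)
#     y = BatchNormalization()(y)
#     y = Activation('relu')(y)
#     y = squeeze_excite_block(y)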


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='auslan_', epochs=1000, batch_size=128)

    evaluate_model(model, DATASET_INDEX, dataset_prefix='auslan_', batch_size=128)
Example no. 25
        filters: number of output filters
        k: width factor

    Returns: a keras tensor
    '''
    filters = input._keras_shape[-1]  # channel_axis = -1 for TF

    se = GlobalAveragePooling1D()(input)
    se = Reshape((1, filters))(se)
    se = Dense(filters // 16,
               activation='relu',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = Dense(filters,
               activation='sigmoid',
               kernel_initializer='he_normal',
               use_bias=False)(se)
    se = multiply([input, se])
    return se


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='japanese_vowels_', epochs=1000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='japanese_vowels_',
                   batch_size=128)
Example no. 26
    y = BatchNormalization()(y)
    y = Activation('relu')(y)

    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model()
    #train_model(model, DATASET_INDEX, dataset_prefix='baxter_kitting_experiment_', epochs=200, batch_size=64)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='baxter_kitting_experiment_',
                   batch_size=64)

    #visualize_context_vector(model, DATASET_INDEX, dataset_prefix='baxter_kitting_experiment_', visualize_sequence=True, visualize_classwise=True, limit=1)

    #visualize_cam(model, DATASET_INDEX, dataset_prefix='baxter_kitting_experiment_', class_id=0)
Example no. 27
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    # train_model(model, DATASET_INDEX, dataset_prefix='sony_aibo_2', epochs=2000, batch_size=64)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='sony_aibo_2',
                   batch_size=64)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='sony_aibo_2', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='sony_aibo_2', class_id=0)
Example no. 28
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='phalanges_outline_correct', epochs=2000, batch_size=64)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='phalanges_outline_correct',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='phalanges_outline_correct', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='phalanges_outline_correct', class_id=0)
Example no. 29
                model = model_fn(MAX_SEQUENCE_LENGTH, NB_CLASS, cell)

                print('*' * 20, "Training model for dataset %s" % (dname),
                      '*' * 20)

                # comment out the training code to only evaluate!
                train_model(model,
                            did,
                            dataset_name_,
                            epochs=2000,
                            batch_size=128,
                            normalize_timeseries=normalize_dataset)

                acc = evaluate_model(model,
                                     did,
                                     dataset_name_,
                                     batch_size=128,
                                     normalize_timeseries=normalize_dataset)

                s = "%d,%s,%s,%0.6f\n" % (did, dname, dataset_name_, acc)

                file.write(s)
                file.flush()

                successes.append(s)

                # except Exception as e:
                #     traceback.print_exc()
                #
                #     s = "%d,%s,%s,%s\n" % (did, dname, dataset_name_, 0.0)
                #     failures.append(s)
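This excerpt is the inner body of a batch benchmarking loop whose setup lies outside the excerpt. For orientation, a hedged reconstruction of the outer scaffolding; every name below (MODELS, dataset_names, the results path) is an illustrative assumption, not recovered source.

# Illustrative scaffolding only -- the excerpt above is the inner loop body.
# It builds a model via model_fn(MAX_SEQUENCE_LENGTH, NB_CLASS, cell),
# trains and evaluates it, then logs one "did,dname,dataset_name_,acc" row.
successes = []
failures = []

with open('all_datasets_results.csv', 'w') as file:
    for model_name, model_fn, cell in MODELS:        # assumed model registry
        for did, dname in enumerate(dataset_names):  # assumed dataset list
            dataset_name_ = model_name + '_' + dname
            normalize_dataset = True
            # ... inner body as in the excerpt above ...
            pass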
Example no. 30
    y = GlobalAveragePooling1D()(y)

    x = concatenate([x, y])

    out = Dense(NB_CLASS, activation='softmax')(x)

    model = Model(ip, out)

    model.summary()

    # add load model code here to fine-tune

    return model


if __name__ == "__main__":
    model = generate_model_2()

    #train_model(model, DATASET_INDEX, dataset_prefix='herring', epochs=2000, batch_size=128)

    evaluate_model(model,
                   DATASET_INDEX,
                   dataset_prefix='herring',
                   batch_size=128)

    # visualize_context_vector(model, DATASET_INDEX, dataset_prefix='herring', visualize_sequence=True,
    #                          visualize_classwise=True, limit=1)

    # visualize_cam(model, DATASET_INDEX, dataset_prefix='herring', class_id=0)