Code Example #1
def func3(shape):
    from keras.applications import ResNet152
    BS = 8
    conv_base = ResNet152(weights='imagenet',
                          include_top=False,
                          input_shape=(shape[0], shape[1], 3))
    return BS, conv_base
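A minimal usage sketch for the helper above (the 224x224 input size and the freezing step are illustrative assumptions, not part of the original snippet):

# hypothetical usage of func3
batch_size, conv_base = func3((224, 224))
conv_base.trainable = False  # freeze the ImageNet weights before attaching a classifier head
print(batch_size, conv_base.output_shape)  # 8 and (None, 7, 7, 2048)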
Code Example #2
def get_model(model_name):

    if model_name == 'VGG16':

        from keras.applications import VGG16
        model = VGG16(weights="imagenet", include_top=False, pooling='avg')
        size = 512  # if pooling is 'avg', else  512 * 7 * 7 if pooling is 'None'

    elif model_name == 'ResNet50':

        from keras.applications import ResNet50
        model = ResNet50(weights="imagenet", include_top=False, pooling='avg')
        size = 2048  # if pooling is 'avg', 2048 * 7 * 7 if pooling is 'None'

    elif model_name == 'ResNet152':

        from keras.applications import ResNet152
        model = ResNet152(weights="imagenet", include_top=False, pooling='avg')
        size = 2048  # if pooling is 'avg', 2048 * 7 * 7 # if pooling is 'None'

    elif model_name == 'ResNet152V2':

        from keras.applications import ResNet152V2
        model = ResNet152V2(weights="imagenet",
                            include_top=False,
                            pooling='avg')
        size = 2048  # if pooling is 'avg', 2048 * 7 * 7 # if pooling is 'None'

    elif model_name == 'DenseNet121':

        from keras.applications import DenseNet121
        model = DenseNet121(weights="imagenet",
                            include_top=False,
                            pooling='avg')
        size = 1024  # if pooling is 'avg'

    elif model_name == 'Custom':

        ## CUSTOM MODEL

        from keras.models import load_model
        model = load_model(config.FINE_TUNED_MODEL)
        size = 2048  # our trained models are based on ResNet152

    else:

        raise ValueError(
            "Model needs to be defined. Examples: VGG16 or ResNet50.")

    return model, size
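A hedged usage sketch for get_model: the returned (model, size) pair can drive a simple feature extractor. The image path and the preprocess_input call are illustrative assumptions, not part of the original code.

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.imagenet_utils import preprocess_input

model, size = get_model('ResNet152')
image = img_to_array(load_img('example.jpg', target_size=(224, 224)))  # hypothetical image path
features = model.predict(preprocess_input(np.expand_dims(image, axis=0)))
print(features.shape)  # (1, 2048) because pooling='avg'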
Code Example #3
    def __init__(self, IMG_SIZE=(224, 224), model_path="model/"):
        self.model_res = ResNet152(weights="imagenet",
                                   include_top=False,
                                   pooling='avg')
        self.IMG_SIZE = IMG_SIZE
        input_layer = self.model_res.layers[0]
        self.model = load_model(model_path)
        self.model_final = Model(self.model_res.input,
                                 self.model(self.model_res.output))

        # Grad-CAM style setup: pooled gradients of the prediction with respect to the
        # last convolutional block of ResNet152 (uses the legacy graph-mode K.gradients API)
        image_output = self.model_final.output[:]
        last_conv_layer = self.model_final.get_layer('conv5_block3_out')
        grads = K.gradients(image_output, last_conv_layer.output)[0]
        pooled_grads = K.mean(grads, axis=(0, 1, 2))
        self.iterate = K.function([self.model_final.input],
                                  [pooled_grads, last_conv_layer.output[0]])
Code Example #4
def build_ResNet152(input_tensor_shape):
    base_model = ResNet152(weights='imagenet',
                           include_top=False,
                           input_shape=input_tensor_shape)

    x_model = base_model.output

    # note: this is a plain AvgPool2D (default pool_size 2x2), despite the 'globalaveragepooling2d' name
    x_model = AvgPool2D(name='globalaveragepooling2d')(x_model)

    x_model = Dense(1024, activation='relu', name='fc1_Dense')(x_model)
    x_model = Dropout(0.5, name='dropout_1')(x_model)
    x_model = Flatten()(x_model)
    x_model = Dense(256, activation='relu', name='fc2_Dense')(x_model)
    x_model = Dropout(0.5, name='dropout_2')(x_model)

    predictions = Dense(3, activation='sigmoid', name='output_layer')(x_model)

    model = Model(inputs=base_model.input, outputs=predictions)

    return model
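The builder above only defines the graph, so a short compile sketch follows; the optimizer and loss are assumptions (the 3-unit sigmoid output suggests a multi-label setup):

model = build_ResNet152((224, 224, 3))
model.compile(optimizer='adam',
              loss='binary_crossentropy',  # assumed: sigmoid outputs -> per-class binary loss
              metrics=['accuracy'])
model.summary()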
Code Example #5
    def __init__(self, pretrained_model, input_shape, num_classes):
        Model.__init__(self)
        # NOTE: self.model used below is assumed to be a keras Sequential defined elsewhere in this wrapper class

        #picking vgg16 as pretrained (base) model https://keras.io/applications/#vgg16
        if pretrained_model == "vgg16":
            conv_base = VGG16(weights="imagenet",
                              include_top=False,
                              input_shape=input_shape)
        elif pretrained_model == "resnet50":
            conv_base = ResNet50(weights="imagenet",
                                 include_top=False,
                                 input_shape=input_shape)
        elif pretrained_model == "resnet152":
            conv_base = ResNet152(weights="imagenet",
                                  include_top=False,
                                  input_shape=input_shape)
        elif pretrained_model == "inceptionv3":
            conv_base = InceptionV3(weights="imagenet",
                                    include_top=False,
                                    input_shape=input_shape)
        for layer in conv_base.layers:
            layer.trainable = False

        # unfreeze the second-to-last layer of the base model
        conv_base.layers[-2].trainable = True

        self.model.add(conv_base)
        self.model.add(Flatten())
        self.model.add(Dropout(0.33))
        self.model.add(Dense(48, activation='relu'))  #64
        self.model.add(Dropout(0.33))
        self.model.add(Dense(48, activation='relu'))  #48
        self.model.add(Dropout(0.33))
        self.model.add(Dense(num_classes, activation='softmax'))

        self.model.compile(loss=keras.losses.categorical_crossentropy,
                           optimizer=keras.optimizers.Adadelta(),
                           metrics=['accuracy'])
        self.model.summary()
Code Example #6
File: model.py Project: Tjocksockar/AdvDL_project
def build_resnet_model2(classes, version=50, input_shape=(224, 224, 3)):
    if version == 50:
        model = ResNet50(include_top=False,
                         weights='imagenet',
                         input_shape=input_shape)
    if version == 101:
        model = ResNet101(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    if version == 152:
        model = ResNet152(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    model.trainable = False
    model_input = model.input
    X = model.layers[-1]
    #conv_model = Model(inputs=model.input, outputs=transfer_layer.output)
    X = Flatten()(X.output)
    X = Dense(1024, activation='relu')(X)
    X = Dropout(0.25)(X)
    X = Dense(classes, activation='softmax')(X)
    new_model = Model(inputs=model_input, outputs=X)
    return input_shape, new_model
Code Example #7
def get_features_from_image_list(images_list,
                                 img_path,
                                 batch_size=16,
                                 symmetric=False):
    model = ResNet152(weights="imagenet", include_top=False, pooling='avg')
    size = 2048
    widgets = [
        "Extracting Features: ",
        progressbar.Percentage(), " ",
        progressbar.Bar(), " ",
        progressbar.ETA()
    ]
    pbar = progressbar.ProgressBar(maxval=len(images_list),
                                   widgets=widgets).start()
    features_full = []
    for i in np.arange(0, len(images_list), batch_size):
        batch_paths = images_list[i:i + batch_size]
        batch_images = []
        for img in batch_paths:
            image = load_img(img_path + img, target_size=IMG_SIZE)
            image = img_to_array(image)
            if symmetric:
                image = np.array(list(map(lambda x: x[::-1], image)))
            image = np.expand_dims(image, axis=0)
            batch_images.append(image)

        batch_images = np.vstack(batch_images)
        features = model.predict(batch_images, batch_size=batch_size)
        features = features.reshape((features.shape[0], size))
        features_full.append(features)
        try:
            pbar.update(i)
        except Exception:
            pass
    pbar.finish()

    return np.vstack(features_full)
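A usage sketch for the extractor above; it assumes a module-level IMG_SIZE such as (224, 224) and that the listed files exist under img_path (file names here are hypothetical):

IMG_SIZE = (224, 224)  # assumed global used inside get_features_from_image_list
features = get_features_from_image_list(['img_001.jpg', 'img_002.jpg'],
                                         'data/images/',
                                         batch_size=2)
print(features.shape)  # (2, 2048)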
Code Example #8
File: model.py Project: Tjocksockar/AdvDL_project
def build_resnet_model(classes, version=50, input_shape=(224, 224, 3)):
    if version == 50:
        model = ResNet50(include_top=False,
                         weights='imagenet',
                         input_shape=input_shape)
    if version == 101:
        model = ResNet101(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    if version == 152:
        model = ResNet152(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape)
    print('Using ResNet' + str(version))
    transfer_layer = model.layers[-1]
    #conv_model = Model(inputs=model.input, outputs=transfer_layer.output)
    new_model = Sequential()
    new_model.add(model)
    new_model.add(Flatten())
    new_model.add(Dense(1024, activation='relu'))
    new_model.add(Dropout(0.25))
    new_model.add(Dense(classes, activation='softmax'))
    model.trainable = False
    return input_shape, new_model
Code Example #9
def resnet():
    model = ResNet152(weights='imagenet', include_top=True)
    return model
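Because include_top=True here, the model ends in the 1000-way ImageNet classifier. A hedged classification sketch (the file name is illustrative, and the resnet preprocessing helpers are assumed to be importable from keras.applications.resnet):

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.resnet import preprocess_input, decode_predictions

model = resnet()
x = img_to_array(load_img('cat.jpg', target_size=(224, 224)))  # hypothetical image
preds = model.predict(preprocess_input(np.expand_dims(x, axis=0)))
print(decode_predictions(preds, top=3)[0])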
Code Example #10
print('[INFO] {} as feature extractor...'.format(pretrained_nn))
# # Get features
X_features = get_features_from_last_layer_pretrained_nn(
    X['video'], config.DEV_FRAMES, pretrained_nn, 'train_val')
# Get test features
IMG_SIZE = (224, 224)
batch_images = []
for image_path in X_test['image_path']:
    print('Loading image: {}'.format(image_path))
    image = load_img(image_path, target_size=IMG_SIZE)
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    image = imagenet_utils.preprocess_input(image)
    batch_images.append(image)
batch_images = np.vstack(batch_images)
model = ResNet152(weights="imagenet", include_top=False, pooling='avg')
size = 2048
features = model.predict(batch_images)
features = features.reshape((features.shape[0], size))
X_test_features = []
for feature in features:
    feature = np.stack((feature, ) * len(config.FRAME_NUMBERS),
                       axis=1).flatten()
    X_test_features.append(feature)
# Fit and predict
predictions_features = fit_predict(X_features, Y, X_test_features,
                                   pretrained_nn)
# Save predictions
preds_filename = 'images/{}_pretrained'.format(config.TARGET)
np.save(preds_filename, predictions_features)
Code Example #11
File: ResNets .py Project: Boltuzamaki/Easy-CV
def Resnet_Net(trainable=None, net="ResNet50"):

    netold = ['ResNet50', 'ResNet101', 'ResNet152']
    # Preprocessing the dataset into keras feedable format
    if net not in netold:
        train_datagen = ImageDataGenerator(rotation_range=rotation,
                                           width_shift_range=width_shift,
                                           height_shift_range=height_shift,
                                           rescale=scale,
                                           shear_range=shear,
                                           zoom_range=zoom,
                                           horizontal_flip=horizontal,
                                           fill_mode=fill,
                                           validation_split=validation)
        test_datagen = ImageDataGenerator(rescale=scale, )
    if net in netold:
        train_datagen = ImageDataGenerator(
            dtype='float32',
            preprocessing_function=preprocess_input,
            validation_split=validation)
        test_datagen = ImageDataGenerator(
            dtype='float32', preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation')

    models_list = [
        'ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2',
        'ResNet152V2'
    ]

    # Loading the ResNet50 Model

    if net == "ResNet50":
        resnet = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=input_sh,
                          pooling=pooling_model)
    if net == "ResNet101":
        resnet = ResNet101(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet152":
        resnet = ResNet152(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet50V2":
        resnet = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=input_sh,
                            pooling=pooling_model)
    if net == "ResNet101V2":
        resnet = ResNet101V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net == "ResNet152V2":
        resnet = ResNet152V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide a valid model name.')
    output = resnet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    resnet = Model(resnet.input, outputs=output)
    print(resnet.summary())
    print('\n\n\n')
    # If you chose not for fine tuning
    if trainable is None:
        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in resnet.layers:
            layer.trainable = False
        print("The model summary of Resnet  -->\n\n\n"
              )  # In this the Resnet50 layers are not trainable

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(train_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()

    if trainable is not None:
        # Make last block of the conv_base trainable:

        for layer in resnet.layers[:trainable]:
            layer.trainable = False
        for layer in resnet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu', input_dim=input_sh))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in resnet.layers:
            layer.trainable = False
        print("The model summary of Resnet -->\n\n\n"
              )  # In this the Resnet50 layers are not trainable
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit_generator(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(train_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()
Code Example #12
File: feedforward_nn.py Project: tesseract-42/libra
def convolutional(instruction=None,
                  read_mode=None,
                  preprocess=True,
                  data_path=None,
                  verbose=0,
                  new_folders=True,
                  image_column=None,
                  training_ratio=0.8,
                  fine_tune=False,
                  augmentation=True,
                  custom_arch=None,
                  pretrained=None,
                  epochs=10,
                  height=None,
                  width=None,
                  save_as_tfjs=None,
                  save_as_tflite=None,
                  generate_plots=True):
    '''
    Body of the convolutional function used that is called in the neural network query
    if the data is presented in images.
    :param many parameters: used to preprocess, tune, plot generation, and parameterizing the convolutional neural network trained.
    :return dictionary that holds all the information for the finished model.
    '''

    # data_path = get_folder_dir()

    logger("Generating datasets for classes")

    LR = 0.001
    plots = {}
    if pretrained:
        if not height:
            height = 224
        if not width:
            width = 224
        if height != 224 or width != 224:
            raise ValueError(
                "For pretrained models, both 'height' and 'width' must be 224."
            )

    if preprocess:
        if custom_arch:
            raise ValueError(
                "If 'custom_arch' is not None, 'preprocess' must be set to false."
            )

        read_mode_info = set_distinguisher(data_path, read_mode)
        read_mode = read_mode_info["read_mode"]

        training_path = "/proc_training_set"
        testing_path = "/proc_testing_set"

        if read_mode == "setwise":
            processInfo = setwise_preprocessing(data_path, new_folders, height,
                                                width)
            if not new_folders:
                training_path = "/training_set"
                testing_path = "/testing_set"

        # if image dataset in form of csv
        elif read_mode == "csvwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = csv_preprocessing(read_mode_info["csv_path"],
                                            data_path, instruction,
                                            image_column, training_ratio,
                                            height, width)

        # if image dataset in form of one folder containing class folders
        elif read_mode == "classwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = classwise_preprocessing(data_path, training_ratio,
                                                  height, width)

    else:
        training_path = "/training_set"
        testing_path = "/testing_set"
        processInfo = already_processed(data_path)

    num_channels = 3
    color_mode = 'rgb'
    if processInfo["gray_scale"]:
        num_channels = 1
        color_mode = 'grayscale'

    input_shape = (processInfo["height"], processInfo["width"], num_channels)
    input_single = (processInfo["height"], processInfo["width"])
    num_classes = processInfo["num_categories"]
    loss_func = ""
    output_layer_activation = ""

    if num_classes > 2:
        loss_func = "categorical_crossentropy"
        output_layer_activation = "softmax"
    elif num_classes == 2:
        num_classes = 1
        loss_func = "binary_crossentropy"
        output_layer_activation = "sigmoid"

    logger("Creating convolutional neural network dynamically")

    # Convolutional Neural Network

    # Build model based on custom_arch configuration if given
    if custom_arch:
        with open(custom_arch, "r") as f:
            custom_arch_dict = json.load(f)
            custom_arch_json_string = json.dumps(custom_arch_dict)
            model = model_from_json(custom_arch_json_string)

    # Build an existing state-of-the-art model
    elif pretrained:

        arch_lower = pretrained.get('arch').lower()

        # If user specifies value of pretrained['weights'] as 'imagenet', weights pretrained on ImageNet will be used
        if 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
            # Load ImageNet pretrained weights
            if arch_lower == "vggnet16":
                base_model = VGG16(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "vggnet19":
                base_model = VGG19(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet50":
                base_model = ResNet50(include_top=False,
                                      weights='imagenet',
                                      input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet101":
                base_model = ResNet101(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet152":
                base_model = ResNet152(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenet":
                base_model = MobileNet(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "mobilenetv2":
                base_model = MobileNetV2(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet121":
                base_model = DenseNet121(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet169":
                base_model = DenseNet169(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "densenet201":
                base_model = DenseNet201(include_top=False,
                                         weights='imagenet',
                                         input_shape=input_shape)
                x = fine_tuned_model(base_model)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

        else:
            # Randomly initialized weights
            if arch_lower == "vggnet16":
                model = VGG16(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "vggnet19":
                model = VGG19(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "resnet50":
                model = ResNet50(include_top=True,
                                 weights=None,
                                 classes=num_classes)
            elif arch_lower == "resnet101":
                model = ResNet101(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "resnet152":
                model = ResNet152(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenet":
                model = MobileNet(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "mobilenetv2":
                model = MobileNetV2(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet121":
                model = DenseNet121(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet169":
                model = DenseNet169(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            elif arch_lower == "densenet201":
                model = DenseNet201(include_top=True,
                                    weights=None,
                                    classes=num_classes)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

    else:
        model = Sequential()
        # model.add(
        #     Conv2D(
        #         64,
        #         kernel_size=3,
        #         activation="relu",
        #         input_shape=input_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, kernel_size=3, activation="relu"))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Flatten())
        # model.add(Dense(num_classes, activation="softmax"))
        # model.compile(
        #     optimizer="adam",
        #     loss=loss_func,
        #     metrics=['accuracy'])
        model.add(
            Conv2D(filters=64,
                   kernel_size=5,
                   activation="relu",
                   input_shape=input_shape))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=256, activation="relu"))
        model.add(Dropout(0.25))
        model.add(Dense(units=num_classes, activation="softmax"))

    if pretrained and 'weights' in pretrained and pretrained.get(
            'weights') == 'imagenet':
        for layer in base_model.layers:
            layer.trainable = False

    opt = Adam(learning_rate=LR)

    model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

    logger("Located image data")

    if augmentation:
        train_data = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
        test_data = ImageDataGenerator(rescale=1. / 255)

        logger('Dataset augmented through zoom, shear, flip, and rescale')
    else:
        train_data = ImageDataGenerator()
        test_data = ImageDataGenerator()

    logger("->", "Optimal image size identified: {}".format(input_shape))
    X_train = train_data.flow_from_directory(
        data_path + training_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["train_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])
    X_test = test_data.flow_from_directory(
        data_path + testing_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["test_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])

    if epochs <= 0:
        raise BaseException("Number of epochs has to be greater than 0.")

    print("\n")
    logger('Training image model')

    # model.summary()

    history = model.fit_generator(
        X_train,
        steps_per_epoch=X_train.n // X_train.batch_size,
        validation_data=X_test,
        validation_steps=X_test.n // X_test.batch_size,
        epochs=epochs,
        verbose=verbose)

    if fine_tune:

        logger(
            '->', 'Training accuracy: {}'.format(
                history.history['accuracy'][len(history.history['accuracy']) -
                                            1]))
        logger(
            '->',
            'Validation accuracy: {}'.format(history.history['val_accuracy'][
                len(history.history['val_accuracy']) - 1]))

        for layer in base_model.layers:
            layer.trainable = True

        opt = Adam(learning_rate=LR / 10)

        model.compile(optimizer=opt, loss=loss_func, metrics=['accuracy'])

        print("\n\n")
        logger('Training fine tuned model')

        fine_tuning_epoch = epochs + 10
        history_fine = model.fit_generator(
            X_train,
            steps_per_epoch=X_train.n // X_train.batch_size,
            validation_data=X_test,
            validation_steps=X_test.n // X_test.batch_size,
            epochs=fine_tuning_epoch,
            initial_epoch=history.epoch[-1],
            verbose=verbose)
        #frozen model acc and loss history
        acc = history.history['accuracy']
        val_acc = history.history['val_accuracy']

        loss = history.history['loss']
        val_loss = history.history['val_loss']

        #fine tuned model acc and loss history
        acc += history_fine.history['accuracy']
        val_acc += history_fine.history['val_accuracy']

        loss += history_fine.history['loss']
        val_loss += history_fine.history['val_loss']

        if generate_plots:
            plots = generate_fine_tuned_classification_plots(
                acc, val_acc, loss, val_loss, epochs)

    models = []
    losses = []
    accuracies = []
    model_data = []

    model_data.append(model)
    models.append(history)

    losses.append(
        history.history["val_loss"][len(history.history["val_loss"]) - 1])
    accuracies.append(
        history.history['val_accuracy'][len(history.history['val_accuracy']) -
                                        1])

    # final_model = model_data[accuracies.index(max(accuracies))]
    # final_hist = models[accuracies.index(max(accuracies))]

    if generate_plots and not fine_tune:
        plots = generate_classification_plots(models[len(models) - 1])

    print("\n")
    logger(
        '->', 'Final training accuracy: {}'.format(
            history.history['accuracy'][len(history.history['accuracy']) - 1]))
    logger(
        '->',
        'Final validation accuracy: {}'.format(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1]))
    # storing values the model dictionary

    number_of_examples = len(X_test.filenames)
    number_of_generator_calls = math.ceil(number_of_examples /
                                          (1.0 * X_test.batch_size))

    test_labels = []

    for i in range(0, int(number_of_generator_calls)):
        test_labels.extend(np.array(X_test[i][1]))

    predIdx = model.predict(X_test)

    if output_layer_activation == "sigmoid":
        real = [int(x) for x in test_labels]
        ans = []
        for i in range(len(predIdx)):
            ans.append(int(round(predIdx[i][0])))

    elif output_layer_activation == "softmax":
        real = []
        for ans in test_labels:
            real.append(ans.argmax())
        ans = []
        for r in predIdx:
            ans.append(r.argmax())

    else:
        print("NOT THE CASE")

    logger("Stored model under 'convolutional_NN' key")

    if save_as_tfjs:
        tfjs.converters.save_keras_model(model, "tfjsmodel")
        logger("Saved tfjs model under 'tfjsmodel' directory")

    if save_as_tflite:
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
        open("model.tflite", "wb").write(tflite_model)
        logger("Saved tflite model as 'model.tflite' ")

    clearLog()

    K.clear_session()

    return {
        'id': generate_id(),
        'data_type': read_mode,
        'data_path': data_path,
        'data': {
            'train': X_train,
            'test': X_test
        },
        'shape': input_shape,
        'res': {
            'real': real,
            'ans': ans
        },
        'model': model,
        'plots': plots,
        'losses': {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        },
        'num_classes': (2 if num_classes == 1 else num_classes),
        'data_sizes': {
            'train_size': processInfo['train_size'],
            'test_size': processInfo['test_size']
        }
    }
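A hedged call sketch for the convolutional helper above, selecting the ResNet152 backbone with ImageNet weights; the data_path is illustrative and the remaining arguments keep their defaults:

result = convolutional(data_path='images/',  # hypothetical dataset folder
                       pretrained={'arch': 'resnet152', 'weights': 'imagenet'},
                       epochs=5)
print(result['accuracy']['validation_accuracy'][-1])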
Code Example #13
File: app.py Project: lamvinh1001/NLP
import torch
from flask import Flask, render_template, request
from models import predict_cap
from keras.applications import ResNet152
from fearture import feature_cap
from flask_cors import cross_origin
from googletts import speak
import re
import spacy
import gc
gc.collect()
torch.cuda.empty_cache()
resnet = ResNet152(include_top=False,
                   weights='imagenet',
                   input_shape=(224, 224, 3),
                   pooling='avg')

app = Flask(__name__)

app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1


class tokenize(object):
    def __init__(self, lang):
        self.nlp = spacy.load(lang)

    def tokenizer(self, sentence):
        sentence = re.sub(r"[\*\"“”\n\\…\+\-\/\=\(\)‘•:\[\]\|’\!;]", " ",
                          str(sentence))
        sentence = re.sub(r"[ ]+", " ", sentence)
        sentence = re.sub(r"\!+", "!", sentence)
Code Example #14
def convolutional(instruction=None,
                  read_mode=None,
                  preprocess=True,
                  data_path=None,
                  verbose=0,
                  new_folders=True,
                  image_column=None,
                  training_ratio=0.8,
                  augmentation=True,
                  custom_arch=None,
                  pretrained=None,
                  epochs=10,
                  height=None,
                  width=None):
    '''
    Body of the convolutional function used that is called in the neural network query
    if the data is presented in images.
    :param many parameters: used to preprocess, tune, plot generation, and parameterizing the convolutional neural network trained.
    :return dictionary that holds all the information for the finished model.
    '''

    # data_path = get_folder_dir()

    logger("Generating datasets for classes")

    if pretrained:
        if not height:
            height = 224
        if not width:
            width = 224
        if height != 224 or width != 224:
            raise ValueError(
                "For pretrained models, both 'height' and 'width' must be 224."
            )

    if preprocess:
        if custom_arch:
            raise ValueError(
                "If 'custom_arch' is not None, 'preprocess' must be set to false."
            )

        read_mode_info = set_distinguisher(data_path, read_mode)
        read_mode = read_mode_info["read_mode"]

        training_path = "/proc_training_set"
        testing_path = "/proc_testing_set"

        if read_mode == "setwise":
            processInfo = setwise_preprocessing(data_path, new_folders, height,
                                                width)
            if not new_folders:
                training_path = "/training_set"
                testing_path = "/testing_set"

        # if image dataset in form of csv
        elif read_mode == "csvwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = csv_preprocessing(read_mode_info["csv_path"],
                                            data_path, instruction,
                                            image_column, training_ratio,
                                            height, width)

        # if image dataset in form of one folder containing class folders
        elif read_mode == "classwise":
            if training_ratio <= 0 or training_ratio >= 1:
                raise BaseException(f"Test ratio must be between 0 and 1.")
            processInfo = classwise_preprocessing(data_path, training_ratio,
                                                  height, width)

    else:
        training_path = "/training_set"
        testing_path = "/testing_set"
        processInfo = already_processed(data_path)

    num_channels = 3
    color_mode = 'rgb'
    if processInfo["gray_scale"]:
        num_channels = 1
        color_mode = 'grayscale'

    input_shape = (processInfo["height"], processInfo["width"], num_channels)
    input_single = (processInfo["height"], processInfo["width"])
    num_classes = processInfo["num_categories"]
    loss_func = ""
    output_layer_activation = ""

    if num_classes > 2:
        loss_func = "categorical_crossentropy"
        output_layer_activation = "softmax"
    elif num_classes == 2:
        num_classes = 1
        loss_func = "binary_crossentropy"
        output_layer_activation = "sigmoid"

    logger("Creating convolutional neural netwwork dynamically")

    # Convolutional Neural Network

    # Build model based on custom_arch configuration if given
    if custom_arch:
        with open(custom_arch, "r") as f:
            custom_arch_dict = json.load(f)
            custom_arch_json_string = json.dumps(custom_arch_dict)
            model = model_from_json(custom_arch_json_string)

    # Build an existing state-of-the-art model
    elif pretrained:

        arch_lower = pretrained.get('arch').lower()

        # If user specifies value of pretrained['weights'] as 'imagenet', weights pretrained on ImageNet will be used
        if 'weights' in pretrained and pretrained.get('weights') == 'imagenet':
            # Load ImageNet pretrained weights
            if arch_lower == "vggnet16":
                base_model = VGG16(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "vggnet19":
                base_model = VGG19(include_top=False,
                                   weights='imagenet',
                                   input_shape=input_shape)
                x = Flatten()(base_model.output)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                x = Dense(4096)(x)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet50":
                base_model = ResNet50(include_top=False,
                                      weights='imagenet',
                                      input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet101":
                base_model = ResNet101(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            elif arch_lower == "resnet152":
                base_model = ResNet152(include_top=False,
                                       weights='imagenet',
                                       input_shape=input_shape)
                x = GlobalAveragePooling2D()(base_model.output)
                x = Dropout(0.5)(x)
                pred = Dense(num_classes,
                             activation=output_layer_activation)(x)
                model = Model(base_model.input, pred)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")

        else:
            # Randomly initialized weights
            if arch_lower == "vggnet16":
                model = VGG16(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "vggnet19":
                model = VGG19(include_top=True,
                              weights=None,
                              classes=num_classes,
                              classifier_activation=output_layer_activation)
            elif arch_lower == "resnet50":
                model = ResNet50(include_top=True,
                                 weights=None,
                                 classes=num_classes)
            elif arch_lower == "resnet101":
                model = ResNet101(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            elif arch_lower == "resnet152":
                model = ResNet152(include_top=True,
                                  weights=None,
                                  classes=num_classes)
            else:
                raise ModuleNotFoundError("arch \'" + pretrained.get('arch') +
                                          "\' not supported.")
    else:
        model = Sequential()
        # model.add(
        #     Conv2D(
        #         64,
        #         kernel_size=3,
        #         activation="relu",
        #         input_shape=input_shape))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Conv2D(64, kernel_size=3, activation="relu"))
        # model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(Flatten())
        # model.add(Dense(num_classes, activation="softmax"))
        # model.compile(
        #     optimizer="adam",
        #     loss=loss_func,
        #     metrics=['accuracy'])
        model.add(
            Conv2D(filters=64,
                   kernel_size=5,
                   activation="relu",
                   input_shape=input_shape))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
        model.add(Conv2D(filters=64, kernel_size=3, activation="relu"))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Flatten())
        model.add(Dense(units=256, activation="relu"))
        model.add(Dropout(0.25))
        model.add(Dense(units=num_classes, activation="softmax"))

    model.compile(optimizer="adam", loss=loss_func, metrics=['accuracy'])

    logger("Located image data")

    if augmentation:
        train_data = ImageDataGenerator(rescale=1. / 255,
                                        shear_range=0.2,
                                        zoom_range=0.2,
                                        horizontal_flip=True)
        test_data = ImageDataGenerator(rescale=1. / 255)

        logger('Dataset augmented through zoom, shear, flip, and rescale')
    else:
        train_data = ImageDataGenerator()
        test_data = ImageDataGenerator()

    logger("->", "Optimal image size identified: {}".format(input_shape))
    X_train = train_data.flow_from_directory(
        data_path + training_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["train_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])
    X_test = test_data.flow_from_directory(
        data_path + testing_path,
        target_size=input_single,
        color_mode=color_mode,
        batch_size=(16 if processInfo["test_size"] >= 16 else 1),
        class_mode=loss_func[:loss_func.find("_")])

    if epochs <= 0:
        raise BaseException("Number of epochs has to be greater than 0.")
    logger('Training image model')
    history = model.fit_generator(
        X_train,
        steps_per_epoch=X_train.n // X_train.batch_size,
        validation_data=X_test,
        validation_steps=X_test.n // X_test.batch_size,
        epochs=epochs,
        verbose=verbose)

    logger(
        '->', 'Final training accuracy: {}'.format(
            history.history['accuracy'][len(history.history['accuracy']) - 1]))
    logger(
        '->',
        'Final validation accuracy: {}'.format(history.history['val_accuracy'][
            len(history.history['val_accuracy']) - 1]))
    # storing values the model dictionary

    logger("Stored model under 'convolutional_NN' key")
    clearLog()
    return {
        'id': generate_id(),
        'data_type': read_mode,
        'data_path': data_path,
        'data': {
            'train': X_train,
            'test': X_test
        },
        'shape': input_shape,
        "model": model,
        'losses': {
            'training_loss': history.history['loss'],
            'val_loss': history.history['val_loss']
        },
        'accuracy': {
            'training_accuracy': history.history['accuracy'],
            'validation_accuracy': history.history['val_accuracy']
        },
        'num_classes': (2 if num_classes == 1 else num_classes),
        'data_sizes': {
            'train_size': processInfo['train_size'],
            'test_size': processInfo['test_size']
        }
    }
Code Example #15
from keras.applications import VGG16, VGG19, Xception, ResNet101, ResNet101V2, ResNet152, ResNet152V2
from keras.applications import ResNet50, ResNet50V2, InceptionV3, InceptionResNetV2
from keras.applications import MobileNet, MobileNetV2, DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Activation
from keras.optimizers import Adam

# vgg16 = VGG16() # (None, 224, 224, 3)
# model = VGG19()
model = Xception()
model = ResNet101()
model = ResNet101V2()
model = ResNet152()
model = ResNet152V2()
model = ResNet50()
model = ResNet50V2()
model = InceptionV3()
model = InceptionResNetV2()
model = MobileNet()
model = MobileNetV2()
model = DenseNet121()
model = DenseNet169()
model = DenseNet201()
model = NASNetLarge()
model = NASNetMobile()

# vgg16.summary()
'''
model= Sequential()
# model.add(vgg16)
Code Example #16
def resNetV2(img_width=64, img_height=64, include_top=True):
    model = ResNet152(include_top=include_top,
                      weights=None,
                      input_shape=(img_width, img_height, 3),
                      classes=2)
    return model
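A short compile sketch for the two-class builder above; the loss and optimizer are assumptions, since the original function only constructs the network:

model = resNetV2(img_width=64, img_height=64)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # assumed: 2-way softmax head from classes=2
              metrics=['accuracy'])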
Code Example #17
def build_model(lr=1e-4):
    model_Input = Input(shape=(224, 224, 3))

    ###### DenseNet201 Model ######
    dense_model = DenseNet201(weights='imagenet',
                              include_top=False,
                              input_shape=(224, 224, 3))(model_Input)
    dense_model_globalavg = GlobalAveragePooling2D()(dense_model)
    dense_model_Dropout = Dropout(0.5)(dense_model_globalavg)
    dense_model_BatchNormalization = BatchNormalization()(dense_model_Dropout)

    ###### ResNet152 Model ######
    res_model = ResNet152(weights='imagenet',
                          include_top=False,
                          input_shape=(224, 224, 3))(model_Input)
    res_model_globalavg = GlobalAveragePooling2D()(res_model)
    res_model_Dropout = Dropout(0.5)(res_model_globalavg)
    res_model_BatchNormalization = BatchNormalization()(res_model_Dropout)

    ###### VGG19 Model ######
    vgg_model = VGG19(weights='imagenet',
                      include_top=False,
                      input_shape=(224, 224, 3))(model_Input)
    vgg_model_globalavg = GlobalAveragePooling2D()(vgg_model)
    vgg_model_Dropout = Dropout(0.5)(vgg_model_globalavg)
    vgg_model_BatchNormalization = BatchNormalization()(vgg_model_Dropout)

    ###### Xception Model ######
    Xception_model = Xception(weights='imagenet',
                              include_top=False,
                              input_shape=(224, 224, 3))(model_Input)
    Xception_model_globalavg = GlobalAveragePooling2D()(Xception_model)
    Xception_model_Dropout = Dropout(0.5)(Xception_model_globalavg)
    Xception_model_BatchNormalization = BatchNormalization()(
        Xception_model_Dropout)

    ###### InceptionV3 Model ######
    Inception_v3_model = InceptionV3(weights='imagenet',
                                     include_top=False,
                                     input_shape=(224, 224, 3))(model_Input)
    Inception_v3_model_globalavg = GlobalAveragePooling2D()(Inception_v3_model)
    Inception_v3_model_Dropout = Dropout(0.5)(Inception_v3_model_globalavg)
    Inception_v3_model_BatchNormalization = BatchNormalization()(
        Inception_v3_model_Dropout)

    # Concatenating all 5 models into model_concat
    model_concat = concatenate([
        dense_model_BatchNormalization, res_model_BatchNormalization,
        vgg_model_BatchNormalization, Xception_model_BatchNormalization,
        Inception_v3_model_BatchNormalization
    ])

    # Adding dense and dropout layers after the concatenation
    model_Dense_1 = Dense(1000)(model_concat)
    model_Drop_1 = Dropout(0.5)(model_Dense_1)
    model_Dense_2 = Dense(1000)(model_Drop_1)
    model_Drop_2 = Dropout(0.5)(model_Dense_2)
    model_Output = Dense(2, activation='softmax')(model_Drop_2)

    # Defining the loss function and the optimizer
    model = Model(inputs=model_Input, outputs=model_Output)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=lr),
                  metrics=['accuracy'])

    return model
Code Example #18
#     model.add(VGG16(include_top=False,weights="imagenet",input_tensor=(32,32,3)))
#     model.add(Flatten())
#     model.add(Dense())
#     model.add(BatchNormalization())
#     model.add(Activation("relu"))
#     model.add(Dense(10,activation="softmax"))

#     model.compile(loss=loss,optimizer=optimizer,metrics=['acc'])
#     model.fit(x_train,y_train,batch_size=100,epochs=20)
#     return model

# model =build_model_vgg16()

model = Sequential()
resnet = ResNet152(include_top=False,
                   weights="imagenet",
                   input_shape=(32, 32, 3))
model.add(resnet)
model.add(Flatten())
model.add(Dense(100))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dense(10, activation="softmax"))

model.compile(loss="sparse_categorical_crossentropy",
              optimizer=adam(1e-4),
              metrics=['acc'])
model.fit(x_train, y_train, batch_size=100, epochs=20, validation_split=0.3)

loss, acc = model.evaluate(x_test, y_test)
print("loss:", loss, "acc:", acc)
Code Example #19
import matplotlib.pyplot as plt
import cv2
import config

# Pre-trained Network model
NN_DICT = {
    'VGG16': {
        'model': VGG16(weights="imagenet"),
        'layer_name': 'block5_conv3',
    },
    'ResNet50': {
        'model': ResNet50(weights="imagenet"),
        'layer_name': 'activation_49',
    },
    'ResNet152': {
        'model': ResNet152(weights="imagenet"),
        'layer_name': 'conv5_block3_out',
    },
    'DenseNet121': {
        'model': DenseNet121(weights="imagenet"),
        'layer_name': 'relu',
    },
}

# Image Size to input the model
IMG_SIZE = (224, 224)
# frame numbers
FRAME_NUMBERS = [1, 24, 48, 72, 96, 120, 144, 168]


def get_image_activations(image_path):