Example No. 1
def efficient_model(input_shape=(224, 224, 3)):
    model = EfficientNetB0(input_shape=input_shape,
                           weights=None)  # can use EfficientNetB7 instead for a larger model
    input_layer = model.input
    last_layer = model.layers[-2].output  # output of the layer just before the final classification layer
    x = Dense(1, activation='sigmoid')(last_layer)
    return Model(inputs=input_layer, outputs=x)
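
A minimal wiring sketch for the function above; the import paths (qubvel's efficientnet.keras plus standalone Keras) and the compile settings are assumptions, since the snippet omits them.

from efficientnet.keras import EfficientNetB0  # assumed source of EfficientNetB0
from keras.layers import Dense
from keras.models import Model

model = efficient_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])  # assumed settings for the 1-unit sigmoid head
model.summary()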
Example No. 2
    def _build(self):

        #        inputs = keras.layers.Input((*self.input_dims, 1))
        #        x = keras.layers.Conv2D(filters=3, kernel_size=(1, 1), strides=(1, 1), name="initial_conv2d")(inputs)
        #        x = keras.layers.BatchNormalization(axis=3, epsilon=1.001e-5, name='initial_bn')(x)
        #        x = keras.layers.Activation('relu', name='initial_relu')(x)
        #
        engine = EfficientNetB0(include_top=False,
                                input_shape=(*self.input_dims[:2], 3),
                                classes=6)  # note: classes has no effect when include_top=False

        x = keras.layers.GlobalAveragePooling2D(name='avg_pool')(engine.output)
        x = keras.layers.Dropout(0.2)(x)
        x = keras.layers.Dense(keras.backend.int_shape(x)[1],
                               activation="relu",
                               name="dense_hidden_1")(x)
        x = keras.layers.Dropout(0.1)(x)
        out = keras.layers.Dense(6, activation="sigmoid",
                                 name='dense_output')(x)

        self.model = keras.models.Model(inputs=engine.input, outputs=out)

        self.model.compile(loss='binary_crossentropy',
                           optimizer='sgd',
                           metrics=['acc', 'mse'])
Example No. 3
    def _build(self):

        self.model = EfficientNetB0(include_top=False,
                                    input_shape=(*self.input_dims, 1),
                                    classes=6)
        self.model.compile(loss=weighted_log_loss,
                           optimizer=keras.optimizers.Adam(0.0),
                           metrics=[weighted_loss])
Example No. 4
def build_efficient_net(n_classes, img_size, model_name):

    input_layer = Input((img_size, img_size, 3))
    eff_net = EfficientNetB0(weights=None,
                             include_top=False,
                             input_tensor=input_layer)
    eff_net.trainable = True

    head = vanilla_head(eff_net.output, n_classes)
    return Model(input_layer, head, name=model_name)
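
vanilla_head is referenced but not defined in this snippet; the sketch below is only a plausible stand-in (global pooling followed by a softmax classifier), not the author's implementation.

from keras.layers import GlobalAveragePooling2D, Dropout, Dense

def vanilla_head(backbone_output, n_classes):
    # hypothetical head: pool the backbone feature map and classify
    x = GlobalAveragePooling2D()(backbone_output)
    x = Dropout(0.2)(x)
    return Dense(n_classes, activation='softmax')(x)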
Example No. 5
    def __init__(self,
                 model_type='vgg19',
                 layer='fc1',
                 weights='imagenet',
                 img_dims=(320, 240),
                 **kwargs):
        """Initializes the object feature detector module
        efficientnet/probs
        Args:
        vgg19:fc2
        """
        super().__init__(**kwargs)
        self.weights = weights
        self.layer = layer
        self.session = tf.Session(graph=tf.Graph())
        self.pre_input = None
        self.img_dims = img_dims
        self.queue = deque(maxlen=1)
        with self.session.graph.as_default():
            set_session(self.session)

            if model_type == 'efficientnet':
                from efficientnet.keras import EfficientNetB0
                from efficientnet.keras import preprocess_input
                self.pre_input = preprocess_input
                base_model = EfficientNetB0(weights=self.weights)
                self.xs, self.ys = 224, 224

            if model_type == 'inceptionv3':
                from keras.applications.inception_v3 import InceptionV3
                from keras.applications.inception_v3 import preprocess_input
                base_model = InceptionV3(weights=self.weights,
                                         include_top=True)
                self.pre_input = preprocess_input
                self.xs, self.ys = 299, 299

            if model_type == 'vgg19':
                from keras.applications.vgg19 import VGG19
                from keras.applications.vgg19 import preprocess_input
                base_model = VGG19(weights=self.weights, include_top=True)
                self.pre_input = preprocess_input
                self.xs, self.ys = 224, 224

            # allow for other output layers
            base_model.summary()  # print the backbone architecture
            self.model = Model(inputs=base_model.input,
                               outputs=base_model.get_layer(self.layer).output)
        print('Feature extractor module setup complete.')
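
A hedged usage sketch for the extractor above: the extract method below is hypothetical (it is not part of the original class), and it reuses self.pre_input, self.model, and the TF1-style session handling (set_session) already set up in the constructor.

    def extract(self, frame):
        """Hypothetical helper: preprocess one image and return its feature vector."""
        import numpy as np
        from skimage.transform import resize  # assumed resizing utility

        img = resize(frame, (self.ys, self.xs), preserve_range=True).astype('float32')
        batch = self.pre_input(np.expand_dims(img, axis=0))
        with self.session.graph.as_default():
            set_session(self.session)
            return self.model.predict(batch)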
Example No. 6
    def _get_backbone_model(self):
        backbone_model = None
        if self.backbone_name == 'resnet18':
            backbone_model = ResNet18(input_shape=self.input_shape, include_top=False, weights='imagenet')
        elif self.backbone_name == 'resnet34':
            backbone_model = ResNet34(input_shape=self.input_shape, include_top=False, weights='imagenet')
        elif self.backbone_name == 'mobilenetv2':
            backbone_model = MobileNetV2(input_shape=self.input_shape, include_top=False, weights='imagenet')
        elif self.backbone_name == 'efficientnetb0':
            backbone_model = EfficientNetB0(input_shape=self.input_shape, include_top=False, weights='imagenet')
        elif self.backbone_name == 'efficientnetb1':
            backbone_model = EfficientNetB1(input_shape=self.input_shape, include_top=False, weights='imagenet')
        elif self.backbone_name == 'efficientnetb3':
            backbone_model = EfficientNetB3(input_shape=self.input_shape, include_top=False, weights='imagenet')
        elif self.backbone_name == 'efficientnetb5':
            backbone_model = EfficientNetB5(input_shape=self.input_shape, include_top=False, weights='imagenet')
        elif self.backbone_name == 'densenet':
            backbone_model = DenseNet121(input_shape=self.input_shape, include_top=False, weights='imagenet')
        return backbone_model
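
Whichever backbone _get_backbone_model returns is headless (include_top=False), so callers still have to attach a head. Below is a minimal sketch of doing that inside the same class; the _build_classifier name, the num_classes attribute, and the softmax head are assumptions, and keras.layers.GlobalAveragePooling2D, keras.layers.Dense and keras.models.Model are assumed to be imported.

    def _build_classifier(self):
        # hypothetical method: put a pooling + softmax head on top of the chosen backbone
        backbone = self._get_backbone_model()
        x = GlobalAveragePooling2D()(backbone.output)
        outputs = Dense(self.num_classes, activation='softmax')(x)  # num_classes is an assumed attribute
        return Model(inputs=backbone.input, outputs=outputs)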
Example No. 7
    def __init__(self, model_size, imag_size, num_cls):

        try:
            if model_size == "B0":
                model = EfficientNetB0(weights='imagenet', input_shape=(imag_size, imag_size, 3), include_top=False)
            elif model_size == "B3":
                model = EfficientNetB3(weights='imagenet', input_shape=(imag_size, imag_size, 3), include_top=False)
            elif model_size == "B5":
                model = EfficientNetB5(weights='imagenet', input_shape=(imag_size, imag_size, 3), include_top=False)
            elif model_size == "B7":
                model = EfficientNetB7(weights='imagenet', input_shape=(imag_size, imag_size, 3), include_top=False)

            ENet_out = model.output
            ENet_out = Flatten()(ENet_out)

            Hidden1_in = Dense(1024, activation="relu")(ENet_out)
            Hidden1_in = Dropout(0.5)(Hidden1_in)

            predictions = Dense(units=num_cls, activation="softmax")(Hidden1_in)
            self.model_f = Model(inputs=model.input, outputs=predictions)
            self.model_f.compile(optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08),
                                 loss='categorical_crossentropy',
                                 metrics=[metrics.mae, metrics.categorical_accuracy])

        except NameError:
            # 'model' is only defined for B0/B3/B5/B7; any other size lands here
            print("only B0/B3/B5/B7 allowed")
Example No. 8
    if not os.path.isdir(result_path):
        os.mkdir(DEFAULT_IMAGE_DIR + '/result')
        os.mkdir(DEFAULT_IMAGE_DIR + '/cropped')

################# LOAD MODELS #######################
#### MASK-R-CNN
    mrcnn_model = modellib.MaskRCNN(mode="inference",
                                    config=config,
                                    model_dir=DEFAULT_LOGS_DIR)
    weights_path = DEFAULT_MRCNN_MODEL_DIR
    mrcnn_model.load_weights(weights_path, by_name=True)

    ##### EFFICIENTNET

    efficient_net = EfficientNetB0(weights='imagenet',
                                   input_shape=(32, 32, 3),
                                   include_top=False,
                                   pooling='max')

    eff_model = Sequential()
    eff_model.add(efficient_net)
    eff_model.add(Dense(units=120, activation='relu'))
    eff_model.add(Dense(units=120, activation='relu'))
    eff_model.add(Dense(units=1, activation='sigmoid'))
    eff_model.summary()

    eff_model.compile(optimizer=Adam(lr=0.0001),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

    eff_model.load_weights(DEFAULT_EFF_MODEL_DIR)
    #############################################################################
Example No. 9
#name: Image Classification
#description: Image classification based on the EfficientNet model
#language: python
#input: file file
#output: map classes [Detected classes with probabilities]
#tags: demo, panel, files, efficientnet
#condition: file.isFile && file.size < 1e6 && (file.name.endsWith("jpg") || file.name.endsWith("jpeg") || file.name.endsWith("png"))
#help-url: https://github.com/qubvel/efficientnet

import numpy as np
from skimage.io import imread
from efficientnet.keras import EfficientNetB0
from keras.applications.imagenet_utils import decode_predictions
from efficientnet.keras import center_crop_and_resize, preprocess_input

image = imread(file)
model = EfficientNetB0(weights='imagenet')
image_size = model.input_shape[1]
_image = center_crop_and_resize(image, image_size=image_size)
_image = preprocess_input(_image)
_image = np.expand_dims(_image, 0)
predicted = model.predict(_image)
predicted = decode_predictions(predicted)[0]

classes = {}
for p in predicted:
    classes[p[1]] = float(p[2])
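
The classes map built above pairs each predicted ImageNet label with its probability; reading back the most likely label is plain dictionary handling:

top_label = max(classes, key=classes.get)
print(top_label, classes[top_label])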
Example No. 10
    verbose=1,
    save_best_only=True,
    mode="max",
)
lr_reduce = ReduceLROnPlateau(
    monitor="val_loss",
    factor=np.sqrt(0.1),
    patience=5,
    verbose=1,
    cooldown=0,
    min_lr=0.5e-6,
)
callbacks = [checkpoint, lr_reduce]

conv_m = EfficientNetB0(weights="imagenet",
                        include_top=False,
                        input_shape=(size, size, 3))
conv_m.trainable = False
model = Sequential()
model.add(conv_m)
model.add(AveragePooling2D(pool_size=(7, 7)))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(5, activation="softmax"))

model.compile(
    loss="categorical_crossentropy",
    optimizer=SGD(lr=0.1, momentum=0.9),
    metrics=["accuracy"],
Example No. 11
	target_size=(img_rows, img_cols),
	batch_size=batch_size,
	class_mode='categorical')  # multi-class; use 'binary' for two classes
print("train_generator.filenames", train_generator.filenames)  # print the filenames in order
print("train_generator.class_indices", train_generator.class_indices)  # print the mapping from class folders to label indices
validation_generator = test_datagen.flow_from_directory(
	test_data_dir,
	target_size=(img_rows, img_cols),
	batch_size=batch_size,
	class_mode='categorical')  # multi-class; use 'binary' for two classes
print("validation_generator.filenames", validation_generator.filenames)  # print the filenames in order
print("validation_generator.class_indices", validation_generator.class_indices)  # print the mapping from class folders to label indices

################## train the network model ###########################
# build model
base_model = EfficientNetB0(input_shape=(img_rows, img_cols, 3), weights='imagenet', include_top=False)
x = keras.layers.GlobalAveragePooling2D()(base_model.output)
output = keras.layers.Dense(num_classes, activation='softmax')(x)
model = keras.models.Model(inputs=[base_model.input], outputs=[output])
# initiate optimizer
opt = keras.optimizers.SGD(lr=0.001) #  keras.optimizers.Adam(lr=0.001)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy']) # optimizer='SGD'
# train
history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,  # typically steps_per_epoch = training samples (3120) / batch_size (4)
    epochs=epochs,  # final number of epochs
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,  # typically validation_steps = validation samples (780) / batch_size (4)
    verbose=2,
)
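
The snippet starts partway through the flow_from_directory call for train_generator; the generators it relies on could be created as sketched below. The train_data_dir variable and the augmentation settings are assumptions (mirroring the test_data_dir usage above), not the original configuration.

from keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(rescale=1. / 255, horizontal_flip=True)  # assumed augmentation
test_datagen = ImageDataGenerator(rescale=1. / 255)

train_generator = train_datagen.flow_from_directory(
    train_data_dir,  # assumed counterpart of test_data_dir
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical')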
Example No. 12
            json_file.close()
            model = model_from_json(model_json)
            model.load_weights(args["weights"])
            model.compile(
                optimizer=Adam(lr=0.0001),
                loss="binary_crossentropy",
                metrics=["accuracy"],
            )
            break
        elif choice in ("n", "N"):
            sys.exit(0)

else:
    # initialize and compile the EfficientNet model (uses transfer learning)
    efficient_net = EfficientNetB0(weights="imagenet",
                                   input_shape=(256, 128, 3),
                                   include_top=False,
                                   pooling="max")
    model = Sequential()
    model.add(efficient_net)
    model.add(Dense(units=120, activation="relu"))
    model.add(Dense(units=120, activation="relu"))
    model.add(Dense(units=2, activation="softmax"))
    model.compile(optimizer=Adam(lr=0.0001),
                  loss="binary_crossentropy",  # note: with a 2-unit softmax output, categorical_crossentropy is the more usual pairing
                  metrics=["accuracy"])
    if not os.path.isfile(args["model"]):
        model_json = model.to_json()
        with open(args["model"], "w") as json_file:
            json_file.write(model_json)
    model.summary()
Example No. 13
x_test, y_test = load_data(dataset_test_dst)
print("x_train.shape = {}".format(x_train.shape))
print("x_test.shape = {}".format(x_test.shape))

x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# one-hot encoding
num_cls = 5
Y_train = np_utils.to_categorical(y_train, num_cls)
Y_test = np_utils.to_categorical(y_test, num_cls)
#------------------------------------end of importing data-----------------------------#

#------------------------------------start building model-----------------------------#
model = EfficientNetB0(weights='imagenet',
                       input_shape=(imag_size, imag_size, 3),
                       include_top=False)

ENet_out = model.output
ENet_out = Flatten()(ENet_out)

Hidden1_in = Dense(1024, activation="relu")(ENet_out)
Hidden1_in = Dropout(0.5)(Hidden1_in)

predictions = Dense(units=num_cls, activation="softmax")(Hidden1_in)
model_f = Model(inputs=model.input, outputs=predictions)
model_f.compile(optimizers.Adam(lr=0.0001,
                                beta_1=0.9,
                                beta_2=0.999,
                                epsilon=1e-08),
                loss='categorical_crossentropy')
Example No. 14
def setup():
    return EfficientNetB0(weights='imagenet')
Example No. 15
        model_split = 'block2a_expand_conv'

        layer_idx = [
            i for i in range(len(model.layers))
            if model.layers[i].name == model_split
        ][0]

        for i, layer in enumerate(model.layers):
            if i < layer_idx or isinstance(layer, layers.BatchNormalization):
                layer.trainable = False
            else:
                layer.trainable = True
    else:
        base_model = EfficientNetB0(include_top=False,
                                    weights='imagenet',
                                    input_shape=(None, None, 3),
                                    pooling='avg')

        x = base_model.output
        predictions = layers.Dense(1, activation='sigmoid')(x)

        model = Model(inputs=base_model.input, outputs=predictions)

        if freeze:
            for layer in base_model.layers:
                layer.trainable = False
        else:
            model_split = 'block2a_expand_conv'

            layer_idx = [
                i for i in range(len(model.layers))
Example No. 16
    opt = Adam(lr=INIT_LR)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  min_lr=1e-10,
                                  verbose=1,
                                  cooldown=2)

    model_checkpoint = ModelCheckpoint('EFNB3Weight.h5',
                                       monitor='val_loss',
                                       save_best_only=True,
                                       period=3)
    input_tensor = Input(shape=(28, 28, 3))

    model = EfficientNetB0(weights=None,
                           include_top=False,
                           input_tensor=input_tensor)

    x = model.output
    x = Flatten()(x)
    x = Dense(1024, activation="relu")(x)
    x = Dropout(0.5)(x)
    predictions = Dense(units=10, activation="softmax")(x)
    model_f = Model(inputs=model.input, outputs=predictions)
    model_f.compile(Adam(lr=INIT_LR),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])

    model.summary()

    # train the network
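
The training call itself is not included in the snippet; a hedged sketch of how the callbacks defined above might be used is shown below. The generator names (train_generator, val_generator) and the epoch count are assumptions, not part of the original code.

    model_f.fit_generator(train_generator,
                          steps_per_epoch=len(train_generator),
                          epochs=30,  # assumed value
                          validation_data=val_generator,
                          validation_steps=len(val_generator),
                          callbacks=[reduce_lr, model_checkpoint])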