def get_model(model='b2', shape=(320,320)):
    K.clear_session()
    h,w = shape
    if model == 'b0':
        base_model = efn.EfficientNetB0(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b1':
        base_model = efn.EfficientNetB1(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b2':
        base_model = efn.EfficientNetB2(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b3':
        base_model = efn.EfficientNetB3(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b4':
        base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b5':
        base_model = efn.EfficientNetB5(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    elif model == 'b6':
        base_model = efn.EfficientNetB6(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    else:
        base_model = efn.EfficientNetB7(weights='imagenet', include_top=False, pooling='avg', input_shape=(h, w, 3))

    x = base_model.output
    y_pred = Dense(4, activation='sigmoid')(x)
    return Model(inputs=base_model.input, outputs=y_pred)
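A minimal usage sketch for the snippet above; the compile settings are illustrative assumptions (the original does not compile the model), and binary cross-entropy is chosen only because the four sigmoid outputs suggest a multi-label target.
# Hypothetical usage of get_model(); optimizer and loss are assumptions, not part of the original snippet.
model = get_model(model='b2', shape=(320, 320))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()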
Example No. 2
def build_model(base = 'resnet', trainable = False):
  # choose a base model
  if base == 'resnet':
    base_model = keras.applications.ResNet50(weights = 'imagenet', include_top = False)
  elif base == 'efficientnet':
    base_model = efn.EfficientNetB7(weights = 'imagenet', include_top = False)  
  base_model.trainable = trainable

  # build on top of the base model
  inputs = Input(shape = (IMG_SIZE, IMG_SIZE, 1))
  x = Conv2D(3, (3, 3), padding = 'same')(inputs)
  x = base_model(x)
  x = GlobalAveragePooling2D()(x)
  x = BatchNormalization()(x)
  x = Dropout(0.5)(x)
  x = Dense(1024, activation = 'relu')(x)
  x = Dense(512, activation = 'relu')(x)
  x = Dense(256, activation='relu')(x)
  x = BatchNormalization()(x)
  x = Dropout(0.5)(x)
  head_root = Dense(168, activation = 'softmax', name = "root")(x)
  head_vowel = Dense(11, activation = 'softmax', name = "vowel")(x)
  head_consonant = Dense(7, activation = 'softmax', name = "consonant")(x)

  model = Model(inputs = inputs, outputs = [head_root, head_vowel, head_consonant])

  # compile model
  model.compile(optimizer = 'adam', 
                loss = {"root": "categorical_crossentropy",
                        "vowel": "categorical_crossentropy",
                        "consonant": "categorical_crossentropy"},  
                metrics = ["accuracy"])
  return model
Example No. 3
def cnn_model(img_size, weights):
    input_size = (img_size, img_size, 3)
    baseModel = efn.EfficientNetB7(
        weights="imagenet",
        include_top=False,
        input_shape=input_size,
        pooling='max'
    )

    model = Sequential()
    model.add(baseModel)
    model.add(Dense(units=512, activation='relu', kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01),
                    bias_initializer='zeros'))
    model.add(Dropout(0.5))
    model.add(Dense(units=128, activation='relu', kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01),
                    bias_initializer='zeros'))
    model.add(Dense(units=2, activation='softmax', kernel_initializer=initializers.RandomNormal(mean=0.0, stddev=0.01),
                    bias_initializer='zeros'))
    model.summary()

    optimizer = Adam(
        lr=0.0001
    )
    model.compile(
        loss="categorical_crossentropy",
        optimizer=optimizer,
        metrics=["accuracy"]
    )

    model.load_weights(weights)

    return model
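A hypothetical call for cnn_model() above; the image size and checkpoint path are placeholders, not values from the original source, and a real weights file would be needed for load_weights to succeed.
# Hypothetical usage of cnn_model(); img_size and the weights path are placeholders.
model = cnn_model(img_size=224, weights='checkpoints/effnetb7_best.h5')
print(model.output_shape)  # (None, 2): two-class softmax head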
Example No. 4
def get_efficientnet_model(
    model_name='efficientnetb0',
    input_shape=(224, 224, 3),
    input_tensor=None,
    include_top=True,
    classes=1000,
    weights='imagenet',
):

    layer_names = [
        'block3a_expand_activation',  #C2
        'block4a_expand_activation',  #C3
        'block6a_expand_activation',  #C4
        'top_activation'  #C5
    ]

    Args = {
        'input_shape': input_shape,
        'weights': weights,
        'include_top': include_top,
        'input_tensor': input_tensor
    }

    if model_name == 'efficientnetb0':
        backbone = efn.EfficientNetB0(**Args)

    elif model_name == 'efficientnetb1':
        backbone = efn.EfficientNetB1(**Args)

    elif model_name == 'efficientnetb2':
        backbone = efn.EfficientNetB2(**Args)

    elif model_name == 'efficientnetb3':
        backbone = efn.EfficientNetB3(**Args)

    elif model_name == 'efficientnetb4':
        backbone = efn.EfficientNetB4(**Args)

    elif model_name == 'efficientnetb5':
        backbone = efn.EfficientNetB5(**Args)

    elif model_name == 'efficientnetb6':
        backbone = efn.EfficientNetB6(**Args)

    elif model_name == 'efficientnetb7':
        backbone = efn.EfficientNetB7(**Args)

    else:
        raise ValueError('No such model {}'.format(model_name))

    several_layers = []

    several_layers.append(backbone.get_layer(layer_names[0]).output)
    several_layers.append(backbone.get_layer(layer_names[1]).output)
    several_layers.append(backbone.get_layer(layer_names[2]).output)
    several_layers.append(backbone.get_layer(layer_names[3]).output)

    model = keras.models.Model(inputs=[backbone.input], outputs=several_layers)
    return model
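A small sketch of how the multi-scale backbone above might be consumed, for example by a detection or segmentation decoder; the calling code is an assumption for illustration and is not part of the original source.
# Hypothetical usage: pull the C2-C5 feature maps named above from an EfficientNetB0 backbone.
backbone = get_efficientnet_model(model_name='efficientnetb0',
                                  input_shape=(224, 224, 3),
                                  include_top=False,
                                  weights='imagenet')
c2, c3, c4, c5 = backbone.outputs  # feature maps at strides 4, 8, 16 and 32 (per the C2-C5 comments above)
for feature_map in backbone.outputs:
    print(feature_map.shape)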
Example No. 5
def get_model():
    K.clear_session()
    base_model = efn.EfficientNetB7(weights='imagenet',
                                    include_top=False,
                                    pooling='avg',
                                    input_shape=(260, 260, 3))
    x = base_model.output
    y_pred = Dense(4, activation='sigmoid')(x)
    return Model(inputs=base_model.input, outputs=y_pred)
Example No. 6
 def _setup_base_model(self):
     base_model = efn.EfficientNetB7(weights='imagenet', include_top=False)
     # freeze the feature-extraction layers, but keep the BatchNormalization layers trainable
     for layer in base_model.layers:
         if isinstance(layer, BatchNormalization):
             layer.trainable = True
         else:
             layer.trainable = False
     return base_model
Example No. 7
def effnet_retinanet(num_classes, backbone='EfficientNetB0', inputs=None, modifier=None, **kwargs):
    """ Constructs a retinanet model using a resnet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a ResNet backbone.
    """
    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            # inputs = keras.layers.Input(shape=(224, 224, 3))
            inputs = keras.layers.Input(shape=(None, None, 3))

    # get last conv layer from the end of each block [28x28, 14x14, 7x7]
    if backbone == 'EfficientNetB0':
        model = efn.EfficientNetB0(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB1':
        model = efn.EfficientNetB1(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB2':
        model = efn.EfficientNetB2(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB3':
        model = efn.EfficientNetB3(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB4':
        model = efn.EfficientNetB4(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB5':
        model = efn.EfficientNetB5(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB6':
        model = efn.EfficientNetB6(input_tensor=inputs, include_top=False, weights=None)
    elif backbone == 'EfficientNetB7':
        model = efn.EfficientNetB7(input_tensor=inputs, include_top=False, weights=None)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    layer_outputs = ['block4a_expand_activation', 'block6a_expand_activation', 'top_activation']

    layer_outputs = [
        model.get_layer(name=layer_outputs[0]).output,  # 28x28
        model.get_layer(name=layer_outputs[1]).output,  # 14x14
        model.get_layer(name=layer_outputs[2]).output,  # 7x7
    ]
    # create the EfficientNet backbone model
    model = keras.models.Model(inputs=inputs, outputs=layer_outputs, name=model.name)

    # invoke modifier if given
    if modifier:
        model = modifier(model)

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=model.outputs, **kwargs)
Example No. 8
 def test_efn(self):
     from efficientnet import keras as efn
     keras.backend.set_learning_phase(0)
     model = efn.EfficientNetB7(weights='imagenet')
     res = run_image(model,
                     self.model_files,
                     img_path,
                     target_size=(600, 600),
                     rtol=1e-1)
     self.assertTrue(*res)
Example No. 9
def construct_mlp(input_size, num_classes, num_frames,
                  dropout_size=0.5, ef_mode=4, l2_reg=1e-5):
    """
    Construct a MLP model for urban sound tagging.
    Parameters
    ----------
    num_frames
    input_size
    num_classes
    dropout_size
    ef_mode
    l2_reg
    Returns
    -------
    model
    """

    # Add hidden layers
    from keras.layers import Flatten, Conv1D, Conv2D, GlobalMaxPooling1D, GlobalAveragePooling1D, LSTM, Concatenate, GlobalAveragePooling2D, LeakyReLU

    import efficientnet.keras as efn

    if ef_mode == 0:
        base_model = efn.EfficientNetB0(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 1:
        base_model = efn.EfficientNetB1(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 2:
        base_model = efn.EfficientNetB2(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 3:
        base_model = efn.EfficientNetB3(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 4:
        base_model = efn.EfficientNetB4(weights='noisy-student', include_top=False, pooling='avg')  #imagenet or weights='noisy-student'
    elif ef_mode == 5:
        base_model = efn.EfficientNetB5(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 6:
        base_model = efn.EfficientNetB6(weights='noisy-student', include_top=False, pooling='avg')
    elif ef_mode == 7:
        base_model = efn.EfficientNetB7(weights='noisy-student', include_top=False, pooling='avg')

    input1 = Input(shape=input_size, dtype='float32', name='input')
    input2 = Input(shape=(num_frames,85), dtype='float32', name='input2') #1621
    y = TimeDistributed(base_model)(input1)
    y = TimeDistributed(Dropout(dropout_size))(y)
    y = Concatenate()([y, input2])
    y = TimeDistributed(Dense(num_classes, activation='sigmoid', kernel_regularizer=regularizers.l2(l2_reg)))(y)
    y = AutoPool1D(axis=1, name='output')(y)

    m = Model(inputs=[input1, input2], outputs=y)
    m.summary()
    m.name = 'urban_sound_classifier'

    return m
Example No. 10
def cnn_model(img_size, weights):
    # name of the previously trained weight file
    weight_name = "EfficientNetB7_w_NT_299_batch_10_v3"
    """
    Model definition using the EfficientNetB7 architecture
    """
    input_size = (img_size, img_size, 3)
    baseModel = efn.EfficientNetB7(weights="imagenet",
                                   include_top=False,
                                   input_shape=(img_size, img_size, 3),
                                   pooling='max')

    model = Sequential()
    model.add(baseModel)
    model.add(
        Dense(units=512,
              activation='relu',
              kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                           stddev=0.01),
              bias_initializer='zeros'))
    model.add(Dropout(0.5))
    model.add(
        Dense(units=128,
              activation='relu',
              kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                           stddev=0.01),
              bias_initializer='zeros'))
    model.add(
        Dense(units=2,
              activation='softmax',
              kernel_initializer=initializers.RandomNormal(mean=0.0,
                                                           stddev=0.01),
              bias_initializer='zeros'))
    model.summary()

    if weights:
        model.load_weights("models/" + weight_name + ".hdf5")

    # unfreeze the weights and set the base model layers to trainable
    for layer in baseModel.layers:
        layer.trainable = True

    optimizer = Adam(lr=0.0001)
    model.compile(loss="categorical_crossentropy",
                  optimizer=optimizer,
                  metrics=["accuracy"])
    return model
Example No. 11
def frozen_efnet7(input_size, n_classes):
    model_ = efn.EfficientNetB7(
        include_top=False,
        input_tensor=Input(shape=input_size),
    )
    # up to here is where the initial weights are set
    # everything else can be done the same way, but loading local weights still needs to be looked into

    # when run, the initial weights are downloaded automatically; in a quick test the performance was also good

    for layer in model_.layers:
        layer.trainable = False  # freeze the layers for transfer learning

    x = Flatten(input_shape=model_.output_shape[1:])(model_.layers[-1].output)
    x = Dense(n_classes, activation='softmax')(x)

    frozen_model = Model(model_.input, x)

    return frozen_model
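A hypothetical way to compile the frozen model above so that only the new softmax head is trained; the input size, class count and compile settings are assumptions.
# Hypothetical usage of frozen_efnet7(); shapes and compile settings are assumptions.
model = frozen_efnet7(input_size=(224, 224, 3), n_classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()  # only the final Dense layer should be listed as trainable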
Example No. 12
def get_model_effnet(img_shape, img_input, weights, effnet_version):

    if effnet_version == 'B0':
        effnet = efn.EfficientNetB0(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B1':
        effnet = efn.EfficientNetB1(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B2':
        effnet = efn.EfficientNetB2(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B3':
        effnet = efn.EfficientNetB3(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B4':
        effnet = efn.EfficientNetB4(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B5':
        effnet = efn.EfficientNetB5(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    elif effnet_version == 'B6':
        effnet = efn.EfficientNetB6(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)
    else:
        effnet = efn.EfficientNetB7(include_top=False, input_tensor=img_input, weights=weights, pooling=None, input_shape=img_shape)

    return effnet
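An illustrative call for the selector above, adding a pooled classification head on top; the head, class count and shapes are assumptions, and the usual keras.layers imports (Input, GlobalAveragePooling2D, Dense, Model) are taken as given, as in the other snippets.
# Hypothetical usage of get_model_effnet(); the classification head is an assumption.
img_shape = (300, 300, 3)
img_input = Input(shape=img_shape)
effnet = get_model_effnet(img_shape, img_input, weights='imagenet', effnet_version='B3')
x = GlobalAveragePooling2D()(effnet.output)
output = Dense(5, activation='softmax')(x)
model = Model(inputs=img_input, outputs=output)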
Example No. 13
def create_base_model(base_model_name, pretrained=True, IMAGE_SIZE=[300, 300]):
    if pretrained is False:
        weights = None
    else:
        weights = "imagenet"
    if base_model_name == 'B0':
        base = efn.EfficientNetB0(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B1':
        base = efn.EfficientNetB1(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B2':
        base = efn.EfficientNetB2(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B3':
        base = efn.EfficientNetB3(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B4':
        base = efn.EfficientNetB4(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B5':
        base = efn.EfficientNetB5(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B6':
        base = efn.EfficientNetB6(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    elif base_model_name == 'B7':
        base = efn.EfficientNetB7(weights=weights,
                                  include_top=False,
                                  input_shape=[*IMAGE_SIZE, 3])
    else:
        raise ValueError('Unknown base_model_name: {}'.format(base_model_name))
    base = remove_dropout(base)
    base.trainable = True
    return base
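A short sketch of how the base returned above might be extended with a classification head; the head layers and class count are illustrative assumptions, and the snippet's external remove_dropout helper is assumed to be available.
# Hypothetical usage of create_base_model(); head and class count are assumptions.
base = create_base_model('B4', pretrained=True, IMAGE_SIZE=[300, 300])
x = GlobalAveragePooling2D()(base.output)
output = Dense(104, activation='softmax')(x)
model = Model(inputs=base.input, outputs=output)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])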
Example No. 14
if b_name == "2":
    model = efn.EfficientNetB2(weights=weights)

if b_name == "3":
    model = efn.EfficientNetB3(weights=weights)

if b_name == "4":
    model = efn.EfficientNetB4(weights=weights)

if b_name == "5":
    model = efn.EfficientNetB5(weights=weights)

if b_name == "6":
    model = efn.EfficientNetB6(weights=weights)

if b_name == "7":
    model = efn.EfficientNetB7(weights=weights)

image_size = model.input_shape[1]


def read_image(path):
    try:
        return preprocess_input(
            center_crop_and_resize(imread(path)[:, :, :3],
                                   image_size=image_size))
    except Exception:
        return None


def predictor(in_paths=[], batch_size=2):
    #with multiprocessing.Pool(batch_size) as pool:
else:
    # if you want file of a specific extension (.png):
    filelist = [f for f in glob.glob(base_path + "**/*.png", recursive=True)]
    test = []
    for file in filelist:
        img = Image.open(file)
        img = np.array(img)
        test.append(img)
    X_train = np.array(test)

#%% Define the model structure / create the network
import efficientnet.keras as efn
from iterstrat.ml_stratifiers import MultilabelStratifiedShuffleSplit

base_model = efn.EfficientNetB7(weights='imagenet',
                                include_top=False,
                                pooling='avg',
                                input_shape=(100, 100, 3))
x = base_model.output
x = Dropout(0.5)(x)
x = Dense(1024, activation='relu')(x)
output = Dense(nb_classes, activation='softmax')(x)
model = Model(base_model.input, output)
#model = multi_gpu_model(model, gpus = 2)                                       ###in case of using multi GPU
model.summary()
# Original code below by Mingeon Kim
#model.compile(optimizer=optimizers.Adam(lr = 1e-5), loss = 'categorical_crossentropy',  metrics=['accuracy'])
model.compile(optimizer=optimizers.Adam(lr=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])  # modified by jgshin for a low-powered computer

#%% Training / fit
                        type=int,
                        default=1)
    args = parser.parse_args()
    assert args.d in ['imagenet'], "Dataset should be 'imagenet'"
    assert args.attack in ["fgsm", "bim-a", "bim-b", "bim", "jsma",
                           "c+w"], "Attack should be one of 'fgsm', 'bim-a', 'bim-b', 'bim', 'jsma', 'c+w'"
    print(args)

    if args.d == 'imagenet':
        print(
            'Generate the adversarial example of dataset %s using model %s with the attack %s'
            % (args.d, args.model, args.attack))
        if args.model == 'efficientnetb7':
            model = efn.EfficientNetB7(
                weights='imagenet'
            )  # only use without modifying batch size (default: 1)
            classifier = KerasClassifier(model=model, use_logits=False)

        for i in range(args.val_start, args.val_end):
            x_test, y_test = pickle.load(
                open(
                    './dataset_imagenet/%s_%s_val_%i.p' %
                    (args.d, args.model, int(i)), 'rb'))

            if args.attack == 'fgsm':
                attack = FastGradientMethod(classifier=classifier,
                                            eps=0.6,
                                            eps_step=0.6)
            if args.attack == 'bim':
                attack = BasicIterativeMethod(classifier=classifier,
Example No. 17
def get_backbone(name):
    """ Chooses a backbone/ base network.

        Args:
            name: the name of the base network.

        Returns:
            backbone: the Keras model of the chosen network.
    """
    if name == 'EfficientNetB0':
        backbone = efn.EfficientNetB0(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB1':
        backbone = efn.EfficientNetB1(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB2':
        backbone = efn.EfficientNetB2(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB3':
        backbone = efn.EfficientNetB3(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB4':
        backbone = efn.EfficientNetB4(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB5':
        backbone = efn.EfficientNetB5(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB6':
        backbone = efn.EfficientNetB6(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'EfficientNetB7':
        backbone = efn.EfficientNetB7(include_top=c.INCLUDE_TOP,
                                      weights=c.WEIGHTS,
                                      input_shape=c.INPUT_SHAPE,
                                      pooling=c.POOLING)
    elif name == 'VGG16':
        backbone = VGG16(weights=c.WEIGHTS,
                         include_top=c.INCLUDE_TOP,
                         input_shape=c.INPUT_SHAPE,
                         pooling=c.POOLING)
    elif name == 'ResNet50':
        backbone = ResNet50(include_top=c.INCLUDE_TOP,
                            weights=c.WEIGHTS,
                            input_shape=c.INPUT_SHAPE,
                            pooling=c.POOLING)
    elif name == 'InceptionV3':
        backbone = InceptionV3(include_top=c.INCLUDE_TOP,
                               weights=c.WEIGHTS,
                               input_shape=c.INPUT_SHAPE,
                               pooling=c.POOLING)
    elif name == 'DenseNet201':
        backbone = DenseNet201(weights=c.WEIGHTS,
                               include_top=c.INCLUDE_TOP,
                               input_shape=c.INPUT_SHAPE,
                               pooling=c.POOLING)
    else:
        backbone = None
    try:
        backbone.trainable = True
        return backbone
    except Exception as e:
        print(str(e))
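A minimal usage sketch for get_backbone(); it assumes the snippet's config module c (WEIGHTS, INPUT_SHAPE, INCLUDE_TOP, POOLING) is importable, and the dense head added here is an illustrative assumption.
# Hypothetical usage of get_backbone(); the head below is an assumption.
backbone = get_backbone('EfficientNetB3')
if backbone is not None:
    x = Dense(256, activation='relu')(backbone.output)
    output = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=backbone.input, outputs=output)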
Example No. 18
def model(input_form="all", aux_size=0, hyperparameters=dict()):
    print("using the following hyperparameters: {}".format(hyperparameters))

    if input_form == "features":
        return features_model(aux_size, hyperparameters)

    parameters = INPUT_FORM_PARAMETERS[input_form]

    inputs = list()
    outputs = list()

    # retrieving the hyperparameters
    DROPOUT = hyperparameters.get("dropout", 0.5)
    OPTIMIZER = hyperparameters.get("optimizer", "sgd-0001-0.9")
    DEEP_DENSE_TOP = hyperparameters.get("deep-dense-top", True)
    CONVNET_FREEZE_PERCENT = hyperparameters.get("convnet-freeze-percent", 0.0)

    #skip for now
    if parameters["t2"]:
        convnet = efn.EfficientNetB7(
            weights="imagenet",
            include_top=False,
            input_shape=(config.IMAGE_SIZE, config.IMAGE_SIZE, 3),
        )
        for layer in convnet.layers:
            layer.name = "{}_t2".format(layer.name)
        apply_layer_freeze(convnet, CONVNET_FREEZE_PERCENT)
        out = convnet.output
        out = Flatten()(out)
        inputs.append(convnet.input)
        outputs.append(out)

    if parameters["t1"]:
        # init the EfficientNetB7 backbone
        convnet = efn.EfficientNetB7(
            weights="imagenet",
            include_top=False,
            input_shape=(config.IMAGE_SIZE, config.IMAGE_SIZE, 3),
        )
        apply_layer_freeze(convnet, CONVNET_FREEZE_PERCENT)
        out = convnet.output
        out = Flatten()(out)
        inputs.append(convnet.input)
        outputs.append(out)

    if len(outputs) > 1:
        out = concatenate(outputs)
    else:
        out = outputs[0]

    out = Dense(256,
                activation="relu",
                kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
    out = BatchNormalization()(out)

    if DEEP_DENSE_TOP:
        out = Dropout(DROPOUT)(out)
        out = Dense(128,
                    activation="relu",
                    kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
        out = BatchNormalization()(out)
        out = Dropout(DROPOUT)(out)
        out = Dense(64,
                    activation="relu",
                    kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
        out = BatchNormalization()(out)
        out = Dropout(DROPOUT)(out)
        out = Dense(32,
                    activation="relu",
                    kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
        out = BatchNormalization()(out)
        out = Dropout(DROPOUT)(out)

    if parameters["features"]:
        aux_input = Input(shape=(aux_size, ), name='aux_input')
        inputs.append(aux_input)
        out = concatenate([out, aux_input])

    out = Dense(16,
                activation="relu",
                kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)
    out = BatchNormalization()(out)
    predictions = Dense(1,
                        activation="sigmoid",
                        kernel_regularizer=l1_l2(l1=0.01, l2=0.01))(out)

    # creating the final model
    if len(inputs) > 1:
        model = Model(inputs=inputs, outputs=predictions)
    else:
        model = Model(inputs=inputs[0], outputs=predictions)

    # compile the model
    model.compile(loss="binary_crossentropy",
                  optimizer=OPTIMIZERS[OPTIMIZER](),
                  metrics=["accuracy"])

    return model
Example No. 19
                        type=str,
                        default="0, 1")
    args = parser.parse_args()
    assert args.d in ["mnist", "cifar", 'imagenet'
                      ], "Dataset should be 'mnist', 'cifar' or 'imagenet'"
    assert args.attack in [
        "fgsm", "bim", 'jsma', 'c+w'
    ], "Attack should be one of 'fgsm', 'bim', 'jsma', 'c+w'"
    assert args.val_adv_ats ^ args.ts ^ args.random_train ^ args.random_train_ats ^ args.random_train_label ^ args.val_ats ^ args.lsa ^ args.dsa ^ args.conf ^ args.true_label ^ args.pred_label ^ args.adv_lsa ^ args.adv_dsa ^ args.adv_conf, "Select either 'lsa' or 'dsa' or etc."
    print(args)

    if args.d == 'imagenet':
        if args.model == 'efficientnetb0':
            model = efn.EfficientNetB0(weights='imagenet')
        if args.model == 'efficientnetb7':
            model = efn.EfficientNetB7(weights='imagenet')

        args.image_size = model.input_shape[1]
        args.num_classes = 1000

        if args.random_train == True:
            print(
                'Loading training IMAGENET dataset -----------------------------'
            )
            path_img_train = '../datasets/ilsvrc2012/images/train/'
            path_train_info = '../datasets/ilsvrc2012/images/train.txt'
            # load_imagenet_random_train(path_img=path_img_train, path_info=path_train_info, args=args)
            load_imagenet_random_train_ver2(path_img=path_img_train,
                                            path_info=path_train_info,
                                            args=args)
from keras.datasets import cifar10
from keras.engine import Model
from keras.layers import Dropout, Flatten, Dense, BatchNormalization
from keras.optimizers import Adam
from keras.utils import np_utils
import efficientnet.keras as efn
import matplotlib.pyplot as plt
import numpy as np
img_width, img_height = 32, 32
base_model = efn.EfficientNetB7(weights='imagenet',
                                include_top=False,
                                input_shape=(32, 32, 3),
                                drop_connect_rate=0.5)
nb_epoch = 50
nb_classes = 10
seed = 100
np.random.seed(seed)

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train, X_test = X_train / 255.0, X_test / 255.0
X_train = X_train.reshape(X_train.shape[0], 32, 32, 3)
X_test = X_test.reshape(X_test.shape[0], 32, 32, 3)
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)

last = base_model.get_layer('top_activation').output
x = Flatten()(last)
x = Dense(1024, activation='relu', kernel_initializer="he_uniform")(x)
x = Dense(512, activation='relu', kernel_initializer="he_uniform")(x)
x = Dense(256, activation='relu', kernel_initializer="he_uniform")(x)
x = Dropout(0.5)(x)
    dy = (h - nh) // 2
    image_data = 0  # create a temporary placeholder
    if proc_img:
        image = image.resize((nw, nh),
                             Image.BICUBIC)  # bicubic filtering: cubic interpolation over a 4x4 neighbourhood of the input image
        new_image = Image.new('RGB', (w, h), (128, 128, 128))  # open a new image filled with the value 128
        new_image.paste(image, (dx, dy))
        image_data = np.array(new_image) / 255.
        #image_data = image_data[np.newaxis,:,:,:]
        image_data = np.expand_dims(image_data, axis=0)
    return (image_data)  # return the data (represented in grayscale)


input_shape = (224, 224)

model = efn.EfficientNetB7(weights='imagenet', include_top=False)
x = model.output
x = layers.GlobalAveragePooling2D()(x)

model = Model(inputs=model.input, outputs=x)

import os

img_path = 'train/train/17/'
files = os.listdir(img_path)

imgs = []
features = []
for f in files:
    img = test_resize(img_path + f, input_shape)
    imgs.append(img_path + f)
distribution = {}

SIZE = 350
batch_size = 8
maxepoches = 5
learning_rate = 0.001

config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True

import keras.optimizers as Optimizer
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau

base_model_1 = efn.EfficientNetB7(include_top=False,
                                  weights='imagenet',
                                  classes=42,
                                  input_shape=(SIZE, SIZE, 3))

model_1 = keras.Sequential()
model_1.add(base_model_1)
model_1.add(keras.layers.GlobalAveragePooling2D())
model_1.add(keras.layers.Dropout(0.25))
model_1.add(keras.layers.Dense(42, activation=('softmax')))

sgd = keras.optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)
model_1.compile(optimizer=sgd,
                loss='categorical_crossentropy',
                metrics=['accuracy'])

directory = "shopee-product-detection-dataset/train/train/"
Example No. 23
def build_model(input_shape, args):
    D = args.d
    F = args.f
    V = args.v

    input_tensor = Input(shape=input_shape)

    if args.tf == "in":
        base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = in_pi
    elif args.tf == "inr":
        base_model = InceptionResNetV2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = inr_pi
    elif args.tf == "vg":
        base_model = VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = vg_pi
    elif args.tf == "xc":
        base_model = Xception(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = xc_pi
    elif args.tf == "re":
        base_model = ResNet50(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = re_pi
    elif args.tf == "de":
        base_model = DenseNet121(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = de_pi
    elif args.tf == "mo":
        base_model = MobileNet(weights='imagenet', include_top=False, input_tensor=input_tensor)
        #pi = mo_pi
    elif args.tf.find("ef") > -1:
        if args.tf == "ef0":
            base_model = efn.EfficientNetB0(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef1":
            base_model = efn.EfficientNetB1(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef2":
            base_model = efn.EfficientNetB2(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef3":
            base_model = efn.EfficientNetB3(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef4":
            base_model = efn.EfficientNetB4(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef5":
            base_model = efn.EfficientNetB5(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef6":
            base_model = efn.EfficientNetB6(weights='imagenet', include_top=False, input_tensor=input_tensor)
        elif args.tf == "ef7":
            base_model = efn.EfficientNetB7(weights='imagenet', include_top=False, input_tensor=input_tensor)
    else:
        print("unknown network type:", args.tf)
        exit()

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(F, activation='relu')(x)
    if D > 0:
        x = Dropout(D)(x)
 
    pred = Dense(nb_classes, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=pred)

    layer_num = len(base_model.layers)
    for layer in base_model.layers[:int(layer_num * V)]:
        layer.trainable = False

    return model #, pi
Example No. 24
from math import pow, floor


#-------------
def scheduler(epoch):
    init_lrate = 0.0002
    drop = 0.8
    epochs_drop = 2
    lrate = init_lrate * pow(drop, floor(1 + epoch) / epochs_drop)
    return lrate
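A quick sanity check of the decay schedule above; the values follow directly from the formula as written, where floor(1 + epoch) is evaluated before the division by epochs_drop.
# Illustrative check of scheduler(); not part of the original snippet.
for epoch in (0, 1, 3, 5):
    print(epoch, scheduler(epoch))
# epoch 0 -> 0.0002 * 0.8**0.5 ≈ 1.79e-4
# epoch 1 -> 0.0002 * 0.8**1.0 = 1.60e-4
# epoch 3 -> 0.0002 * 0.8**2.0 = 1.28e-4
# epoch 5 -> 0.0002 * 0.8**3.0 ≈ 1.02e-4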


change_Lr = LearningRateScheduler(scheduler)

efficient_net = efn.EfficientNetB7(weights='imagenet',
                                   include_top=False,
                                   input_shape=[150, 150, 3])
efficient_net.summary()

base_dir = "D:/document/data/food-11"
train_dir = os.path.join(base_dir, 'training')
validation_dir = os.path.join(base_dir, 'validation')

train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=20,
    # width_shift_range=0.1,
    # height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,