def build_model(number_labels):
    input_tensor = Input(shape=(image_size, image_size, 3))
    base_model = InceptionResNetV2(
        include_top=False,
        weights='imagenet',
        input_tensor=input_tensor,
        input_shape=(image_size, image_size, 3),
        pooling='avg'
    )
    for layer in base_model.layers:
        layer.trainable = True

    op = Dense(256, activation='relu')(base_model.output)
    op = Dropout(0.25)(op)
    output_tensor = Dense(number_labels, activation='softmax')(op)

    model = Model(inputs=input_tensor, outputs=output_tensor)
    return model
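
A minimal usage sketch, assuming `image_size` is set at module level and one-hot labels are available; the optimizer and epoch count are illustrative, not from the original example.

# Hypothetical usage of build_model(); image_size, x_train and y_train are assumptions.
image_size = 299
model = build_model(number_labels=5)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)
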
Example #2
def build_model(model_type=backbone_type):
    if model_type == 'alexnet':
        base_model = get_alexnet_backbone()
    elif model_type == 'manga_facenet':
        base_model = get_mangafacenet_backbone()
    elif model_type == 'sketch_a_net':
        base_model = get_sketch_a_net_backbone()
    elif model_type == 'inception_resnet_v2':
        base_model = InceptionResNetV2(include_top=False,
                                       weights='imagenet',
                                       input_shape=(img_size, img_size,
                                                    channel),
                                       pooling='avg')
    else:
        base_model = VGG16(include_top=False,
                           weights='imagenet',
                           input_shape=(img_size, img_size, channel),
                           pooling='avg')
    image_input = base_model.input
    x = base_model.layers[-1].output
    out = Dense(embedding_size)(x)
    image_embedder = Model(image_input, out)

    input_a = Input((img_size, img_size, channel), name='anchor')
    input_p = Input((img_size, img_size, channel), name='positive')
    input_n = Input((img_size, img_size, channel), name='negative')

    normalize = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='normalize')

    x = image_embedder(input_a)
    output_a = normalize(x)
    x = image_embedder(input_p)
    output_p = normalize(x)
    x = image_embedder(input_n)
    output_n = normalize(x)

    merged_vector = concatenate([output_a, output_p, output_n], axis=-1)

    model = Model(inputs=[input_a, input_p, input_n], outputs=merged_vector)
    return model
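
The merged output above concatenates the three L2-normalized embeddings along the last axis, so a matching loss has to slice them apart again. A hedged sketch of such a triplet loss, assuming `embedding_size` is defined as in the example; the margin value is an arbitrary choice.

# Hedged triplet-loss sketch for the concatenated (anchor, positive, negative) output.
def triplet_loss(y_true, y_pred, margin=0.2):
    anchor = y_pred[:, :embedding_size]
    positive = y_pred[:, embedding_size:2 * embedding_size]
    negative = y_pred[:, 2 * embedding_size:]
    pos_dist = K.sum(K.square(anchor - positive), axis=-1)
    neg_dist = K.sum(K.square(anchor - negative), axis=-1)
    return K.mean(K.maximum(pos_dist - neg_dist + margin, 0.0))

# model.compile(optimizer='adam', loss=triplet_loss)
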
Example #3
def get_resnet(num_of_layers, input_size):
    '''
    Returns the non-trainable InceptionResNetV2 layers as a Keras Model

    Given a number of layers, this function iterates over the InceptionResNetV2
    model and returns a sub-model ending at the matching layer. Here
    'block35_{}_ac'.format(num_of_layers) is used to identify the end layer.

    Parameters
    ----------
    num_of_layers: int
        number of layers you want to select from the InceptionResNetV2 model
    input_size: int
        the dimension of square image fed as input to the Model

    Returns
    -------
    Model
        a non-trainable Keras Model instance containing the layers
        from InceptionResNetV2 model
    '''
    inceptionresnet = InceptionResNetV2(weights='imagenet',
                                        include_top=False,
                                        input_shape=(input_size, input_size,
                                                     3))
    i = 0
    while (True):
        if inceptionresnet.layers[i].name == 'block35_{}_ac'.format(
                num_of_layers):
            break
        i += 1
    model = Model(inputs=inceptionresnet.layers[0].input,
                  outputs=inceptionresnet.layers[i].output,
                  name='inceptionresnet')
    for layer in model.layers:
        layer.trainable = False
    model.compile('adadelta', 'mse')
    return model
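
A brief usage sketch; the block count and input size below are arbitrary (InceptionResNetV2 has blocks `block35_1_ac` through `block35_10_ac`, so `num_of_layers` can be at most 10).

# Hypothetical usage of get_resnet(); the arguments are illustrative only.
feature_extractor = get_resnet(num_of_layers=10, input_size=256)
# feature_maps = feature_extractor.predict(image_batch)  # frozen spatial feature maps
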
Example #4
def load_model(model):
    K.clear_session()
    if model == "ResNet152":
        base_model = ResNet152(include_top=False,
                               weights='imagenet',
                               input_shape=(299, 299, 3),
                               pooling="max")
    elif model == "InceptionV3":
        base_model = InceptionV3(include_top=False,
                                 weights='imagenet',
                                 input_shape=(299, 299, 3),
                                 pooling="max")
    elif model == "InceptionResNetV2":
        base_model = InceptionResNetV2(include_top=False,
                                       weights='imagenet',
                                       input_shape=(299, 299, 3),
                                       pooling="max")
    elif model == "Xception":
        base_model = Xception(include_top=False,
                              weights='imagenet',
                              input_shape=(299, 299, 3),
                              pooling="max")
    elif model == "VGG":
        base_model = VGG19(include_top=False,
                           weights='imagenet',
                           input_shape=(299, 299, 3),
                           pooling="max")
    else:
        raise ValueError('Unknown model name: ' + model)
    base_model.summary()
    x = base_model.output
    dense1_ = Dense(512, activation='relu')
    dense1 = dense1_(x)
    x = Dropout(0.2)(dense1)
    dense2 = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(dense2)
    dense3 = Dense(128, activation='relu')(x)
    pred_output = Dense(1, activation='sigmoid')(dense3)
    model = Model(inputs=base_model.input, outputs=[pred_output])
    model.summary()
    return model
Example #5
def getKerasModel(categorie_size, img_height, img_width):
    # # Base model Conv layers + Customize FC layers
    if K.image_data_format() == 'channels_first':
        the_input_shape = (3, img_width, img_height)
    else:
        the_input_shape = (img_width, img_height, 3)
    # don't include the top (final FC) layers.
    base_model = InceptionResNetV2(weights='imagenet',
                                   include_top=False,
                                   input_shape=the_input_shape)
    # add FC layers.
    x = base_model.output
    x = Dropout(0.5, name='dropout_1')(x)
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    predictions = Dense(categorie_size,
                        activation='softmax',
                        name='output_layer')(x)
    # this is the final model we will train
    model = Model(inputs=base_model.input, outputs=predictions)
    model.summary()

    return base_model, model
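
Returning `base_model` alongside the assembled model supports the usual two-phase transfer-learning recipe; a hedged sketch of how the pair might be used (optimizers and epoch counts are assumptions).

# Hypothetical two-phase fine-tuning with the (base_model, model) pair returned above.
base_model, model = getKerasModel(categorie_size=10, img_height=299, img_width=299)

# Phase 1: train only the new classification head.
for layer in base_model.layers:
    layer.trainable = False
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(train_data, epochs=5)

# Phase 2: unfreeze the backbone and fine-tune at a low learning rate.
for layer in base_model.layers:
    layer.trainable = True
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),  # assumes keras.optimizers.SGD is imported
              loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(train_data, epochs=5)
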
Example #6
def create_model(train_generator, validation_generator):
    l2_reg = regularizers.l2({{loguniform(log(1e-6), log(1e-2))}})
    base_model = InceptionResNetV2(weights='imagenet', include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout({{uniform(0, 1)}})(x)
    x = Dense(1024,
              activation='relu',
              kernel_regularizer=l2_reg,
              activity_regularizer=l2_reg)(x)
    x = Dropout({{uniform(0, 1)}})(x)
    predictions = Dense(num_classes,
                        activation='softmax',
                        kernel_regularizer=l2_reg,
                        activity_regularizer=l2_reg)(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    model_weights_path = os.path.join('models', best_model)
    model.load_weights(model_weights_path)

    for i in range(int(len(base_model.layers) * {{uniform(0, 1)}})):
        layer = base_model.layers[i]
        layer.trainable = False

    adam = keras.optimizers.Adam(lr={{loguniform(log(1e-6), log(1e-3))}})
    model.compile(loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  optimizer=adam)

    # print(model.summary())

    model.fit_generator(train_generator,
                        steps_per_epoch=num_train_samples // batch_size,
                        validation_data=validation_generator,
                        validation_steps=num_valid_samples // batch_size)

    score, acc = model.evaluate_generator(validation_generator)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #7
def get_inception_model(params):
    input_tensor = Input(shape=(*params['dim'], 3))
    x = BatchNormalization()(input_tensor)

    net = InceptionResNetV2(include_top=False,
                            weights='imagenet',
                            input_tensor=None,
                            input_shape=(*params['dim'], 3))

    x = net(x)
    x = get_conv_layer(x, 256, (1,1), bn=False, pool=False, drop=False)
    x = get_conv_layer(x, 64, (1,1), bn=False, pool=False, drop=False)
    x = Flatten()(x)
    x = Dropout(0.5)(x)

    x = get_fc_layer(x, 1024)
    x = get_fc_layer(x, 256)

    output_layer = Dense(params['n_classes'], activation='sigmoid')(x)
    model = Model(inputs=input_tensor, outputs=output_layer)

    return model
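
`get_conv_layer` and `get_fc_layer` are project-specific helpers that are not shown in this example; the sketch below is only a guess at what they might look like, not the original implementation.

# Hypothetical helper definitions (assumed, not from the original project).
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization, Dense, Dropout

def get_conv_layer(x, filters, kernel, bn=True, pool=True, drop=True):
    x = Conv2D(filters, kernel, padding='same', activation='relu')(x)
    if bn:
        x = BatchNormalization()(x)
    if pool:
        x = MaxPooling2D()(x)
    if drop:
        x = Dropout(0.25)(x)
    return x

def get_fc_layer(x, units, drop=0.5):
    x = Dense(units, activation='relu')(x)
    x = Dropout(drop)(x)
    return x
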
Example #8
def create_model_ResNetV2():
    INPUT_SIZE = 256
    xi = Input([INPUT_SIZE, INPUT_SIZE, 3])
    x = Reshape([INPUT_SIZE, INPUT_SIZE, 3])(xi)

    ir = InceptionResNetV2(include_top=False,
                           weights='imagenet',
                           input_tensor=x,
                           input_shape=(INPUT_SIZE, INPUT_SIZE, 3),
                           pooling='avg')

    p = Dense(512, kernel_initializer='glorot_normal',
              activation='relu')(ir.output)
    p = Dense(1024, kernel_initializer='glorot_normal',
              activation='relu')(p)
    p = Dense(CLASSES, activation='softmax')(p)

    # ============ Model
    model = Model(ir.input, p)
    model.compile('adam', 'categorical_crossentropy', metrics=['acc'])
    # model.summary()
    return model
Example #9
    def InceptionResNetV2_PlusFC(self, params):

        assert len(params['INPUTS']) == 1, 'Number of inputs must be one.'
        input_id = list(params['INPUTS'])[0]
        assert params['INPUTS'][input_id]['type'] == 'raw-image', 'Input must be of type "raw-image".'

        self.ids_inputs = list(params['INPUTS'])
        self.ids_outputs = list(params['OUTPUTS'])

        input_shape = params['INPUTS'][input_id]['img_size_crop']
        image = Input(name=self.ids_inputs[0], shape=input_shape)

        ##################################################
        # Load Inception model pre-trained on ImageNet
        self.model = InceptionResNetV2(weights='imagenet', input_tensor=image)

        # Recover input layer
        #image = self.model.get_layer(self.ids_inputs[0]).output

        # Recover last layer kept from original model: 'fc2'
        x = self.model.get_layer('avg_pool').output
        ##################################################
        #x = Flatten()(x)
        # Define outputs
        outputs_list = []
        for id_name, data in params['OUTPUTS'].items():

            # Count the number of output classes
            num_classes = 0
            with open(params['DATA_ROOT_PATH'] + '/' + data['classes'], 'r') as f:
                for line in f:
                    num_classes += 1
            if params['EMPTY_LABEL']:
                num_classes += 1
            # Define only a FC output layer (+ activation) per output
            out = Dense(num_classes)(x)
            out_act = Activation(data['activation'], name=id_name)(out)
            outputs_list.append(out_act)


        self.model = Model(inputs=image, outputs=outputs_list)
Example #10
def image_data_convert(file_list, network, resize_method, include_top=False):
    print('Extracting features from images...')
    output_features = []
    if network == 'inception_resnet_v2':
        img_list = []
        for j in tqdm(range(len(file_list))):
            img_file = file_list[j]
            orig_img = Image.open('./thumbnails/' + img_file + '.jpg')
            resized_img = resize_image(orig_img, 299, 299, resize_method)
            img_list.append(resized_img)
        img_list = np.array(img_list)

        inputs = Input(shape=(299, 299, 3))
        model = InceptionResNetV2(include_top=include_top,
                                  weights='imagenet',
                                  input_tensor=inputs)
        output_features = model.predict(img_list)
    elif network == 'mobilenet_v2':
        img_list = []
        for j in tqdm(range(len(file_list))):
            img_file = file_list[j]
            orig_img = Image.open('./thumbnails/' + img_file + '.jpg')
            resized_img = resize_image(orig_img, 224, 224, resize_method)
            img_list.append(resized_img)
        img_list = np.array(img_list)

        inputs = Input(shape=(224, 224, 3))
        model = MobileNetV2(include_top=include_top,
                            weights='imagenet',
                            input_tensor=inputs)  # once tested with include_top=True
        output_features = model.predict(img_list)
        #print(output_features[0])
    else:
        return None
    final_out = []
    for i in range(len(output_features)):
        final_out.append(output_features[i].flatten())
    final_out = np.array(final_out)
    return final_out
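
Note that the resized images above are passed to `model.predict` as raw pixel values; with ImageNet weights, the backbone-specific scaling is normally applied first. A hedged sketch of that step:

# Hedged sketch: the model-specific preprocessing usually applied before predict().
from keras.applications.inception_resnet_v2 import preprocess_input

img_list = preprocess_input(img_list.astype('float32'))  # scales pixels to [-1, 1]
# output_features = model.predict(img_list)
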
Example #11
def load_inception_resnetv2(input_size):
    model = Sequential()
    model.add(
        BatchNormalization(input_shape=(input_size, input_size,
                                        input_channels)))
    base_model = InceptionResNetV2(include_top=False,
                                   weights="imagenet",
                                   input_shape=(input_size, input_size,
                                                input_channels))
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dense(n_classes, activation="softmax"))

    # change the trainable to True when doing fine tuning
    # for layer in base_model.layers:
    #     layer.trainable = True

    for layer in base_model.layers:
        layer.trainable = False  # turn trainable flag to true when doing fine tuning
    model.summary()
    print("training with inception_resnetv2")
    return model
Example #12
def inception_resnet_retinanet(num_classes,
                               backbone='inception_resnet',
                               inputs=None,
                               modifier=None,
                               **kwargs):
    """ Constructs a RDD_2018 model using a densenet backbone.

    Args
        num_classes: Number of classes to predict.
        backbone: Which backbone to use (one of ('densenet121', 'densenet169', 'densenet201')).
        inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
        modifier: A function handler which can modify the backbone before using it in RDD_2018 (this can be used to freeze backbone layers for example).

    Returns
        RetinaNet model with a DenseNet backbone.
    """
    # choose default input
    if inputs is None:
        inputs = keras.layers.Input(shape=(600, 600, 3))

    # create the inception resnet backbone

    inception_resnet = InceptionResNetV2(input_tensor=inputs,
                                         include_top=False)
    if modifier:
        inception_resnet = modifier(inception_resnet)

    # create the full model
    # print(inception_resnet.summary())
    # layer_names = ["block35_10_mixed", "block17_20_mixed", "block8_10_mixed"]
    layer_names = ["mixed_6a", "mixed_7a", "conv_7b"]
    layer_outputs = [
        inception_resnet.get_layer(name).output for name in layer_names
    ]
    return retinanet.retinanet(inputs=inputs,
                               num_classes=num_classes,
                               backbone_layers=layer_outputs,
                               **kwargs)
Example #13
def create_model_inception(out_shape, config, in_shape=(120, 160, 1)):
    out_height, out_width = out_shape

    # increase channels from 1 -> 3
    input_layer = Input(in_shape)
    input_stacked = Lambda(stack_channels,
                           output_shape=output_of_stack_channels)(input_layer)

    # learn a normalization, roughly map distributions Kinect data -> RGB
    input_normalized = BatchNormalization()(input_stacked)

    # pre-trained inception feature extraction
    weights = 'imagenet' if config["pretrained"] else None
    base_model = InceptionResNetV2(weights=weights,
                                   include_top=False,
                                   input_shape=(120, 160, 3))
    #plot_model(base_model, to_file='model_inception.png', show_shapes=True, show_layer_names=True)
    # , input_tensor=input_normalized)
    # x = base_model.output
    features = Model(inputs=input_layer, outputs=base_model(input_normalized))
    x = features.output

    # add MLP on top

    x = Flatten()(x)
    x = Dense(config["num_features"])(x)
    x = Dense(out_height * out_width)(x)
    x = Reshape((out_height, out_width))(x)

    # combine into single model object
    model = Model(inputs=input_layer, outputs=x)

    # freeze layers
    if config["pretrained"]:
        for layer in base_model.layers:
            layer.trainable = False

    return model
Example #14
def InceptionNet(x_train, y_train, params):
    base_model = InceptionResNetV2(
        include_top=False,
        input_shape=(params["img_size"], params["img_size"], 3),
        weights="imagenet",
    )
    for layer in base_model.layers[:params["freeze_layers"]]:
        layer.trainable = False
    for layer in base_model.layers[params["freeze_layers"]:]:
        layer.trainable = True

    # for layer in base_model.layers:
    #     print(layer, layer.trainable)

    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    # model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(8, activation="softmax"))
    model.summary()
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    # es = EarlyStopping(mode="min", verbose=1, patience=10)
    mc = ModelCheckpoint("model.h5", monitor="val_acc", save_best_only=True)
    out = model.fit(
        x_train,
        y_train,
        validation_split=0.1,
        epochs=params["epochs"],
        batch_size=params["batch_size"],
        verbose=1,
        shuffle=True,
        callbacks=[mc],
    )

    return out, model
Example #15
def get_model(model_name, shape, n_classes):
    if model_name == 'xception':
        base_model = Xception(include_top=False,
                              input_shape=(shape, shape, 3),
                              pooling='avg')
        drop = .1
    elif model_name == 'incres':
        base_model = InceptionResNetV2(include_top=False,
                                       input_shape=(shape, shape, 3),
                                       pooling='avg')
        drop = .2
    elif model_name == 'inception':
        base_model = InceptionV3(include_top=False,
                                 input_shape=(shape, shape, 3),
                                 pooling='avg')
        drop = .1
    elif model_name == 'resnet':
        base_model = ResNet50(include_top=False,
                              input_shape=(shape, shape, 3),
                              pooling='avg')
        drop = .1
    elif model_name == 'mobilenet':
        base_model = MobileNet(include_top=False,
                               input_shape=(shape, shape, 3),
                               pooling='avg')
        drop = .1
    else:
        raise ValueError('Network name is unknown')

    x = base_model.output
    x = Dropout(drop)(x)
    predictions = Dense(n_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    for layer in base_model.layers:
        layer.trainable = False

    return model, preprocess_input
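
A minimal usage sketch of `get_model`; the directory layout, batch size, and epoch count are assumptions, and `ImageDataGenerator` is assumed to come from keras.preprocessing.image.

# Hypothetical usage of get_model(); paths and hyperparameters are illustrative.
from keras.preprocessing.image import ImageDataGenerator

model, preprocess = get_model('incres', shape=299, n_classes=5)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

datagen = ImageDataGenerator(preprocessing_function=preprocess)
# train_flow = datagen.flow_from_directory('data/train', target_size=(299, 299), batch_size=32)
# model.fit_generator(train_flow, epochs=5)
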
Example #16
def main():
    parser = argparse.ArgumentParser(description='Run experiment')
    parser.add_argument('--imgpath',
                        default='results.json',
                        type=str,
                        help='Path to directory of input images')
    parser.add_argument(
        '--dest',
        default='/gradcam/',
        type=str,
        help=
        'Name/path of destination directory for Grad-CAM images to be saved')
    args = parser.parse_args()

    # # check files
    # if not args.img_dir.exists():
    #     raise FileNotFoundError(args.img_dir)

    model = InceptionResNetV2(include_top=True,
                              weights="imagenet",
                              classes=1000)

    get_gradcam_images(args.imgpath, model, args.dest)
Example #17
def inception_resnetv2(input_image_size, number_of_output_categories):

    model = InceptionResNetV2(include_top=False,
                              weights="imagenet",
                              input_shape=(input_image_size, input_image_size,
                                           3))

    #Adding custom Layers
    x = model.output
    x = Flatten()(x)
    x = Dense(1024, activation="relu")(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation="relu")(x)
    predictions = Dense(number_of_output_categories, activation='sigmoid')(x)

    # creating the final model
    model = Model(inputs=model.input, outputs=predictions)

    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(),
                  metrics=['accuracy'])

    return model
Example #18
    def _get_base_model(self):
        """
        Define base model used in transfer learning.
        """
        if self.base_model == 'VGG16':
            base_model = VGG16(weights='imagenet',
                               include_top=False,
                               input_shape=self.X.shape[1:])
        elif self.base_model == 'VGG19':
            base_model = VGG19(weights='imagenet',
                               include_top=False,
                               input_shape=self.X.shape[1:])
        elif self.base_model == 'ResNet50':
            base_model = ResNet50(weights='imagenet',
                                  include_top=False,
                                  input_shape=self.X.shape[1:])
        elif self.base_model == 'Xception':
            base_model = Xception(weights='imagenet',
                                  include_top=False,
                                  input_shape=self.X.shape[1:])
        elif self.base_model == 'InceptionV3':
            base_model = InceptionV3(weights='imagenet',
                                     include_top=False,
                                     input_shape=self.X.shape[1:])
        elif self.base_model == 'InceptionResNetV2':
            base_model = InceptionResNetV2(weights='imagenet',
                                           include_top=False,
                                           input_shape=self.X.shape[1:])
        elif self.base_model == 'MobileNet':
            base_model = MobileNet(weights='imagenet',
                                   include_top=False,
                                   input_shape=self.X.shape[1:])
        else:
            raise ValueError(
                'Valid base model values are: "VGG16", "VGG19", "ResNet50", "Xception", '
                '"InceptionV3", "InceptionResNetV2", "MobileNet".')
        return base_model
Example #19
def declare_model(num_classes, architecture, model_info, dropout=0, weights='imagenet'):
    if architecture == 'inception_v3':
        base_model = InceptionV3(weights = weights, include_top=False)

    elif architecture == 'inception_resnet_v2':
        base_model = InceptionResNetV2(weights=weights, include_top=False)

    elif architecture == 'resnet_v2':
        base_model = resnet152_model(model_info['input_width'], model_info['input_height'], model_info['input_depth'],
                                     model_info['pretrained_weights'], num_classes)

    num_base_layers = len(base_model.layers)
    init = TruncatedNormal(mean=0.0, stddev=0.001, seed=None)
    model_input = base_model.input
    x = base_model.output

    if architecture in ('inception_v3', 'inception_resnet_v2'):
        x = GlobalAveragePooling2D()(x)
        # x = Dense(64, input_shape=(2048,), activation='relu',  kernel_initializer=init)(x)
        x = Dropout(dropout)(x)

        predictions = Dense(num_classes, input_shape=(model_info['bottleneck_tensor_size'],), activation='softmax', kernel_initializer=init)(x)
        model = Model(inputs=model_input, outputs=predictions)

        return model, num_base_layers

    elif architecture == 'resnet_v2':
        x_newfc = AveragePooling2D((7, 7), name='avg_pool')(x)
        x_newfc = Flatten()(x_newfc)
        x_newfc = Dropout(dropout)(x_newfc)
        x_newfc = Dense(num_classes, activation='softmax', name='fc8', kernel_initializer=init)(x_newfc)
        model = Model(inputs=model_input, outputs=x_newfc)

        return model, num_base_layers
Example #20
def main():
    input_shape = 250, 250, 3

    base = InceptionResNetV2(input_shape=input_shape, include_top=False)

    x = GlobalAvgPool2D()(base.output)
    x = Dense(units=500, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(units=500, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(units=500, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(units=NUM_OF_LANDMARKS * 2, activation='linear')(x)

    model = Model(inputs=base.input, outputs=x)
    model.compile(optimizer='adam', loss=root_mean_squared_error)

    train_gen = AnnotatedImagesGenerator(root=LFPW_TRAIN,
                                         batch_size=32,
                                         target_size=input_shape[:2],
                                         infinite=True,
                                         model_preprocessing=preprocess_input,
                                         augment=True)

    valid_gen = AnnotatedImagesGenerator(root=LFPW_VALID,
                                         batch_size=32,
                                         target_size=input_shape[:2],
                                         infinite=True,
                                         model_preprocessing=preprocess_input,
                                         augment=False)

    model.fit_generator(train_gen,
                        steps_per_epoch=train_gen.n_batches,
                        validation_data=valid_gen,
                        validation_steps=valid_gen.n_batches)

    print('Done!')
Example #21
def inception_resnetv2(img_height, img_width=None, dropout=0.25):
    if not img_width:
        img_width = img_height
    input_tensor = Input(shape=(img_height, img_width, 3))
    # preprocessing was missing originally; apply it here
    x = Lambda(inception_resnet2_process)(input_tensor)
    base_model = InceptionResNetV2(input_tensor=x,
                                   weights='imagenet',
                                   include_top=False)

    for layer in base_model.layers:
        layer.trainable = False

    x = GlobalAveragePooling2D()(base_model.output)
    x = Dropout(dropout)(x)
    x = Dense(1, activation='sigmoid', kernel_initializer='he_normal')(x)

    model_inceptionResNetV2 = Model(inputs=base_model.input, outputs=x)
    model_inceptionResNetV2.compile(optimizer='adadelta',
                                    loss='binary_crossentropy',
                                    metrics=['accuracy'])
    # model_inceptionResNetV2.summary()
    print('model_inceptionResNetV2 has %d layers.' %
          len(model_inceptionResNetV2.layers))
    return model_inceptionResNetV2
Example #22
def generate_model(application, num_class, img_size, pre_weights=None):
    if application == 'InceptionV3':
        base_model = InceptionV3(input_shape=(img_size, img_size, 3),
                                 include_top=False,
                                 weights=pre_weights)
    elif application == 'MobileNet':
        base_model = MobileNet(input_shape=(img_size, img_size, 3),
                               include_top=False,
                               weights=pre_weights)
    elif application == 'VGG19':
        base_model = VGG19(input_shape=(img_size, img_size, 3),
                           weights=pre_weights,
                           include_top=False)
    elif application == 'InceptionResNetV2':
        base_model = InceptionResNetV2(input_shape=(img_size, img_size, 3),
                                       weights=pre_weights,
                                       include_top=False)
    elif application == 'Xception':
        base_model = Xception(input_shape=(img_size, img_size, 3),
                              weights=pre_weights,
                              include_top=False)
    else:
        raise ValueError('Unknown application type!')

    x = base_model.output
    feature = Flatten(name='feature')(x)
    predictions = Dropout(0.5)(feature)
    # x = GlobalAveragePooling2D()(x)
    # predictions = Dense(1024, activation='relu')(x)
    predictions = Dense(num_class, activation='softmax',
                        name='pred',
                        kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(predictions)
    model = Model(inputs=base_model.input, outputs=[predictions, feature])
    model.summary()
    print(model.output)
    return model
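
Since the model has two outputs (the softmax prediction and the flattened feature vector), `predict` returns one array per output head; a small hedged usage sketch with an assumed input batch.

# Hypothetical usage of generate_model(); arguments and image_batch are assumptions.
model = generate_model('InceptionResNetV2', num_class=10, img_size=299, pre_weights='imagenet')
# class_probs, features = model.predict(image_batch)  # image_batch: (N, 299, 299, 3)
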
Example #23
	def create_model(self):
		resizer = models.Sequential()
		resizer.add(layers.Lambda(lambda image: ktf.image.resize_images(image,
			(299,299)), input_shape=self.input_shape))


		feature_extractor = InceptionResNetV2(include_top=False,
			input_shape=(299,299,3))

		for layer in feature_extractor.layers:
			layer.trainable = False

		classifier = models.Sequential()
		classifier.add(layers.Flatten())
		#classifier.add(layers.Dense(self.fc_neurons,activation='relu'))
		#classifier.add(layers.Dropout(self.dropout_rate))
		classifier.add(layers.Dense(self.num_classes,activation='softmax'))

		optimizer = optimizers.Adam(lr=self.lr)

		model = models.Sequential([resizer, feature_extractor,
			classifier])

		if self.gpus > 1:
			parallel_model = multi_gpu_model(model, gpus=self.gpus)
			parallel_model.compile(optimizer=optimizer,
				loss=keras.losses.categorical_crossentropy,
				metrics=['accuracy'])
			model.summary()
			return parallel_model
		else:
			model.compile(optimizer=optimizer,
				loss=keras.losses.categorical_crossentropy,
				metrics=['accuracy'])
			model.summary()
			return model
Example #24
def build_model(input_shape):

    input_tensor = Input(shape=(299, 299, 3))
    # build the pre-trained backbone without the classification head
    base_model = InceptionResNetV2(weights=resnet_weights_path,
                                   input_tensor=input_tensor,
                                   include_top=False)

    # add a global average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    '''
    #x = Dropout(0.5)(x)
    def l1_reg(weight_matrix):
        return 0.01 * K.sum(K.abs(weight_matrix))
    # add a fully connected layer
    x = Dense(1024, activation='relu',                 
                kernel_initializer='random_uniform',
                bias_initializer='zeros',
                kernel_regularizer=regularizers.l1_l2(1e-4),
                #activity_regularizer=regularizers.l1(0.01),
                kernel_constraint=max_norm(2.)
                )(x)
    x = Dropout(0.5)(x)'''
    predictions = Dense(8, activation='softmax')(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    learning_rate = 0.1
    decay = 0.001
    sgd = SGD(lr=learning_rate, decay=decay, momentum=0.9, nesterov=True)

    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model  # return the built model
Example #25
    def get_scores(self, video, frame_cuts):
        """[summary]

        Args:
            video ([type]): [description]
            frame_cuts ([type]): [description]

        Returns:
            [type]: [description]
        """
        base_model = InceptionResNetV2(input_shape=(None, None, 3),
                                       include_top=False,
                                       pooling='avg',
                                       weights=None)
        x = Dropout(0.75)(base_model.output)
        x = Dense(10, activation='softmax')(x)

        model = Model(base_model.input, x)
        model.load_weights('weights/inception_resnet_weights.h5')
        score_list = []
        for start, end in tqdm(frame_cuts, total=len(frame_cuts)):
            if end - start > 1000:
                frames = video[start:end // 2, :, :, :].copy()
                if frames.shape[0] == 0:
                    continue
                best_section_score = self.predict_scores(frames, model, start)
                start = end // 2
            frames = video[start:end, :, :, :].copy()
            if frames.shape[0] == 0:
                continue
            best_score = self.predict_scores(frames, model, start)
            if end - start > 1000 and best_section_score[0] > best_score[0]:
                best_score = best_section_score
            score_list.append(best_score)
        heapq.heapify(score_list)
        return score_list
Example #26
    # Example to fine-tune on 3000 samples from Cifar10

    img_rows, img_cols = 299, 299 # Resolution of inputs
    channel = 3
    num_classes = 10
    batch_size = 32
    nb_epoch = 100

    # Load Cifar10 data. Please implement your own load_data() module for your own dataset
    X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)

    # Load our model
    # model = inception_v4_model(img_rows, img_cols, channel, num_classes, dropout_keep_prob=0.2)

    # use keras InceptionResNetV2
    model = InceptionResNetV2(weights=None, classes=10)
    # Learning rate is changed to 0.001
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    # Start Fine-tuning
    model.fit(X_train, Y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              shuffle=True,
              verbose=1,
              validation_data=(X_valid, Y_valid),
              )

    # Make predictions
    predictions_valid = model.predict(X_valid, batch_size=batch_size, verbose=1)
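
`load_cifar10_data` is left to the reader in the comment above; a hedged sketch of one possible implementation, assuming OpenCV is available for resizing and that only a small subset of CIFAR-10 is used.

# Hypothetical load_cifar10_data(); the original asks you to supply your own loader.
import numpy as np
import cv2
from keras.datasets import cifar10
from keras.utils import to_categorical

def load_cifar10_data(img_rows, img_cols, nb_train=3000, nb_valid=1000):
    (x_train, y_train), (x_valid, y_valid) = cifar10.load_data()
    x_train = np.array([cv2.resize(img, (img_cols, img_rows)) for img in x_train[:nb_train]])
    x_valid = np.array([cv2.resize(img, (img_cols, img_rows)) for img in x_valid[:nb_valid]])
    y_train = to_categorical(y_train[:nb_train], 10)
    y_valid = to_categorical(y_valid[:nb_valid], 10)
    return x_train, y_train, x_valid, y_valid
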
Example #27
# give priority to directory
if args.dir is not None:
    print("Loading images from directory : ", args.dir)
    imgs = Path(args.dir).files('*.png')
    imgs += Path(args.dir).files('*.jpg')
    imgs += Path(args.dir).files('*.jpeg')

elif args.img[0] is not None:
    print("Loading images from path(s) : ", args.img)
    imgs = args.img

else:
    raise RuntimeError('Either -dir or -img arguments must be passed as argument')

with tf.device('/CPU:0'):
    base_model = InceptionResNetV2(input_shape=(None, None, 3), include_top=False, pooling='avg', weights=None)
    x = Dropout(0.75)(base_model.output)
    x = Dense(10, activation='softmax')(x)

    model = Model(base_model.input, x)
    model.load_weights('weights/inception_resnet_weights.h5')

    score_list = []

    for img_path in imgs:
        img = load_img(img_path, target_size=target_size)
        x = img_to_array(img)
        x = np.expand_dims(x, axis=0)

        x = preprocess_input(x)
Example #28
def Make_Model(modelConfig, datasetConfig):

    strModelType = modelConfig.MODEL_TYPE
    strPretrained = modelConfig.PRETRAINED_MODEL
    im_Shape = datasetConfig.IMG_SHAPE
    strOptimizer = modelConfig.OPTIMIZER
    num_Classes = datasetConfig.NUM_CLASS
    learningRate = modelConfig.LEARNING_RATE
    decay = modelConfig.DECAY
    momentum = modelConfig.MOMENTUM
    loss = modelConfig.LOSS

    optimizer = None

    if (strOptimizer == "SGD"):
        optimizer = SGD(lr=learningRate,
                        decay=decay,
                        momentum=momentum,
                        nesterov=True)  # decay = 1e-4
    elif (strOptimizer == "ADAM"):
        optimizer = Adam(lr=learningRate, decay=decay)
    else:
        print("No Such a Optimizer")
        return None

    model = None

    Num = 2

    if (strModelType == "VGG16"):
        model = VGG16(weights=strPretrained,
                      include_top=False,
                      input_shape=im_Shape,
                      classes=Num)
    elif (strModelType == "RESNET50"):
        model = ResNet50(weights=strPretrained,
                         include_top=True,
                         input_shape=im_Shape,
                         classes=Num)
    elif (strModelType == "RESNET152"):
        model = build_Resnet152_Model(im_Shape, Num, strPretrained)
    elif (strModelType == "INCEPTIONV3"):
        model = InceptionV3(weights=strPretrained,
                            include_top=True,
                            input_shape=im_Shape,
                            classes=Num)
    elif (strModelType == "INCEPTIONRESV2"):
        model = InceptionResNetV2(weights=strPretrained,
                                  include_top=True,
                                  input_shape=im_Shape,
                                  classes=Num)
    elif (strModelType == "SEINCEPTIONRESV2"):
        model = SEInceptionResNetV2(weights=strPretrained,
                                    include_top=True,
                                    input_shape=im_Shape,
                                    classes=Num)
    elif (strModelType == "XCEPTION"):
        model = Xception(weights=strPretrained,
                         include_top=True,
                         input_shape=im_Shape,
                         classes=Num)
        #basemodel = Xception(weights="imagenet", include_top=True, input_shape=(299,299,3), classes = 1000)
        #x = Dense(num_Classes, activation='softmax', name='predictions')(basemodel.layers[-2].output)
        #model = Model(basemodel.input, x)
    elif (strModelType == "UNET2D"):
        model = build_UNet2D_4L(im_Shape, strPretrained)
    elif (strModelType == "CNN6Layers"):
        model = build_CNN_6layers(im_Shape, num_classes=num_Classes)
    else:
        print("No Such Model Type")
        return None

    # for layer in model.layers[:-15] :
    #     layer.trainable = False

    # oldlayer = model.layers.pop(-1)
    # model.layers.pop(-1)
    x_xc = model.get_layer(index=-2).output
    #print(x_xc.output_shape)
    # print(x_xc.shape)
    #x_xc = GlobalMaxPooling2D()(x_xc)
    out = Dense(int(num_Classes), activation='softmax', name='pp')(x_xc)

    model = Model(inputs=model.input, outputs=out)

    # upperlayer = model.layers[-2]
    # output = Dense(int(num_Classes), activation='softmax')(upperlayer)
    #model.trainable = False
    #model = model (input = model.layers[0], output = output)
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    model.summary()

    return model
Example #29
print('tst.shape', X_tst.shape, Y_tst.shape)

# model construction (keras)
if model_id == 1:
    base_model = VGG19(weights='imagenet', pooling='avg', include_top=False)
elif model_id == 2:
    base_model = InceptionV3(weights='imagenet',
                             pooling='avg',
                             include_top=False)
elif model_id == 3:
    base_model = ResNet50V2(weights='imagenet',
                            pooling='avg',
                            include_top=False)
elif model_id == 4:
    base_model = InceptionResNetV2(weights='imagenet',
                                   pooling='avg',
                                   include_top=False)
else:
    raise ValueError('Unknown model_id: {}'.format(model_id))

predictions = Dense(8, activation='softmax')(base_model.output)

model = Model(inputs=base_model.input, outputs=predictions)
#model.summary()

# data augmentation and scaling
data_gen_args = dict(rotation_range=360,
                     width_shift_range=0.15,
                     height_shift_range=0.15,
                     zoom_range=0.15,
                     brightness_range=[0.5, 1.5],
                     horizontal_flip=True,
                     vertical_flip=False,
Example #30
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.preprocessing import image
from keras.applications.inception_resnet_v2 import preprocess_input
from keras.applications.inception_resnet_v2 import decode_predictions
import numpy as np

model = InceptionResNetV2(weights='imagenet')

#image_path = 'elephant.jpg'
#image_path = 'muppet2.jpg'
#image_path = 'algo.jpg'
#image_path = 'gioconda.jpg'
image_path = 'perros.jpg'

img = image.load_img(image_path, target_size=(299, 299))

x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)

for index, res in enumerate(decode_predictions(preds, top=10)[0]):
    print('{}. \t {}\t:{:.3f}%'.format(index + 1, res[1], 100 * res[2]))