Ejemplo n.º 1
0
def xception_retinanet(num_classes,
                       backbone='xception',
                       inputs=None,
                       modifier=None,
                       **kwargs):
    """Construct a RetinaNet detector on top of an Xception backbone.

    Args:
        num_classes: number of object classes to detect.
        backbone: backbone identifier; only 'xception' is supported.
        inputs: optional input tensor; defaults to a 3-channel image of
            unconstrained spatial size.
        modifier: optional callable applied to the backbone model
            (e.g. to freeze layers) before feature extraction.
        **kwargs: forwarded to retinanet.retinanet.

    Returns:
        The assembled RetinaNet model.

    Raises:
        ValueError: if `backbone` is not 'xception'.
    """
    k.clear_session()

    # Default input: 3 channels, free spatial dimensions, honoring the
    # configured data format.
    if inputs is None:
        channels_first = keras.backend.image_data_format() == 'channels_first'
        shape = (3, None, None) if channels_first else (None, None, 3)
        inputs = keras.layers.Input(shape=shape)

    if backbone != 'xception':
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))
    xception_model = Xception(weights='imagenet',
                              include_top=False,
                              input_tensor=inputs)

    # Let the caller adapt the backbone (e.g. freeze layers).
    if modifier:
        xception_model = modifier(xception_model)

    # Three feature scales feed the RetinaNet pyramid.
    backbone_layers = [
        xception_model.get_layer("add_7").output,
        xception_model.get_layer("add_10").output,
        xception_model.output,
    ]
    return retinanet.retinanet(inputs=inputs,
                               num_classes=num_classes,
                               backbone_layers=backbone_layers,
                               **kwargs)
Ejemplo n.º 2
0
def save_model3(new_model_path, conv_model_path):
    """Build and save an Xception-based classifier and its conv backbone.

    Args:
        new_model_path: path where the full classifier is saved.
        conv_model_path: path where the convolutional feature extractor is saved.

    Relies on module-level config: img_width, img_height, pretrained,
    num_fc_layers, num_fc_neurons, dropout, num_classes.
    """
    # Build the backbone exactly once; the original constructed it twice
    # (a throwaway weights=None model) whenever `pretrained` was True.
    model = Xception(
        input_shape=(img_width, img_height, 3),
        include_top=False,
        weights='imagenet' if pretrained else None,
    )
    # Truncate at the last activation of block 14 to get the feature extractor.
    transfer_layer = model.get_layer('block14_sepconv2_act')
    conv_model = Model(inputs=model.input, outputs=transfer_layer.output)

    new_model = Sequential()
    new_model.add(conv_model)
    new_model.add(GlobalAveragePooling2D())
    # First dense head has no dropout; each additional one is preceded by dropout.
    if num_fc_layers >= 1:
        new_model.add(Dense(num_fc_neurons, activation='relu'))
    for threshold in (2, 3):
        if num_fc_layers >= threshold:
            new_model.add(Dropout(dropout))
            new_model.add(Dense(num_fc_neurons, activation='relu'))
    new_model.add(Dense(num_classes, activation='softmax'))

    print(new_model.summary())

    new_model.save(new_model_path)
    conv_model.save(conv_model_path)
Ejemplo n.º 3
0
def xcpt(input_shape=None, channels=2, lr=1e-4, weights=None, classes=4, **kwargs):
    """Classifier for n-channel images built on an ImageNet Xception trunk.

    A 1x1 convolution maps `channels` input channels onto the 3 channels the
    pretrained backbone expects. The backbone is truncated at
    'block13_sepconv2_bn', globally pooled, and topped with a softmax head.

    Args:
        input_shape: optional explicit input shape; derived from `channels`
            and the backend data format when None.
        channels: number of input channels.
        lr: SGD learning rate.
        weights: optional path to model weights to load.
        classes: number of output classes.

    Returns:
        The compiled Keras model.
    """
    channels_last = K.image_data_format() == 'channels_last'

    # ImageNet Xception, spatially unconstrained, cut at block13_sepconv2_bn.
    backbone_shape = (None, None, 3) if channels_last else (3, None, None)
    backbone = Xception(input_shape=backbone_shape, include_top=False, weights='imagenet')
    backbone = Model(backbone.input, backbone.get_layer('block13_sepconv2_bn').output)

    if input_shape is None:
        input_shape = (None, None, channels) if channels_last else (channels, None, None)

    main_input = Input(input_shape)
    # Project arbitrary channel count to the 3 channels Xception expects.
    feats = Convolution2D(3, (1, 1), kernel_initializer=kernel_initializer)(main_input)
    feats = backbone(feats)
    feats = GlobalAveragePooling2D(name='pool1')(feats)
    main_output = Dense(classes, activation='softmax', name='predictions')(feats)

    model = Model(main_input, main_output, name='xcpt')

    if weights is not None:
        print('Load weights from', weights)
        model.load_weights(weights)

    sgd = SGD(lr, momentum=0.95, decay=0.0005, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])

    return model
def get_model_xception_pretrained(input_shape=(75, 75, 3), inputs_meta=1):
    """Binary classifier fusing Xception image features with scalar metadata.

    Args:
        input_shape: image input shape.
        inputs_meta: dimensionality of the auxiliary metadata vector.

    Returns:
        A compiled two-input (image, 'meta') model with one sigmoid output.
    """
    drop_rate = 0.25
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)

    # ImageNet-pretrained backbone without the classification head.
    base_model = Xception(weights='imagenet', include_top=False,
                          input_shape=input_shape, classes=1)

    # Metadata branch, batch-normalized before fusion.
    input_meta = Input(shape=[inputs_meta], name='meta')
    input_meta_norm = BatchNormalization()(input_meta)

    image_features = base_model.get_layer('block14_sepconv2_act').output
    image_features = GlobalMaxPooling2D()(image_features)

    merged = concatenate([image_features, input_meta_norm], name='features_layer')
    merged = Dense(512, activation='relu', name='fc2')(merged)
    merged = Dropout(drop_rate)(merged)
    merged = Dense(256, activation='relu', name='fc3')(merged)
    merged = Dropout(drop_rate)(merged)

    # Single-logit sigmoid head for binary classification.
    logits = Dense(1)(merged)
    output = Activation('sigmoid')(logits)

    model = Model(inputs=[base_model.input, input_meta], outputs=output)
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])

    return model
Ejemplo n.º 5
0
def create_model(input_shape, n_out):
    """Multiple-instance-learning classifier on a truncated Xception trunk.

    Fix: the parameters were previously ignored — the input used the
    module-level SIZE constant and the class count was hard-coded to 28.
    They are now honored (callers passing (SIZE, SIZE, 3) and 28 see
    identical behavior).

    Args:
        input_shape: image input shape, e.g. (SIZE, SIZE, 3).
        n_out: number of output classes.

    Returns:
        Model mapping an image to [bag-level predictions, per-location map].
    """
    input_tensor = Input(shape=input_shape)
    base_model = Xception(include_top=False,
                          weights='imagenet',
                          input_shape=input_shape,
                          input_tensor=input_tensor)

    # Use mid-level features rather than the final block's output.
    x = base_model.get_layer('block13_sepconv2_bn').output

    # Per-spatial-location class scores (sigmoid: multi-label).
    x_map32 = Conv2D(n_out,
                     kernel_size=(1, 1),
                     activation='sigmoid',
                     name='map32')(x)
    # Noisy-AND pooling aggregates location scores into bag-level scores.
    x_pooled = NoisyAndPooling2D()(x_map32)
    output = Dense(n_out, activation='sigmoid', name='pred')(x_pooled)
    return Model(input_tensor, [output, x_map32])
    def get_model(input_shape=(513, 513, 3),
                  atrous_rate=(6, 12, 18),
                  class_no=21,
                  freezeEncoder=False):
        """Build a DeepLabV3+-style segmentation model with an Xception encoder.

        Args:
            input_shape: input image shape (H, W, C).
            atrous_rate: dilation rates for the separable atrous convolutions
                produced by Xception_DeepLabV3Plus.get_separable_atrous_conv.
            class_no: number of segmentation classes.
            freezeEncoder: if True, mark every encoder layer non-trainable.

        Returns:
            A Model named 'deeplab_try' mapping the input image to a
            per-pixel softmax over `class_no` classes at input resolution.
        """
        input_tensor = layers.Input(shape=input_shape)
        with tf.variable_scope("encoder"):
            # ImageNet-pretrained Xception backbone without classifier head.
            encoder = Xception(include_top=False,
                               weights='imagenet',
                               input_tensor=input_tensor)
            x_output = encoder.output

            if freezeEncoder:
                for layer in encoder.layers:  #  not available as pre train model is not ready here.
                    layer.trainable = False

            # ASPP-style head: separable atrous convs at the configured rates.
            x = Xception_DeepLabV3Plus.get_separable_atrous_conv(
                x_output, atrous_rate=atrous_rate)

            # Project the concatenated ASPP features down to 256 channels.
            x = layers.Conv2D(256, (1, 1),
                              padding='same',
                              use_bias=False,
                              name='concat_projection',
                              kernel_initializer='he_normal')(x)
            x = layers.BatchNormalization(name='concat_projection_BN',
                                          epsilon=1e-5)(x)
            x = layers.Activation('relu')(x)
            x = layers.Dropout(0.1)(x)

        with tf.variable_scope("decoder"):
            # # x4 (x2) block
            # Upsample encoder features to the low-level skip's spatial size,
            # then fuse with a 48-channel projection of that skip feature.
            skip1 = encoder.get_layer('block3_sepconv2_bn').output
            x = BilinearResizeLayer2D(target_size=(K.int_shape(skip1)[1],
                                                   K.int_shape(skip1)[2]),
                                      name='UpSampling1')(x)
            dec_skip1 = layers.Conv2D(48, (1, 1),
                                      padding='same',
                                      use_bias=False,
                                      name='feature_projection0',
                                      kernel_initializer='he_normal')(skip1)
            dec_skip1 = layers.BatchNormalization(
                name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
            dec_skip1 = layers.Activation('relu')(dec_skip1)
            x = layers.Concatenate()([x, dec_skip1])

            # Per-pixel class scores, then resize back to input resolution.
            x = layers.Conv2D(class_no, (1, 1),
                              padding='same',
                              kernel_initializer='he_normal')(x)
            x = BilinearResizeLayer2D(
                target_size=K.int_shape(input_tensor)[1:3],
                name='UpSampling2')(x)

        x = layers.Activation('softmax')(x)
        model = models.Model(inputs=input_tensor,
                             outputs=x,
                             name='deeplab_try')

        return model
def xception_globalavgpool():
    """Xception trunk + global average pooling + 3-way softmax head.

    Uses module-level `size` (input shape) and `optimizer`.

    Returns:
        The compiled classifier model.
    """
    base = Xception(include_top=False, weights='imagenet',
                    input_tensor=None, input_shape=size)
    x = base.get_layer(index=-1).output
    x = GlobalAveragePooling2D()(x)
    x = Dense(3, activation='softmax', name='predictions')(x)
    model = Model(base.input, x)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Ejemplo n.º 8
0
    def create_feature_sub_nn(self, feature):
        """Create the image sub-network for one input feature.

        Args:
            feature: feature-description dict; feature['name'] prefixes the
                sub-network's layer names so several sub-networks can coexist
                in one model.

        Returns:
            (input, output): the sub-network's input tensor and its flattened
            feature output tensor.

        Raises:
            NotImplementedError: for the untested 'xception' backbone and for
                any CNN type not handled below.
        """
        input_name = '%s_input' % feature['name']
        image_size = self.model_parameters.get_image_size(feature)
        (width, height, depth) = image_size
        cnn = self.model_parameters.get_cnn(feature)

        if cnn == "xception":
            # NOTE(review): everything after this raise is unreachable; it is
            # kept as a sketch of the intended Xception implementation.
            raise NotImplementedError()
            # TODO: not tested
            base_model = Xception(weights='imagenet')
            # base_model.summary()

            # Prefix layer names and freeze the early layers.
            for i, layer in enumerate(base_model.layers):
                layer.name = '%s_%s' % (feature['name'], layer.name)
                layer.trainable = i > 63
                # total: 126

            # Setting layer name: https://github.com/keras-team/keras/issues/6194
            base_model.layers[0].name = input_name
            input = base_model.input
            output = base_model.get_layer('avg_pool').output # avg_pool might not exists ...
            output = Flatten()(output)

            return input, output
        elif cnn == "mobilenet":
            base_model = MobileNet(
                include_top=False,
                weights='imagenet',
                input_tensor=Input(shape=image_size),
                input_shape=image_size
            )
            # base_model.summary()

            # Rename layers to avoid conflicts
            for i, layer in enumerate(base_model.layers):
                layer.name = '%s_%s' % (feature['name'], layer.name)

            # Get input and output; features are taken several layers below
            # the top of MobileNet.
            base_model.layers[0].name = input_name
            input = base_model.input
            output = base_model.layers[-6].output
            output = Flatten()(output)

            # We only train the last layers (total: 88)
            for i, layer in enumerate(base_model.layers):
                layer.trainable = i > 50

            return input, output
        elif cnn == 'small':
            return self.create_standard_cnn(input_name, width, height, depth, 2)
        elif cnn == 'medium':
            return self.create_standard_cnn(input_name, width, height, depth, 3)
        elif cnn == 'large':
            return self.create_standard_cnn(input_name, width, height, depth, 4)
        else:
            raise NotImplementedError('Unsupported ImageNN: %s' % self.model_parameters.cnn)
Ejemplo n.º 9
0
def xception_encoder(image_size):
    """Headless Xception feature extractor truncated at block13_sepconv1_act.

    Args:
        image_size: (H, W) tuple; a 3-channel dimension is appended.

    Returns:
        (feature_net, weight_to_monitor): the truncated model and a single
        scalar weight from 'block12_sepconv2', handy for monitoring whether
        backbone weights change during training.
    """
    backbone = Xception(include_top=False, input_shape=image_size + (3,))
    print('Model loaded.')

    features = backbone.get_layer('block13_sepconv1_act').output
    feature_net = Model(inputs=backbone.input, outputs=features)

    weight_to_monitor = feature_net.get_layer('block12_sepconv2').weights[0][0, 0, 0, 0]

    return feature_net, weight_to_monitor
Ejemplo n.º 10
0
 def _build(self, weights):
     """Build the two-headed (genre + style) Xception classifier.

     Args:
         weights: optional path to pre-trained model weights; loaded when truthy.

     Side effects: sets self.block4_dense and self.model. Relies on
     self.num_genres, self.num_styles and self._create_block.
     """
     # Headless ImageNet Xception; layer 105's output feeds both heads.
     base_model = Xception(include_top=False, weights='imagenet')
     x = base_model.layers[105].output
     # Auxiliary feature: pooled block4 output projected through a dense layer.
     block4_out = base_model.get_layer('block4_sepconv2_bn').output
     block4_avg = GlobalAveragePooling2D()(block4_out)
     self.block4_dense = Dense(512, name="block4_dense")(block4_avg)
     # One prediction block per task, sharing the same trunk features.
     genre_predictions = self._create_block(x, self.num_genres, 'genre')
     style_predictions = self._create_block(x, self.num_styles, 'style')
     self.model = Model(inputs=base_model.input, outputs=[genre_predictions, style_predictions])
     if weights:
         self.model.load_weights(weights)
def xception_dense():
    """Xception trunk + flattened, regularized 512-unit dense + 3-way softmax.

    Uses module-level `size` (input shape) and `optimizer`.

    Returns:
        The compiled classifier model.
    """
    base = Xception(include_top=False, weights='imagenet',
                    input_tensor=None, input_shape=size)
    x = base.get_layer(index=-1).output
    x = Flatten(name='flatten')(x)
    x = Dropout(0.2)(x)
    x = Dense(512)(x)
    x = PReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.3)(x)
    x = Dense(3, activation='softmax', name='predictions')(x)
    model = Model(base.input, x)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    return model
Ejemplo n.º 12
0
def predict():
    """Classify the globally-selected image and display recommended hashtags.

    Reads model/paths from settings/settings.json, extracts deep features
    with the configured pre-trained CNN, classifies them with the pickled
    classifier, and renders the predicted hashtags into the Tk UI.

    Relies on module-level globals: `filename` (image path) and `main`
    (Tk root window).

    Raises:
        ValueError: if the configured model is not supported (the original
            code would hit a NameError on the undefined `model` instead).
    """
    img = Image.open(filename)
    with open('settings/settings.json') as f:
        config = json.load(f)

    m_name = config["model"]
    weights = config["weights"]
    train_path = config["train_path"]
    classifier_path = config["classifier_path"]

    # Fixed: the classifier was pickle-loaded twice; once is enough.
    classifier = pickle.load(open(classifier_path, 'rb'))

    if m_name == "xception":
        pre_trained = Xception(weights=weights)
        # Truncate at 'avg_pool' to use pooled features as the descriptor.
        model = Model(pre_trained.input,
                      pre_trained.get_layer('avg_pool').output)
        # NOTE(review): image_size is never applied — the image is not
        # resized before img_to_array; confirm inputs are already 299x299.
        image_size = (299, 299)
    else:
        raise ValueError('Unsupported model: %s' % m_name)

    # Class labels are the training-directory folder names, in listdir order
    # (must match the ordering used when the classifier was trained).
    train_labels = os.listdir(train_path)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    feature = model.predict(x)
    flat = np.expand_dims(feature.flatten(), axis=0)
    preds = classifier.predict(flat)
    prediction = train_labels[preds[0]]
    print("" + prediction)

    # The label encodes hashtags separated by '#', with ',' as a
    # secondary separator inside each part.
    parts = prediction.split("#")
    parts = [item.replace(",", " #") for item in parts]
    explanation = "Recommended Hashtags:\n #" + " #".join(parts)
    w2 = tk.Label(main, justify=tk.CENTER, padx=100, text=explanation)
    w2.config(font=("Courier", 12, 'bold'), fg="DarkOrchid4")
    w2.pack()
Ejemplo n.º 13
0
def create_bottlenet_features_xception(image_tensor):
    """Compute Xception bottleneck features for a batch of images.

    Args:
        image_tensor: image batch array; run through Xception's own
            preprocess_input before prediction.

    Returns:
        Activations of 'block14_sepconv2_act' for the preprocessed batch.
    """
    from keras.applications.xception import Xception
    from keras.applications.xception import preprocess_input as preprocess_input_xception

    backbone = Xception(weights='imagenet', include_top=False)
    extractor = Model(inputs=backbone.input,
                      outputs=backbone.get_layer('block14_sepconv2_act').output)

    features = extractor.predict(preprocess_input_xception(image_tensor))
    print(features.shape)
    return features
Ejemplo n.º 14
0
def build_xception():
    """Xception (max-pooled, headless) with a fresh 128-way softmax head.

    Everything except the new 'predictions' layer is frozen.

    Returns:
        The (uncompiled) model.
    """
    backbone = Xception(include_top=False, pooling='max')
    predictions = Dense(128, activation='softmax',
                        name='predictions')(backbone.layers[-1].output)
    model = Model(inputs=backbone.layers[0].input, outputs=predictions)

    # Freeze every layer except the freshly added head.
    trainable = {model.get_layer(name='predictions')}
    for layer in model.layers:
        if layer not in trainable:
            layer.trainable = False

    return model
Ejemplo n.º 15
0
def perceptual_loss(img_true, img_generated):
    """Perceptual (feature-space) loss between two image tensors.

    Compares activations at Xception's 'block5_sepconv1_act' layer and
    returns the mean squared difference.

    Args:
        img_true: ground-truth image tensor (assumed (batch, 600, 600, 3)).
        img_generated: generated image tensor of the same shape.

    Returns:
        Scalar tensor: mean squared feature difference.
    """
    # Build the feature extractor once and reuse it across calls: the
    # original re-instantiated Xception (and re-downloaded weights into the
    # graph) on every invocation, which is very expensive for a loss function.
    extractor = getattr(perceptual_loss, '_extractor', None)
    if extractor is None:
        xception = Xception(include_top=False,
                            weights='imagenet',
                            input_shape=(600, 600, 3))
        extractor = Model(
            inputs=xception.input,
            outputs=xception.get_layer('block5_sepconv1_act').output)
        extractor.trainable = False
        perceptual_loss._extractor = extractor

    return K.mean(K.square(extractor(img_true) - extractor(img_generated)))
Ejemplo n.º 16
0
def build_2d_net(img_size):
    """Xception front-end rebuilt through block4's first separable conv.

    Output feature map is 96 channels at 28x28 (for the default scaling).

    Args:
        img_size: side length of the square input image.

    Returns:
        The truncated feature-extraction model.
    """
    backbone = Xception(
        include_top=False,
        weights='imagenet',
        input_shape=(img_size, img_size, 3))  # default Xception input is (299, 299, 3)

    # Re-create block4's first separable conv on top of its activation input.
    feats = layers.SeparableConv2D(
        96, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(
            backbone.get_layer('block4_sepconv1_act').output)
    feats = layers.BatchNormalization(name='block4_sepconv1_bn')(feats)
    feats = layers.Activation('relu', name='block4_sepconv2_act')(feats)

    model = Model(inputs=backbone.input, outputs=feats)
    print(model.summary())
    return model
Ejemplo n.º 17
0
def bisenet_model():
    """Build a BiSeNet-style segmentation model over an Xception backbone.

    Uses module-level Input_size, preprocess_input, and final_model. The
    model takes two inputs (the raw image and the Xception input) and
    produces the output of final_model.

    Returns:
        The assembled (uncompiled) model.
    """
    inputs = Input(shape=Input_size)
    # Apply the backbone's preprocessing inside the graph.
    x = Lambda(lambda image: preprocess_input(image))(inputs)
    xception = Xception(weights='imagenet',
                        input_shape=Input_size,
                        include_top=False)

    # Two context tails from the backbone: penultimate pool and final output.
    tail_prev = xception.get_layer('block13_pool').output
    tail = xception.output

    output = final_model(x, tail_prev, tail)
    # inputs, xception_inputs, ans = get_model()
    # Fixed: legacy `output=` kwarg is rejected by Keras 2's Model API.
    model = Model(inputs=[inputs, xception.input], outputs=[output])
    return model
Ejemplo n.º 18
0
    def __init__(self):
        """Build feature extractors from pre-trained ImageNet networks.

        Exposes:
            model_fc1, model_fc2: VGG16 truncated at its fc1 / fc2 layers.
            model_v3: InceptionV3 truncated at its 'avg_pool' layer.
            model_x:  Xception truncated at its 'avg_pool' layer.
        """
        base_model = VGG16(weights='imagenet')

        self.model_fc1 = Model(inputs=base_model.input,
                            outputs=base_model.get_layer('fc1').output
        )

        self.model_fc2 = Model(inputs=base_model.input,
                            outputs=base_model.get_layer('fc2').output
        )

        v3base_model = InceptionV3(weights='imagenet')
        self.model_v3 = Model(inputs=v3base_model.input,
                            outputs=v3base_model.get_layer('avg_pool').output
        )

        # avg_pool (GlobalAveragePooling2 (None, 2048) block14_sepconv2_act[0][0]
        xbase_model = Xception(weights='imagenet')
        # Fixed: legacy `input=` kwarg (rejected by Keras 2) -> `inputs=`,
        # matching the sibling Model constructions above.
        self.model_x = Model(inputs=xbase_model.input,
                            outputs=xbase_model.get_layer('avg_pool').output
        )
Ejemplo n.º 19
0
def model_Xception():
    """Two-branch model: Xception image features fused with an 'angle' scalar.

    Uses the module-level X_train array to derive the image input shape.

    Returns:
        An uncompiled Model with inputs [image, angle] and one sigmoid output.
    """
    input_2 = Input(shape=[1], name="angle")
    angle_layer = Dense(1, )(input_2)
    base_model = Xception(weights='imagenet', include_top=False,
                          input_shape=X_train.shape[1:], classes=1)
    # Mid-network features, globally pooled to a vector.
    x = base_model.get_layer('block4_pool').output
    x = GlobalAveragePooling2D()(x)

    merge_one = concatenate([x, angle_layer])
    merge_one = Dense(512, activation='relu', name='fc2')(merge_one)
    merge_one = Dropout(0.3)(merge_one)
    merge_one = Dense(512, activation='relu', name='fc3')(merge_one)
    merge_one = Dropout(0.3)(merge_one)

    predictions = Dense(1, activation='sigmoid')(merge_one)

    # Fixed: legacy `input=`/`output=` kwargs (rejected by Keras 2).
    model = Model(inputs=[base_model.input, input_2], outputs=predictions)
    return model
Ejemplo n.º 20
0
        def __init__(self,
                     xes,
                     y1es,
                     y2es,
                     image_net_model_type='vgg',
                     k=40,
                     weight=False):
            """Set up a k-NN predictor plus an ImageNet feature extractor.

            image_net_model_type selects the extractor: 'vgg' (fc1 features),
            'resnet' (headless ResNet50), or 'xception' (avg_pool features).
            """
            print('k =', k, 'model_type= ', image_net_model_type)
            print('setting up model...')
            self.keras_model = keras_knn(xes, y1es, y2es, k=k)
            print('...done')

            # Derived model name, with an optional '_weight' suffix.
            name = 'knn_' + image_net_model_type
            if weight:
                name = name + '_weight'
            self.model_name = name
            self.weight = weight
            self.image_net_model_type = image_net_model_type
            self.xes = xes.T
            self.y1es = y1es
            self.y2es = y2es

            if image_net_model_type == 'vgg':
                vgg = VGG16(weights='imagenet')
                self.image_net_model = keras.models.Model(
                    inputs=vgg.input,
                    outputs=vgg.get_layer('fc1').output)
            elif image_net_model_type == 'resnet':
                self.image_net_model = ResNet50(include_top=False,
                                                weights='imagenet')
            elif image_net_model_type == 'xception':
                xcpt_base = Xception(include_top=True, weights='imagenet')
                self.image_net_model = keras.models.Model(
                    inputs=xcpt_base.input,
                    outputs=xcpt_base.get_layer('avg_pool').output)
            # Unknown types fall through and fail on the next line, as before.
            self.input_shape = self.image_net_model.input_shape
            print('Finished constructor')
Ejemplo n.º 21
0
def Make_Model(modelConfig, datasetConfig):
    """Build and compile a classifier from the configured backbone.

    The backbone named by modelConfig.MODEL_TYPE is instantiated, its
    penultimate layer's output is taken as a feature vector, and a fresh
    softmax head sized to the dataset's class count is attached.

    Args:
        modelConfig: config object with MODEL_TYPE, PRETRAINED_MODEL,
            OPTIMIZER, LEARNING_RATE, DECAY, MOMENTUM, LOSS.
        datasetConfig: config object with IMG_SHAPE and NUM_CLASS.

    Returns:
        The compiled Model, or None on an unknown optimizer or model type.
    """
    strModelType = modelConfig.MODEL_TYPE
    strPretrained = modelConfig.PRETRAINED_MODEL
    im_Shape = datasetConfig.IMG_SHAPE
    strOptimizer = modelConfig.OPTIMIZER
    num_Classes = datasetConfig.NUM_CLASS
    learning_rate = modelConfig.LEARNING_RATE  # fixed 'learingRate' typo
    decay = modelConfig.DECAY
    momentum = modelConfig.MOMENTUM
    loss = modelConfig.LOSS

    if (strOptimizer == "SGD"):
        optimizer = SGD(lr=learning_rate,
                        decay=decay,
                        momentum=momentum,
                        nesterov=True)  # decay = 1e-4
    elif (strOptimizer == "ADAM"):
        optimizer = Adam(lr=learning_rate, decay=decay)
    else:
        print("No Such a Optimizer")
        return None

    # Backbones are built with a fixed 2-class head, then re-headed below.
    Num = 2

    if (strModelType == "VGG16"):
        model = VGG16(weights=strPretrained,
                      include_top=False,
                      input_shape=im_Shape,
                      classes=Num)
    elif (strModelType == "RESNET50"):
        model = ResNet50(weights=strPretrained,
                         include_top=True,
                         input_shape=im_Shape,
                         classes=Num)
    elif (strModelType == "RESNET152"):
        model = build_Resnet152_Model(im_Shape, Num, strPretrained)
    elif (strModelType == "INCEPTIONV3"):
        model = InceptionV3(weights=strPretrained,
                            include_top=True,
                            input_shape=im_Shape,
                            classes=Num)
    elif (strModelType == "INCEPTIONRESV2"):
        model = InceptionResNetV2(weights=strPretrained,
                                  include_top=True,
                                  input_shape=im_Shape,
                                  classes=Num)
    elif (strModelType == "SEINCEPTIONRESV2"):
        model = SEInceptionResNetV2(weights=strPretrained,
                                    include_top=True,
                                    input_shape=im_Shape,
                                    classes=Num)
    elif (strModelType == "XCEPTION"):
        model = Xception(weights=strPretrained,
                         include_top=True,
                         input_shape=im_Shape,
                         classes=Num)
    elif (strModelType == "UNET2D"):
        model = build_UNet2D_4L(im_Shape, strPretrained)
    elif (strModelType == "CNN6Layers"):
        model = build_CNN_6layers(im_Shape, num_classes=num_Classes)
    else:
        print("No Such Model Type")
        return None

    # Re-head: penultimate layer output -> softmax over the real class count.
    x_xc = model.get_layer(index=-2).output
    out = Dense(int(num_Classes), activation='softmax', name='pp')(x_xc)

    # Fixed: legacy `input=`/`output=` kwargs (rejected by Keras 2).
    model = Model(inputs=model.input, outputs=out)

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    model.summary()

    return model
Ejemplo n.º 22
0
from keras.optimizers import Adam
from keras import layers
from keras import Model

# Xception backbone built without weights; the ImageNet no-top weights are
# loaded from a local file below.
pre_model = Xception(weights=None,
                     input_shape=(150, 150, 3),
                     include_top=False)

# NOTE(review): hard-coded absolute Windows path — adjust per machine.
pre_weights = 'C:/Users/hp/Desktop/pneumonia/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'

pre_model.load_weights(pre_weights)

# Freeze the whole backbone; only the new head below is trained.
for layer in pre_model.layers:
    layer.trainable = False

# Truncate at block4_pool and attach a small dense head with a single
# sigmoid output (binary classification).
last_layer = pre_model.get_layer('block4_pool')
last_output = last_layer.output

x = layers.AveragePooling2D(7, 7)(last_output)  # pool 7x7, stride 7
x = layers.Flatten()(x)
x = layers.Dense(1024, activation='relu')(x)
x = layers.Dropout(0.5)(x)
x = layers.Dense(512, activation='relu')(x)
x = layers.Dropout(0.4)(x)
x = layers.Dense(1, activation='sigmoid')(x)

model = Model(pre_model.input, x)
model.compile(optimizer=Adam(lr=0.0001),
              loss='binary_crossentropy',
              metrics=['accuracy'])
Ejemplo n.º 23
0
def main():
    """Extract 2048-d Xception 'avg_pool' features for all train/test images.

    Usage: <script> <superclass> <model_weight>

    Walks the train directory (one folder per class) plus the test
    directory, runs each image through the truncated Xception model and
    pickles {'features_all', 'labels_all', 'images_all'} to
    features_<superclass>.pickle.
    """
    # Parameters
    if len(sys.argv) == 3:
        superclass = sys.argv[1]
        model_weight = sys.argv[2]
    else:
        print('Parameters error')
        exit()

    # The constants
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    # Feature extraction model: full classifier, then truncated at
    # 'avg_pool' so predict() returns the 2048-d pooled feature vector.
    base_model = Xception(include_top=True,
                          weights=None,
                          input_tensor=None,
                          input_shape=None,
                          pooling=None,
                          classes=classNum[superclass[0]])
    base_model.load_weights(model_weight)
    base_model.summary()
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('avg_pool').output)

    imgdir_train = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_'+date\
                     +'/zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_images_'+date
    imgdir_test = '../zsl_' + testName[superclass[0]] + '_' + str(
        superclass).lower() + '_test_' + date
    categories = os.listdir(imgdir_train)
    categories.append('test')

    # First pass: count images so features_all can be pre-allocated.
    num = 0
    for eachclass in categories:
        if eachclass[0] == '.':
            continue
        if eachclass == 'test':
            classpath = imgdir_test
        else:
            classpath = imgdir_train + '/' + eachclass
        num += len(os.listdir(classpath))

    print('Total image number = ' + str(num))

    features_all = np.ndarray((num, 2048))
    labels_all = list()
    images_all = list()
    idx = 0

    # Feature extraction (loop var renamed from `iter`, which shadowed the builtin)
    for eachclass in tqdm(categories):
        if eachclass[0] == '.':
            continue
        if eachclass == 'test':
            classpath = imgdir_test
        else:
            classpath = imgdir_train + '/' + eachclass
        imgs = os.listdir(classpath)

        for eachimg in imgs:
            if eachimg[0] == '.':
                continue

            img_path = classpath + '/' + eachimg
            # Fixed: target_size was (229, 299) — a typo; Xception expects
            # square 299x299 inputs.
            img = image.load_img(img_path, target_size=(299, 299))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            feature = model.predict(x)

            features_all[idx, :] = feature
            labels_all.append(eachclass)
            images_all.append(eachimg)
            idx += 1

    # Trim in case hidden files made the pre-count overshoot.
    features_all = features_all[:idx, :]
    labels_all = labels_all[:idx]
    images_all = images_all[:idx]
    data_all = {
        'features_all': features_all,
        'labels_all': labels_all,
        'images_all': images_all
    }

    # Save features (context manager guarantees the file is closed)
    savename = 'features_' + superclass + '.pickle'
    with open(savename, 'wb') as fsave:
        pickle.dump(data_all, fsave)
Ejemplo n.º 24
0
def main():
    """Train an Xception-based attribute predictor for one superclass.

    Usage: python <script> <superclass> <imgmove> <lr>
        superclass -- full superclass name, e.g. 'Animals' (its first letter
                      indexes the per-superclass constant tables below)
        imgmove    -- 'False' to reuse an existing trainval_<superclass>
                      split; any other value rebuilds the split by copying
        lr         -- learning rate for the SGD optimizer
    """
    # Parameters
    if len(sys.argv) == 4:
        superclass = sys.argv[1]
        # Any string other than the literal 'False' enables the image copy.
        imgmove = sys.argv[2] != 'False'
        lr = float(sys.argv[3])
    else:
        print('Parameters error')
        # sys.exit instead of exit(): exit() is site-module sugar and is not
        # guaranteed to exist when the script runs non-interactively.
        sys.exit(1)

    # The constants (keyed by the superclass' first letter)
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    # Number of binary attributes predicted for each superclass.
    classAttrsNums = {'Animals': 123, 'Fruits': 58, 'Hairstyles': 22,
                      'Vehicles': 81, 'Electronics': 75}
    classAttrsNum = classAttrsNums[superclass]

    trainpath = 'trainval_' + superclass + '/train'
    valpath = 'trainval_' + superclass + '/val'
    weightname = 'model/mobile_' + superclass + '_wgt_reduced.h5'

    if not os.path.exists('model'):
        os.mkdir('model')

    # Train/validation data preparation: copy every 8th image into the
    # validation folder, the rest into the training folder.
    if imgmove:
        os.mkdir('trainval_' + superclass)
        os.mkdir(trainpath)
        os.mkdir(valpath)
        sourcepath = '../zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_'+date\
                     +'/zsl_'+testName[superclass[0]]+'_'+str(superclass).lower()+'_train_images_'+date
        categories = os.listdir(sourcepath)
        for eachclass in categories:
            # Class folders are prefixed with the superclass' first letter.
            if eachclass[0] == superclass[0]:
                print(eachclass)
                os.mkdir(trainpath + '/' + eachclass)
                os.mkdir(valpath + '/' + eachclass)
                imgs = os.listdir(sourcepath + '/' + eachclass)
                for idx, im in enumerate(imgs):
                    dest = valpath if idx % 8 == 0 else trainpath
                    shutil.copyfile(sourcepath + '/' + eachclass + '/' + im,
                                    dest + '/' + eachclass + '/' + im)

    # Train and validation ImageDataGenerator
    batchsize = 32

    train_datagen = XmanImageDataGenerator(
        rescale=1./255,
        rotation_range=15,
        width_shift_range=5,
        height_shift_range=5,
        horizontal_flip=True)

    test_datagen = XmanImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        trainpath,
        target_size=(72, 72),
        batch_size=batchsize)

    valid_generator = test_datagen.flow_from_directory(
        valpath,
        target_size=(72, 72),
        batch_size=batchsize)

    # Build Xception and replace the softmax head with a multi-label sigmoid
    # over the attribute vector (trained with binary cross-entropy).
    # include_top=True is required so the 'avg_pool' layer exists by name.
    base_model = Xception(include_top=True, weights=None,
                          input_tensor=None, input_shape=(72, 72, 3),
                          pooling=None, classes=classNum[superclass[0]])
    output = Dense(classAttrsNum, activation='sigmoid',
                   name='predictions')(base_model.get_layer('avg_pool').output)
    model = Model(inputs=base_model.input, outputs=output)

    model.summary()
    model.compile(optimizer=SGD(lr=lr, momentum=0.9),
                  loss='binary_crossentropy', metrics=['accuracy'])

    steps_per_epoch = int(train_generator.n / batchsize)
    validation_steps = int(valid_generator.n / batchsize)

    # Resume from a previous checkpoint when one exists.
    if os.path.exists(weightname):
        model.load_weights(weightname)

    # Keep only the weights with the best validation loss.
    checkpointer = ModelCheckpoint(weightname, monitor='val_loss', verbose=0,
                                   save_best_only=True, save_weights_only=True,
                                   mode='auto', period=1)
    model.fit_generator(
        train_generator,
        steps_per_epoch=steps_per_epoch,
        epochs=25,
        validation_data=valid_generator,
        validation_steps=validation_steps,
        callbacks=[checkpointer])
Ejemplo n.º 25
0
        # VGG16_Layer_Before_Last_Layer
    elif model_name == "VGG16_Last":
        #print(3)
        base_model_VGG16 = VGG16(weights='imagenet', include_top=False)
        built_model = Model(
            inputs=base_model_VGG16.input,
            outputs=base_model_VGG16.get_layer('block4_pool').output)
    # Xception
    elif model_name == "Xception":
        #print(4)
        model_Xception_base = Xception(weights='imagenet',
                                       pooling='max',
                                       include_top=False)
        built_model = Model(
            inputs=model_Xception_base.input,
            outputs=model_Xception_base.get_layer('block4_pool').output)

    product_image = pd.read_csv(file_path, )
    file_List = product_image['Product_Image'].tolist()
    product_List = product_image['Product_Id'].tolist()
    List_product_List = chunks(product_List, 1000)
    List_file_List = chunks(file_List, 1000)
    jobs = []
    # lock = t.Lock()
    p = Pool()
    #print(built_model)
    for product_List, file_List in zip(enumerate(List_product_List),
                                       enumerate(List_file_List)):
        #if product_List[0]>6 and file_List[0]>6: #Comment the line
        doJob(product_List[0], file_List[1], product_List[1], "Temp",
              model_name, built_model)
Ejemplo n.º 26
0
# Check whether pretrained weights are used and whether the top layers
# are included, then truncate the chosen backbone at its feature layer.
if model_name == "vgg16":
    base_model = VGG16(weights=weights)
    # 'fc1': the first 4096-d fully connected layer of VGG16.
    # NOTE: inputs=/outputs= (Keras 2 spelling, used elsewhere in this
    # file) replaces the removed Keras 1 input=/output= kwargs.
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('fc1').output)
    image_size = (224, 224)
elif model_name == "vgg19":
    base_model = VGG19(weights=weights)
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('fc1').output)
    image_size = (224, 224)
elif model_name == "xception":
    base_model = Xception(weights=weights)
    # 'avg_pool': 2048-d globally average-pooled features.
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('avg_pool').output)
    image_size = (299, 299)
else:
    base_model = None

print("[INFO] successfully loaded base model and model...")

# Path to training dataset: one sub-directory per label.
train_labels = os.listdir(train_path)

# Encode the labels (fit directly on the list; no copy needed).
print("[INFO] encoding labels...")
le = LabelEncoder()
le.fit(train_labels)

# variables to hold features and labels / özellik ve etiketleri tutması için değişkenlerin tanımlanması
Ejemplo n.º 27
0
    def get_image_features(self, image_directory):
        """Extract CNN features for every image in ``self.image_files``.

        Selects a backbone according to ``self.cnn_extractor``, runs each
        (deduplicated) image through it, and stores:

        * ``self.image_feature_files`` -- deduplicated image file names
        * ``self.extracted_features``  -- numpy array, one feature row per file
        * ``self.IMG_FEATS``           -- feature dimensionality
        """
        extractor = self.cnn_extractor

        if extractor == 'vgg16':
            from keras.applications.vgg16 import preprocess_input
            from keras.applications import VGG16
            base_model = VGG16(weights='imagenet')
            self._extract_with_model(base_model, 'fc2', preprocess_input,
                                     (224, 224), 4096, image_directory)
        elif extractor == 'vgg19':
            from keras.applications.vgg19 import preprocess_input
            from keras.applications import VGG19
            base_model = VGG19(weights='imagenet')
            self._extract_with_model(base_model, 'fc2', preprocess_input,
                                     (224, 224), 4096, image_directory)
        elif extractor == 'inception':
            from keras.applications.inception_v3 import preprocess_input
            from keras.applications import InceptionV3
            base_model = InceptionV3(weights='imagenet')
            self._extract_with_model(base_model, 'avg_pool', preprocess_input,
                                     (299, 299), 2048, image_directory)
        elif extractor == 'vgg16_places':
            from VGG_Place205 import VGG16_places
            from keras.applications.vgg16 import preprocess_input
            base_model = VGG16_places()
            base_model.load_weights(
                "VGG_Place205/vgg_tensorflow_channels_last.h5")
            self._extract_with_model(base_model, 'fc7', preprocess_input,
                                     (224, 224), 4096, image_directory)
        elif extractor == 'inceptionresnetv2':
            from keras.applications.inception_resnet_v2 import \
                InceptionResNetV2, preprocess_input
            base_model = InceptionResNetV2(weights="imagenet")
            # BUGFIX: was target_size=(229, 229); InceptionResNetV2's native
            # input size is 299x299.  Its 'avg_pool' output is 1536-d.
            self._extract_with_model(base_model, 'avg_pool', preprocess_input,
                                     (299, 299), 1536, image_directory)
        elif extractor == 'resnet50':
            from keras.applications.resnet50 import ResNet50, preprocess_input
            base_model = ResNet50(weights="imagenet")
            self._extract_with_model(base_model, 'avg_pool', preprocess_input,
                                     (224, 224), 2048, image_directory)
        elif extractor == 'xception':
            from keras.applications.xception import Xception, preprocess_input
            base_model = Xception(weights="imagenet")
            # BUGFIX: was target_size=(224, 224); Xception's ImageNet weights
            # expect 299x299 inputs.
            self._extract_with_model(base_model, 'avg_pool', preprocess_input,
                                     (299, 299), 2048, image_directory)
        elif extractor == 'resnet152':
            from resnet152 import resnet152_model
            base_model = resnet152_model("resnet152_weights_tf.h5")

            def load_resnet152(image_path):
                # Manual BGR channel-mean subtraction matching the converted
                # weights (cannot use the stock keras preprocess_input here).
                img = cv2.resize(cv2.imread(image_path),
                                 (224, 224)).astype(np.float32)
                img[:, :, 0] -= 103.939
                img[:, :, 1] -= 116.779
                img[:, :, 2] -= 123.68
                return np.expand_dims(img, axis=0)

            self._extract_with_model(base_model, 'avg_pool', None,
                                     None, 2048, image_directory,
                                     loader=load_resnet152)

    def _extract_with_model(self, base_model, layer_name, preprocess,
                            target_size, img_feats, image_directory,
                            loader=None):
        """Embed every image with ``base_model`` truncated at ``layer_name``.

        ``loader`` overrides the default load/resize/preprocess pipeline
        (used by the resnet152 branch, whose weights need custom handling).
        """
        from keras.preprocessing import image
        from keras.models import Model

        self.IMG_FEATS = img_feats
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer(layer_name).output)
        self.extracted_features = []
        # De-duplicate so each distinct file is embedded exactly once.
        self.image_feature_files = list(set(self.image_files))
        for image_file in tqdm(self.image_feature_files):
            image_path = image_directory + image_file
            if loader is not None:
                img = loader(image_path)
            else:
                img = image.load_img(image_path, target_size=target_size)
                img = image.img_to_array(img)
                img = np.expand_dims(img, axis=0)
                img = preprocess(img)
            features = model.predict(img)
            self.extracted_features.append(np.squeeze(features))
        self.extracted_features = np.asarray(self.extracted_features)
Ejemplo n.º 28
0
def get_model():
    """Return an ImageNet-pretrained Xception feature extractor.

    Uses include_top=False with pooling='avg', which yields the same 2048-d
    globally average-pooled features as truncating an include_top=True model
    at its 'avg_pool' layer, while downloading the smaller notop weight file
    (the 1000-class dense head was built only to be discarded).
    """
    base_model = Xception(weights='imagenet', include_top=False, pooling='avg')
    return Model(inputs=base_model.input, outputs=base_model.output)
Ejemplo n.º 29
0
# BiSeNet-style graph: a shallow Spatial Path fused with an Xception
# Context Path.
inputs = Input(shape=(224, 224, 3))
# NOTE(review): the resized/preprocessed tensor `x` is never consumed --
# the Spatial Path below reads the raw `inputs`.  Kept to preserve the
# original graph; confirm whether the paths were meant to use `x`.
x = Lambda(lambda image: ktf.image.resize_images(image, (224, 224)))(inputs)
x = Lambda(lambda image: preprocess_input(image))(x)

# Spatial Path: three stride-2 conv_bn_act stages (8x downsampling total).
SP = conv_bn_act(inputs, 32, strides=2)
SP = conv_bn_act(SP, 64, strides=2)
SP = conv_bn_act(SP, 156, strides=2)

# Context Path (Xception backbone and Attention Refinement Module (ARM)).
Xception_model = Xception(weights='imagenet',
                          input_shape=(224, 224, 3),
                          include_top=False)

# 16x Down
layer_13 = Xception_model.get_layer('block13_pool').output
# 32x Down
layer_14 = Xception_model.output
# Context path & ARM
CP_ARM = CP_ARM(layer_13, layer_14)

# Feature Fusion Module (FFM) combining both paths.
FFM = FFM(SP, CP_ARM, 32)

# Upsample the fused features back towards the input resolution.
output = UpSampling2D(size=(16, 16),
                      data_format='channels_last',
                      interpolation='nearest')(FFM)

# BUGFIX: Model() takes `outputs`; the bare `output` kwarg is the removed
# Keras 1 spelling, and the rest of this file uses inputs=/outputs=.
bisnet = Model(inputs=[inputs, Xception_model.input],
               outputs=[output, layer_13, layer_14])
Ejemplo n.º 30
0
# Plot training vs validation accuracy from the recorded training history.
history_df[['acc', 'val_acc']].plot()



print("Preds after loading weights")
# Restore the checkpointed weights before evaluating on the test set.
model.load_weights(modelweights)
preds = model.predict(X_test, verbose=2)
from sklearn.metrics import accuracy_score,f1_score,roc_curve,auc
# np.round turns the continuous predictions into hard 0/1 labels for the
# thresholded metrics; roc_curve/auc use the raw scores.
print(accuracy_score(Y_test,np.round(preds)))
print(f1_score(Y_test,np.round(preds)))
fpr, tpr, thresholds = roc_curve(Y_test, preds)
print("AUC: " + str(auc(fpr, tpr)))

from keras.models import Model
# Truncate the trained network at its global-average-pooling layer so it
# can serve as a fixed feature extractor.
layer_name = 'global_average_pooling2d_1'#'global_average_pooling2d_1'
model_extracted= Model(inputs=model.input, outputs=model.get_layer(layer_name).output)
model_extracted.summary()

# Pooled features for each split, paired with their original targets.
Train_Features=model_extracted.predict(X_train)
Train_Target=Y_train

Val_Features=model_extracted.predict(X_val)
Val_Target=Y_val

Test_Features=model_extracted.predict(X_test)
Test_Target=Y_test

import pickle

# Persist features, targets and both models in a single pickle for reuse.
with open(filetosaveto, "wb") as f:
	pickle.dump((Train_Features,Train_Target,Val_Features,Val_Target,Test_Features,Test_Target,model,model_extracted), f)