Example #1
def create_model():
    input_ = Input((400, 400, 3))
    backbone = ResNet50V2(input_tensor=input_,
                          weights='imagenet',
                          include_top=False)

    layer6 = backbone.get_layer('post_bn').output  #13, 13, 2048
    layer6 = BatchNormalization()(layer6)
    layer6 = LeakyReLU(alpha=.1)(layer6)

    decoder5 = Conv2DTranspose(256, (3, 3),
                               strides=(2, 2))(layer6)  #27, 27, 256
    decoder5 = BatchNormalization()(decoder5)
    decoder5 = LeakyReLU(alpha=.1)(decoder5)
    decoder5 = Conv2D(256, (3, 3))(decoder5)  #25, 25, 256
    layer5 = backbone.get_layer('conv4_block5_out').output  #25, 25, 1024
    layer5 = spatial_pyramid_block(layer5, 256, 2, 3, pad=1)  #25, 25, 256
    decoder5 = Concatenate()([decoder5, layer5])  #25, 25, 512
    decoder5 = res_block(decoder5, 256, activation=True)  #25, 25, 256

    decoder4 = Conv2DTranspose(128, (3, 3), strides=(2, 2),
                               padding='same')(decoder5)  #50, 50, 128
    layer4 = backbone.get_layer('conv3_block4_1_conv').output  #50, 50, 128
    layer4 = spatial_pyramid_block(layer4, 128, 2, 3, pad=2)  #50, 50, 128
    decoder4 = Concatenate()([decoder4, layer4])  #50, 50, 256
    decoder4 = Dropout(.2)(decoder4)
    decoder4 = res_block(decoder4, 128, activation=True)  #50, 50, 128

    decoder3 = Conv2DTranspose(64, (3, 3), strides=(2, 2),
                               padding='same')(decoder4)  #100, 100, 64
    layer3 = backbone.get_layer('conv2_block3_1_conv').output  #100, 100, 64
    layer3 = spatial_pyramid_block(layer3, 64, 2, 3)  #100, 100, 64
    decoder3 = Concatenate()([decoder3, layer3])  #100, 100, 128
    decoder3 = Dropout(.2)(decoder3)
    decoder3 = res_block(decoder3, 64, activation=True)  #100, 100, 64

    decoder2 = Conv2DTranspose(32, (3, 3), strides=(2, 2),
                               padding='same')(decoder3)  #200, 200, 32
    layer2 = backbone.get_layer('conv1_conv').output  #200, 200, 64
    layer2 = spatial_pyramid_block(layer2, 32, 2, 3)  #200, 200, 32
    decoder2 = Concatenate()([decoder2, layer2])  #200, 200, 64
    decoder2 = Dropout(.2)(decoder2)
    decoder2 = res_block(decoder2, 32, activation=True)  #200, 200, 32

    decoder1 = Conv2DTranspose(16, (3, 3), strides=(2, 2),
                               padding='same')(decoder2)  #400, 400, 16
    layer1 = input_  #400, 400, 3
    layer1 = spatial_pyramid_block(layer1, 16, 2, 3)  #400, 400, 16
    decoder1 = Concatenate()([decoder1, layer1])  #400, 400, 32
    decoder1 = Dropout(.2)(decoder1)
    decoder1 = res_block(decoder1, 16, activation=True)  #400, 400, 16
    decoder1 = Dropout(.3)(decoder1)

    output_ = Conv2D(1, (1, 1), padding='same',
                     activation='sigmoid')(decoder1)  #400, 400, 1

    model = Model(input_, output_, name='uspp_resnet50v2')

    return model
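A minimal usage sketch for the model above, assuming spatial_pyramid_block and res_block (referenced but not shown in this example) are defined elsewhere in the source file:

# Hedged sketch: compile the U-Net-style model for binary segmentation.
model = create_model()
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()  # expects 400x400 RGB inputs and predicts a 400x400x1 mask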
Example #2
def load_resnet50():
    weights_path = '../Models/Trained models/resnet50v2.h5'
    baseNet = ResNet50V2(weights=None,
                         include_top=False,
                         input_tensor=Input(shape=(224, 224, 3)))
    model = create_model_head(baseNet)
    model.load_weights(weights_path)
    return model
Example #3
def extract_features(data):
    conv_base = ResNet50V2(include_top=False,
                           weights="imagenet",
                           input_shape=data[0].shape)
    features = conv_base.predict(data)
    features = np.reshape(features,
                          (len(features), np.prod(features[0].shape)))
    return features
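Note that ResNet50V2's ImageNet weights expect inputs scaled by the matching preprocess_input, which the example above does not apply. A hedged usage sketch (the dummy batch is an assumption):

import numpy as np
from keras.applications.resnet_v2 import preprocess_input

data = np.random.rand(4, 224, 224, 3) * 255.0   # dummy image batch (assumption)
features = extract_features(preprocess_input(data))
print(features.shape)  # (4, 100352), i.e. 7*7*2048 per 224x224 image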
Example #4
    def get_model(self):
        """
            Model architecture
        """
        if self.conv_base:
            self.conv_base = self.conv_base(include_top=False,
                                            input_tensor=None,
                                            input_shape=self.input_shape,
                                            pooling=None,
                                            classes=None)
        else:
            self.conv_base = ResNet50V2(include_top=False,
                                        input_tensor=None,
                                        input_shape=self.input_shape,
                                        pooling=None,
                                        classes=None)  # rebuild on change

        # Define the tensors for the two input images
        left_input = Input(self.input_shape)
        right_input = Input(self.input_shape)

        # Convolutional Neural Network

        encoded_l = self.conv_base(left_input)
        encoded_r = self.conv_base(right_input)

        flatten = Flatten()
        dense = Dense(4096)

        flattened_l = flatten(encoded_l)
        flattened_r = flatten(encoded_r)
        dense_l = dense(flattened_l)
        dense_r = dense(flattened_r)

        # Add a customized layer to compute the absolute difference between the encodings

        L1_layer = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))
        L1_distance = L1_layer([dense_l, dense_r])

        # Add a dense layer to produce the similarity score; a sigmoid
        # activation (and a custom bias initializer) were left commented out here
        prediction = Dense(1)(L1_distance)

        # Connect the inputs with the outputs
        self.model_to_use = Model(inputs=[left_input, right_input],
                                  outputs=prediction)
        return self.model_to_use
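A hedged training sketch for the siamese model built above, assuming model is the network returned by get_model() and that left_images, right_images and same_labels are prepared pair data (assumptions, not part of the source). Because the final Dense(1) is linear, binary cross-entropy must be computed from logits:

from keras.losses import BinaryCrossentropy

model.compile(optimizer='adam',
              loss=BinaryCrossentropy(from_logits=True),  # the output layer has no sigmoid
              metrics=['accuracy'])
model.fit([left_images, right_images], same_labels, batch_size=16, epochs=5)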
Example #5
    def train(self):
        # re-size all the images to this
        IMAGE_SIZE = [224, 224]
        # add the pretrained ResNet50V2 backbone (without its classifier head)
        resnet = ResNet50V2(input_shape=IMAGE_SIZE + [3],
                            weights='imagenet',
                            include_top=False)
        # don't train existing weights
        for layer in resnet.layers:
            layer.trainable = False
        # useful for getting number of classes
        folders = glob(self.train_path + '*')
        # our layers - you can add more if you want
        x = Flatten()(resnet.output)
        # softmax matches the categorical class_mode used below
        prediction = Dense(len(folders), activation='softmax')(x)
        # create a model object
        model = Model(inputs=resnet.input, outputs=prediction)
        # view the structure of the model
        model.summary()
        # tell the model what cost and optimization method to use
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        # Use the ImageDataGenerator to import the images from the dataset
        train_datagen = ImageDataGenerator(rescale=1. / 255,
                                           shear_range=0.2,
                                           zoom_range=0.2,
                                           horizontal_flip=True)
        test_datagen = ImageDataGenerator(rescale=1. / 255)
        training_set = train_datagen.flow_from_directory(
            self.train_path,
            target_size=(224, 224),
            batch_size=32,
            class_mode='categorical')
        # note: the validation set also reads from train_path, as in the original
        test_set = test_datagen.flow_from_directory(self.train_path,
                                                    target_size=(224, 224),
                                                    batch_size=32,
                                                    class_mode='categorical')
        # fit the model (fit accepts generators; fit_generator is deprecated)
        r = model.fit(training_set,
                      validation_data=test_set,
                      epochs=10,
                      steps_per_epoch=2,
                      validation_steps=len(test_set))
        model.save(self.model_save_path)
Example #6
def build_ResNet50(input_tensor_shape):
    base_model = ResNet50V2(weights='imagenet',
                            include_top=False,
                            input_shape=input_tensor_shape)

    x_model = base_model.output

    # note: despite its name, this is a 2x2 average pool, not global pooling
    x_model = AvgPool2D(name='globalaveragepooling2d')(x_model)

    x_model = Dense(1024, activation='relu', name='fc1_Dense')(x_model)
    x_model = Dropout(0.5, name='dropout_1')(x_model)
    x_model = Flatten()(x_model)
    x_model = Dense(256, activation='relu', name='fc2_Dense')(x_model)
    x_model = Dropout(0.5, name='dropout_2')(x_model)

    predictions = Dense(3, activation='sigmoid', name='output_layer')(x_model)

    model = Model(inputs=base_model.input, outputs=predictions)

    return model
Example #7
def extract_features(directory):
    # load the pretrained model
    model = ResNet50V2(weights="imagenet")
    # re-structure the model: drop the ImageNet classifier and output the
    # penultimate (pooled) layer; layers.pop() does not rewire the graph in tf.keras
    model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
    # summarize
    print(model.summary())

    # save model
    model.save("ResNet50_feature_extraction.h5")
    # extract features from each photo
    features = dict()
    num_images = len(listdir(directory))

    for index, name in enumerate(listdir(directory)):
        # load an image from file
        filename = directory + "/" + name
        image = load_img(filename, target_size=(224, 224))

        # convert image pixels to numpy array
        image = img_to_array(image)

        # reshape data
        image = np.expand_dims(image, axis=0)
        # image = image[np.newaxis,:]

        # preprocess image for model
        image = preprocess_input(image)

        # get features
        feature = model.predict(image, verbose=0)

        # get image_id
        image_id = name.split(".")[0]

        # store feature
        features[image_id] = feature
        print(">(%i/%i) %s" % (index, num_images, name))

    return features
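A usage sketch for the extractor above (the photo directory name is hypothetical):

features = extract_features("photos")
print(len(features), next(iter(features.values())).shape)  # N vectors of shape (1, 2048)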
Example #8
from keras.datasets import cifar10
from keras.applications import VGG16, VGG19, Xception, ResNet101, ResNet101V2, ResNet152
from keras.applications import ResNet152V2, ResNet50, ResNet50V2, InceptionV3, InceptionResNetV2
from keras.applications import MobileNet, MobileNetV2, DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile

from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Flatten, BatchNormalization, Activation
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

#2. model
resnet = ResNet50V2(include_top=False, input_shape=(32, 32, 3))

# vgg.summary()

model = Sequential()
model.add(resnet)
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

model.summary()

#3. compile, fit
model.compile(optimizer=Adam(1e-4),
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
hist = model.fit(x_train,
                 y_train,
                 epochs=10,           # assumed values; the original
                 batch_size=32,       # snippet is truncated at this call
                 validation_data=(x_test, y_test))
Example #9
def Resnet_Net(trainable=None, net="ResNet50"):

    netold = ['ResNet50', 'ResNet101', 'ResNet152']
    # Preprocessing the dataset into keras feedable format
    if net not in netold:
        train_datagen = ImageDataGenerator(rotation_range=rotation,
                                           width_shift_range=width_shift,
                                           height_shift_range=height_shift,
                                           rescale=scale,
                                           shear_range=shear,
                                           zoom_range=zoom,
                                           horizontal_flip=horizontal,
                                           fill_mode=fill,
                                           validation_split=validation)
        test_datagen = ImageDataGenerator(rescale=scale, )
    if net in netold:
        train_datagen = ImageDataGenerator(
            dtype='float32',
            preprocessing_function=preprocess_input,
            validation_split=validation)
        test_datagen = ImageDataGenerator(
            dtype='float32', preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='training',
    )
    validation_generator = train_datagen.flow_from_directory(
        path,
        target_size=target,
        batch_size=batch,
        class_mode='categorical',
        subset='validation')

    models_list = [
        'ResNet50', 'ResNet101', 'ResNet152', 'ResNet50V2', 'ResNet101V2',
        'ResNet152V2'
    ]

    # Loading the ResNet50 Model

    if net == "ResNet50":
        resnet = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=input_sh,
                          pooling=pooling_model)
    if net == "ResNet101":
        resnet = ResNet101(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet152":
        resnet = ResNet152(include_top=False,
                           weights='imagenet',
                           input_shape=input_sh,
                           pooling=pooling_model)
    if net == "ResNet50V2":
        resnet = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=input_sh,
                            pooling=pooling_model)
    if net == "ResNet101V2":
        resnet = ResNet101V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net == "ResNet152V2":
        resnet = ResNet152V2(include_top=False,
                             weights='imagenet',
                             input_shape=input_sh,
                             pooling=pooling_model)
    if net not in models_list:
        raise ValueError('Please provide one of: ' + ', '.join(models_list))
    output = resnet.layers[-1].output
    if pooling_model is None:
        output = keras.layers.Flatten()(output)
    resnet = Model(resnet.input, outputs=output)
    print(resnet.summary())
    print('\n\n\n')
    # If you chose not for fine tuning
    if trainable is None:
        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        for layer in resnet.layers:
            layer.trainable = False
        print("The model summary of Resnet  -->\n\n\n"
              )  # In this the Resnet50 layers are not trainable

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)
        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()

    if trainable is not None:
        # Make last block of the conv_base trainable:

        for layer in resnet.layers[:trainable]:
            layer.trainable = False
        for layer in resnet.layers[trainable:]:
            layer.trainable = True

        print('Last block of the conv_base is now trainable')

        for i, layer in enumerate(resnet.layers):
            print(i, layer.name, layer.trainable)

        model = Sequential()
        model.add(resnet)
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        model.add(Dense(hidden, activation='relu'))
        model.add(Dropout(dropout_num))
        if classes == 1:
            model.add(Dense(classes, activation='sigmoid', name='Output'))
        else:
            model.add(Dense(classes, activation='softmax', name='Output'))

        model.compile(
            loss=loss_param,  # Change according to data
            optimizer=optimizers.RMSprop(),
            metrics=['accuracy'])
        print("The summary of final Model \n\n\n")
        print(model.summary())
        print('\n\n\n')

        fit_history = model.fit(
            train_generator,
            steps_per_epoch=len(train_generator.filenames) // batch,
            epochs=epoch,
            shuffle=True,
            validation_data=validation_generator,
            validation_steps=len(validation_generator.filenames) // batch,
            class_weight=n,
            callbacks=[
                EarlyStopping(patience=patience_param,
                              restore_best_weights=True),
                ReduceLROnPlateau(patience=patience_param)
            ])
        os.chdir(output_path)
        model.save("model.h5")
        print(fit_history.history.keys())
        plt.figure(1, figsize=(15, 8))

        plt.subplot(221)
        plt.plot(fit_history.history['accuracy'])
        plt.plot(fit_history.history['val_accuracy'])
        plt.title('model accuracy')
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.subplot(222)
        plt.plot(fit_history.history['loss'])
        plt.plot(fit_history.history['val_loss'])
        plt.title('model loss')
        plt.ylabel('loss')
        plt.xlabel('epoch')
        plt.legend(['train', 'valid'])

        plt.show()
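A hedged usage sketch; Resnet_Net relies on module-level globals (path, target, batch, input_sh, pooling_model, hidden, dropout_num, classes, loss_param, epoch, patience_param, n, output_path) that are not shown here:

Resnet_Net(trainable=None, net="ResNet50V2")  # frozen backbone, feature extraction
Resnet_Net(trainable=-12, net="ResNet50V2")   # fine-tune the last 12 backbone layers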
Example #10
def define_model(model_type):

    model = Sequential()

    # get models from ~/.keras/models

    if model_type == 'VGG16':
        model = VGG16(include_top=False,
                      weights="imagenet",
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    elif model_type == 'VGG19':
        model = VGG19(include_top=False,
                      weights="imagenet",
                      input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    elif model_type == 'ResNet50':
        model = ResNet50(include_top=False,
                         weights="imagenet",
                         input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    elif model_type == 'DenseNet121':
        model = DenseNet121(include_top=False,
                            weights="imagenet",
                            input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    elif model_type == 'MobileNet':
        model = MobileNet(include_top=False,
                          weights="imagenet",
                          input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    elif model_type == 'InceptionV3':
        model = InceptionV3(include_top=False,
                            weights="imagenet",
                            input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    elif model_type == 'ResNet50V2':
        model = ResNet50V2(include_top=False,
                           weights="imagenet",
                           input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    elif model_type == 'Xception':
        model = Xception(include_top=False,
                         weights="imagenet",
                         input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

    for layer in model.layers:
        layer.trainable = False

    if model_type == 'default':
        model.add(
            Conv2D(32, (3, 3),
                   activation='relu',
                   kernel_initializer='he_uniform',
                   padding='same',
                   input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        model.add(MaxPooling2D((2, 2)))
        model.add(
            Conv2D(64, (3, 3),
                   activation='relu',
                   kernel_initializer='he_uniform',
                   padding='same'))
        model.add(MaxPooling2D((2, 2)))
        model.add(
            Conv2D(128, (3, 3),
                   activation='relu',
                   kernel_initializer='he_uniform',
                   padding='same'))
        model.add(MaxPooling2D((2, 2)))

    # add new classifier layer
    flat1 = Flatten()(model.layers[-1].output)
    class1 = Dense(128, activation='relu',
                   kernel_initializer='he_uniform')(flat1)
    output = Dense(1, activation='sigmoid')(class1)
    # define new model
    model = Model(inputs=model.inputs, outputs=output)
    # compile model
    opt = SGD(learning_rate=LEARNING_RATE, momentum=MOMENTUM)
    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    print(model.summary())
    return model
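A usage sketch, assuming IMAGE_SIZE, LEARNING_RATE and MOMENTUM are module-level constants defined elsewhere:

model = define_model('ResNet50V2')  # frozen ImageNet backbone + binary sigmoid head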
Example #11
def train(num_epochs=100,
          num_files='all',
          seq_length=50,
          image_shape=(256 + 6, 256 + 6, 3),
          filepath='data_file.csv',
          datapath='resources/'):

    d = DataSet(filepath, datapath)
    if num_files == 'all':
        num_files_test = sum(y[0] == 'test' for y in d.data)
        num_files_train = sum(y[0] == 'train' for y in d.data)
    else:
        num_files_test = num_files
        num_files_train = num_files

    x, y = d.get_data('train',
                      num_files=num_files_train,
                      seq_length=seq_length,
                      image_shape=image_shape)
    x_test, y_test = d.get_data('test',
                                num_files=num_files_test,
                                seq_length=seq_length,
                                image_shape=image_shape)

    #% Build Model
    batch_size = num_files_train  # batch gradient descent (num_files may be 'all')
    num_features = 2048

    resnet = ResNet50V2(include_top=False, input_shape=image_shape)
    model = Sequential()
    model.add(TimeDistributed(resnet,
                              input_shape=(seq_length, ) + image_shape))
    model.add(TimeDistributed(MaxPool2D(pool_size=(9, 9))))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(units=num_features, activation='relu')))
    # Long Short-Term Memory to identify temporal features,
    # e.g. a face moving oddly in relation to the rest of the body
    model.add(LSTM(32, return_sequences=False, dropout=0.5))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))
    model.layers[0].trainable = False

    optimizer = Adam(learning_rate=1e-5, decay=1e-6)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    print(model.summary())
    #% Fit
    # Callbacks
    tb = TensorBoard(log_dir=os.path.join('data', 'logs'))
    early_stopper = EarlyStopping(patience=5)
    timestamp = time.time()
    csv_logger = CSVLogger(os.path.join('data', 'logs', 'training-' + \
            str(timestamp) + '.log'))

    model.fit(x,
              y,
              batch_size=batch_size,
              validation_data=(x_test, y_test),
              epochs=num_epochs,
              verbose=1,
              callbacks=[tb, early_stopper, csv_logger])

    #%
    Path('model').mkdir(parents=True, exist_ok=True)
    model.save(os.path.join('model', 'model.keras'))
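A quick shape check behind the pool_size=(9, 9) choice above; weights=None avoids downloading ImageNet weights for the probe:

probe = ResNet50V2(include_top=False, weights=None, input_shape=(262, 262, 3))
print(probe.output_shape)  # (None, 9, 9, 2048), hence MaxPool2D(pool_size=(9, 9))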
Example #12
def resnet50(image_size: Tuple[int, int], num_classes: int) -> Sequential:
    from keras.applications import ResNet50V2
    resnet_conv = ResNet50V2(weights="imagenet", include_top=False, input_shape=(*image_size, 3))

    return _fine_tuning_model(resnet_conv, num_classes)
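The _fine_tuning_model helper is not shown in this example; a plausible sketch, under the assumption that it freezes the backbone and adds a small classification head:

from keras.models import Sequential
from keras.layers import Dense, Flatten

def _fine_tuning_model(conv_base, num_classes):
    conv_base.trainable = False  # assumption: keep the pretrained weights frozen
    return Sequential([
        conv_base,
        Flatten(),
        Dense(256, activation='relu'),
        Dense(num_classes, activation='softmax'),
    ])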
Example #13
def encode_img(img: IMG_SZ):
    res_net = ResNet50V2(include_top=False,
                         weights='imagenet',
                         input_tensor=None)(img)
    return Flatten()(res_net)
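A wiring sketch for the encoder above (IMG_SZ is not defined in the snippet; a 224x224 RGB input is assumed):

from keras.layers import Input

img_in = Input((224, 224, 3))
encoded = encode_img(img_in)  # flattened ResNet50V2 features, shape (None, 100352)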
Example #14
from keras.applications import Xception, ResNet101, ResNet101V2, ResNet152, ResNet152V2
from keras.applications import ResNet50, ResNet50V2, InceptionV3, InceptionResNetV2
from keras.applications import MobileNet, MobileNetV2, DenseNet121, DenseNet169, DenseNet201
from keras.applications import NASNetLarge, NASNetMobile
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, BatchNormalization, Activation
from keras.optimizers import Adam

# vgg16 = VGG16() # (None, 224, 224, 3)
# model = VGG19()
model = Xception()
model = ResNet101()
model = ResNet101V2()
model = ResNet152()
model = ResNet152V2()
model = ResNet50()
model = ResNet50V2()
model = InceptionV3()
model = InceptionResNetV2()
model = MobileNet()
model = MobileNetV2()
model = DenseNet121()
model = DenseNet169()
model = DenseNet201()
model = NASNetLarge()
model = NASNetMobile()

# vgg16.summary()
# model = Sequential()
# model.add(vgg16)
# model.add(Flatten())
Example #15
#%%original vgg16
rows = 80
cols = 120
channels = 1
#model = VGG16(include_top=False,input_shape=(rows,cols,channels))
model = load_model(r"log\11_24_keras\KivlNet_part2_16.h5")
x = model.get_layer('conv2d_3').output
#x = model.get_layer('block3_conv3').output
# x = model.get_layer('lambda_6').output
model = Model(inputs=model.input, outputs=x)
#model.save("vgg_featur.h5")
#%%resnet50_v2
rows = 320
cols = 480
channels = 3
model = ResNet50V2(include_top=False, input_shape=(rows, cols, channels))
x = model.get_layer('conv3_block1_2_conv').output
# x = model.get_layer('block3_conv3').output
# x = model.get_layer('lambda_6').output
model = Model(inputs=model.input, outputs=x)
# model.save("resnet50_v2.h5")
#%%
# img = cv2.imread(r'D:\dataset\yushikeji\test\0.tiff')
# #img = cv2.resize(img,(320,180))
# img_batch = np.expand_dims(img, axis=0)
# conv_img = model.predict(img_batch)  # conv_img: convolution result
# conv_img_sum = np.sum(conv_img,axis=3)
# conv_img_sum = np.squeeze(conv_img_sum,axis=0)
# conv_img_sum = 1/(conv_img_sum/conv_img_sum.max())
# ret,conv_img_sum=cv2.threshold(np.expand_dims(conv_img_sum,axis=2),5,1,cv2.THRESH_BINARY)
# conv_img_sum[:40,:] = 0
Example #16
    cxr_val, label_val = loadCovid19ClassificationData(covid_validation_path,notcovid_validation_path, im_shape)

    # data label encoding
    label_encoder = LabelEncoder()
    label_train = label_encoder.fit_transform(label_train)
    label_train = to_categorical(label_train)
    label_val = label_encoder.fit_transform(label_val)
    label_val = to_categorical(label_val)

    # model and hyperparameters definition
    cxr_image_shape = cxr_train[0].shape
    num_classes = 2

    resnet50_model = ResNet50V2(
        include_top=False,
        weights="imagenet",
        input_shape=cxr_image_shape
    )

    new_output_layer = resnet50_model.output
    new_output_layer = GlobalAveragePooling2D()(new_output_layer)
    new_output_layer = Dropout(0.5)(new_output_layer)
    new_output_layer = Dense(num_classes, activation='sigmoid')(new_output_layer)
    resnet50_model = Model(inputs=resnet50_model.input, outputs=new_output_layer)

    resnet50_model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

    epochs = 30
    results = resnet50_model.fit(
        cxr_train,
        label_train,
        epochs=epochs,                         # assumed completion; the original
        validation_data=(cxr_val, label_val))  # snippet is truncated at this call
Example #17
def main(config=None):
    trial_name = os.path.splitext(__file__)[0]
    model_filename = os.path.sep.join(["output", trial_name, "model.h5"])
    checkpoint_folder = os.path.sep.join(["output", trial_name])
    from pathlib import Path
    Path(checkpoint_folder).mkdir(parents=True, exist_ok=True)

    import pandas as pd
    from keras.models import Sequential, load_model
    from keras.layers import Dense, Flatten, Dropout
    from keras.preprocessing.image import ImageDataGenerator
    from keras.optimizers import Adam
    from keras.applications import ResNet50V2
    from keras.applications.resnet_v2 import preprocess_input
    import tensorflow as tf

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        _ = tf.config.experimental.set_memory_growth(physical_devices[0], True)

    import wandb
    from wandb.keras import WandbCallback
    if config is None:
        wandb.init(project="minibar")
        config = wandb.config
    else:
        wandb.init(project="minibar", config=config)

    df_train = pd.read_csv('data/train_labels.csv')

    from helpers.decouple import decouple
    matrix_train, _ = decouple(df_train)
    from helpers.matrix_to_df import matrix_to_dfcount
    df_train_agg = matrix_to_dfcount(matrix_train)

    train_datagen = ImageDataGenerator(validation_split=0.2,
                                       horizontal_flip=True,
                                       preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_dataframe(
        dataframe=df_train_agg,
        directory='data/train',
        x_col='filename',
        y_col='count',
        target_size=(config['input_shape_height'],
                     config['input_shape_width']),
        batch_size=config['batch_size'],
        class_mode='raw',
        subset="training",
    )

    validation_generator = train_datagen.flow_from_dataframe(
        dataframe=df_train_agg,
        directory='data/train',
        x_col='filename',
        y_col='count',
        target_size=(config['input_shape_height'],
                     config['input_shape_width']),
        batch_size=config['batch_size'],
        class_mode='raw',
        subset="validation",
    )

    if os.path.isfile(model_filename) and config['continue_training']:
        model = load_model(model_filename)
    else:
        model = Sequential()

        model.add(
            ResNet50V2(include_top=False,
                       input_shape=(config['input_shape_height'],
                                    config['input_shape_width'], 3)))

        model.add(Flatten())
        model.add(Dense(units=512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(units=1))

        model.compile(optimizer=Adam(learning_rate=config['learning_rate']),
                      loss='mean_squared_error',
                      metrics=['accuracy'])  # note: accuracy is not meaningful for MSE regression
        model.save(model_filename)

    # construct the set of callbacks
    from helpers.epochcheckpoint import EpochCheckpoint
    callbacks = [
        EpochCheckpoint(checkpoint_folder, every=1, startAt=0),
        WandbCallback(save_model=False)
    ]

    model.fit(
        train_generator,
        # steps_per_epoch=100,
        epochs=config['epoch'],
        validation_data=validation_generator,
        # validation_steps=100,
        callbacks=callbacks,
        verbose=1,
        initial_epoch=config['initial_epoch'])
    model.save(model_filename)
Example #18
def main(config=None):
    trial_name = os.path.splitext(__file__)[0]
    model_filename = os.path.sep.join(["output", trial_name, "model.h5"])
    checkpoint_folder = os.path.sep.join(["output", trial_name])
    from pathlib import Path
    Path(checkpoint_folder).mkdir(parents=True, exist_ok=True)

    #import numpy as np # linear algebra
    import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
    #import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.use("Agg")


    from keras.models import Sequential,load_model
    #from keras.layers import Dense, Conv2D, Flatten, MaxPooling2D, Conv2DTranspose, BatchNormalization, UpSampling2D, Reshape
    from keras.layers import Dense, Flatten, Dropout
    from keras.metrics import Precision,Recall
    from helpers.overallperformance import OverallPerformance
    #from keras import backend as K
    #from keras.utils import to_categorical
    from keras.preprocessing.image import ImageDataGenerator
    from keras.optimizers import Adam
    from keras.applications import ResNet50V2
    from keras.applications.resnet_v2 import preprocess_input
    import tensorflow as tf

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        _ = tf.config.experimental.set_memory_growth(physical_devices[0], True)

    import wandb
    from wandb.keras import WandbCallback
    if config is None:
        wandb.init(project="minibar")
        config = wandb.config
    else:
        wandb.init(project="minibar", config=config)

    df_train = pd.read_csv('data/train_labels.csv')
    #df_test = pd.read_csv('data/test_labels.csv')

    from helpers.decouple import decouple
    matrix_train, _ = decouple(df_train)
    from helpers.matrix_to_df import matrix_to_df
    df_train_agg = matrix_to_df(matrix_train)

    train_datagen = ImageDataGenerator(validation_split=0.2,
                                       horizontal_flip=True,
                                       preprocessing_function=preprocess_input)

    train_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train_agg,
            directory='data/train',
            x_col='filename',
            y_col='class',
            target_size=(config['input_shape_height'], config['input_shape_width']),
            batch_size=config['batch_size'],
            class_mode='categorical',
            subset="training",)

    validation_generator = train_datagen.flow_from_dataframe(
            dataframe=df_train_agg,
            directory='data/train',
            x_col='filename',
            y_col='class',
            target_size=(config['input_shape_height'], config['input_shape_width']),
            batch_size=config['batch_size'],
            class_mode='categorical',
            subset="validation",)


    if os.path.isfile(model_filename) and config['continue_training']:
        model = load_model(model_filename)
    else:
        model = Sequential()

        model.add(ResNet50V2(include_top=False,
                             input_shape=(config['input_shape_height'],
                                          config['input_shape_width'], 3)))

        model.add(Flatten())
        model.add(Dense(units=512, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(units=40, activation='sigmoid'))

        model.compile(optimizer=Adam(learning_rate=config['learning_rate']),
                      loss='binary_crossentropy',
                      metrics=['accuracy', Precision(), Recall(),
                               OverallPerformance()])
        model.save(model_filename)

    # construct the set of callbacks
    from helpers.epochcheckpoint import EpochCheckpoint
    callbacks = [
        EpochCheckpoint(checkpoint_folder, every=1, startAt=0),
        WandbCallback(save_model=False)
    ]

    model.fit(
        train_generator,
        # steps_per_epoch=100,
        epochs=config['epoch'],
        validation_data=validation_generator,
        # validation_steps=100,
        callbacks=callbacks,
        verbose=1,
        initial_epoch=config['initial_epoch'])
    model.save(model_filename)