Example #1
def frozen_resnet(input_size,
                  n_classes,
                  local_weights="/resnets/resnet50v2_notop.h5"):
    if local_weights and path.exists(local_weights):
        print(f'Using {local_weights} as local weights.')
        model_ = ResNet50V2(include_top=False,
                            input_tensor=Input(shape=input_size),
                            weights=local_weights)
    else:
        print(
            f'Could not find local weights {local_weights} for ResNet. Using remote weights.'
        )
        model_ = ResNet50V2(include_top=False,
                            input_tensor=Input(shape=input_size))
    # up to here is the part that sets the initial weights

    for layer in model_.layers:
        layer.trainable = False  # frozen for transfer learning

    # What puzzles me: if we freeze everything, we only ever use the local weights -- so wouldn't nothing get trained?
    # The code clearly freezes every base layer, yet the model trains well when epochs increase, which is also strange.
    x = Flatten(input_shape=model_.output_shape[1:])(model_.layers[-1].output)
    x = Dense(n_classes, activation='softmax')(x)

    frozen_model = Model(model_.input, x)

    return frozen_model
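To answer the question in the comments: the frozen base contributes no trainable weights, but the newly added Dense head does, so the model can still learn. A minimal check, assuming the function above is available alongside tensorflow.keras (input size and class count are assumptions):

model = frozen_resnet(input_size=(224, 224, 3), n_classes=10)
print(len(model.trainable_weights))      # 2: the Dense head's kernel and bias
print(len(model.non_trainable_weights))  # all of the frozen ResNet50V2 weights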
Example #2
def frozen_resnet(input_size, n_classes, local_weights="/resnets/resnet50v2_notop.h5"):
    if local_weights and path.exists(local_weights):
        print(f'Using {local_weights} as local weights.')
        model_ = ResNet50V2(
            include_top=False,
            input_tensor=Input(shape=input_size),
            weights=local_weights)
    else:
        print(
            f'Could not find local weights {local_weights} for ResNet. Using remote weights.')
        model_ = ResNet50V2(
            include_top=False,
            input_tensor=Input(shape=input_size))

    #x = Flatten(input_shape=model_.output_shape[1:])(model_.layers[-1].output)
    x = model_.output
    x = GlobalAveragePooling2D()(x)
    #x = Dense(256, kernel_regularizer=l2(0.01), bias_regularizer=l2(0.01), activation='relu')(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.5)(x)
    #x = Dense(256, activation='relu')(x)
    #x = Dropout(0.5)(x)
    x = Dense(n_classes, activation='softmax')(x)
    frozen_model = Model(model_.input, x)

    return frozen_model
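Note that, unlike Example #1, this variant never sets layer.trainable = False, so despite the frozen_model name the whole network trains. A hedged usage sketch (shape, class count, and optimizer are assumptions, not from the source):

model = frozen_resnet((224, 224, 3), n_classes=5)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])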
Example #3
def MyModel(params, f):

    model_name = params.models + str(f) + "epochs:001-val_acc:0.439.hdf5"
    if not os.path.isfile(model_name):
        print("Creating Model : ", model_name)
        model_base = ResNet50V2(include_top=False,
                                weights='imagenet',
                                input_tensor=None,
                                input_shape=(params.psize[0], params.psize[1],
                                             params.nch),
                                pooling='avg',
                                classes=3)

        x = model_base.get_layer('avg_pool').output
        dense = Dense(15,
                      kernel_initializer='he_normal',
                      bias_initializer='zeros',
                      kernel_regularizer=l1(0.01),
                      name='classifier')(x)

        prediction = Activation("softmax", name="softmax")(dense)

        model = Model(model_base.inputs, outputs=prediction, name='ResNet')

        loadmdl = 0
    else:
        print("Loading model: ", model_name)
        #model = load_model( model_name+'.model')
        #print("Creating Model : ", model_name)
        model_base = ResNet50V2(include_top=False,
                                weights='imagenet',
                                input_tensor=None,
                                input_shape=(params.psize[0], params.psize[1],
                                             params.nch),
                                pooling='avg',
                                classes=3)

        x = model_base.get_layer('avg_pool').output
        dense = Dense(15,
                      kernel_initializer='he_normal',
                      bias_initializer='zeros',
                      kernel_regularizer=l1(0.01),
                      name='classifier')(x)

        prediction = Activation("softmax", name="softmax")(dense)

        model = Model(model_base.inputs, outputs=prediction, name='ResNet')

        model.load_weights(model_name)
        loadmdl = 1

    return model, model_name, loadmdl
Example #4
    def define_model(self):
        model = Sequential()
        model.add(
            ResNet50V2(weights=None,
                       include_top=False,
                       input_shape=(self.IMG_HEIGHT, self.IMG_WIDTH, 3)))
        model.add(
            Conv2D(1024, kernel_size=(1, 1), activation='relu',
                   strides=(1, 1)))
        model.add(
            keras.layers.BatchNormalization(axis=-1,
                                            momentum=0.99,
                                            epsilon=0.001))
        model.add(
            Conv2D(1024, kernel_size=(3, 3), activation='relu',
                   strides=(1, 1)))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(
            Conv2D(1024, kernel_size=(1, 1), activation='relu',
                   strides=(1, 1)))
        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(
            keras.layers.BatchNormalization(axis=-1,
                                            momentum=0.99,
                                            epsilon=0.001))
        model.add(Dense(1, activation='sigmoid'))
        return model
Example #5
def build_2layer_model(Params):
    '''
    Build a complex model with one more trained layer
    :param Params: pipe parameters
    :return: pre-trained resNet model
    '''

    base_model = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=(224, 224, 3),
                            pooling='avg')

    last_layer = base_model.output

    new_layer = Dense(units=3, activation='relu')(last_layer)

    predictions = Dense(1, activation='sigmoid')(new_layer)

    model = Model(inputs=base_model.input, outputs=predictions)

    for layer in base_model.layers:
        layer.trainable = False

    optimizer = SGD(learning_rate=Params['Optimizer']['SGD']['learning_rate'],
                    momentum=Params['Optimizer']['SGD']['momentum'],
                    nesterov=Params['Optimizer']['SGD']['nesterov'])

    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])

    return model
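build_2layer_model indexes a nested Params dict. A minimal hypothetical example of the expected structure (all values are assumptions):

Params = {'Optimizer': {'SGD': {'learning_rate': 0.01,
                                'momentum': 0.9,
                                'nesterov': True}}}
model = build_2layer_model(Params)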
Example #6
def build_model(backbone_name, input_shape, n_classes):
    """Build neural network architecture

    @param backbone_name:                       name of the backbone from implemented names
    @param input_shape:                         neural network input shape
    @param n_classes:                           number of classes
    @return:                                    classification model
    """
    inputs = Input(shape=input_shape)

    if backbone_name == 'inceptionv3':
        backbone = InceptionV3(weights=None, include_top=True, classes=n_classes)(inputs)
    elif backbone_name == 'densenet121':
        backbone = DenseNet121(weights=None, include_top=True, classes=n_classes)(inputs)
    elif backbone_name == 'efficientnetb2':
        backbone = EfficientNetB2(weights=None, include_top=True, classes=n_classes)(inputs)
    elif backbone_name == 'mobilenetv2':
        backbone = MobileNetV2(weights=None, include_top=True, classes=n_classes)(inputs)
    elif backbone_name == 'resnet50':
        backbone = ResNet50V2(weights=None, include_top=True, classes=n_classes)(inputs)
    else:
        raise ValueError(f'Not implemented for {backbone_name} backbone.')

    final_model = Model(inputs, backbone)

    final_model.summary()

    return final_model
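A hedged usage sketch (shape and class count are assumptions). Because include_top=True, the input shape must match each backbone's default size, e.g. 224x224 for resnet50 but 299x299 for inceptionv3:

model = build_model('resnet50', input_shape=(224, 224, 3), n_classes=10)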
Example #7
def extract_feature(X, cnn_arch="resnet50"):
    if cnn_arch == "resnet50":
        from keras.applications.resnet import preprocess_input
        cnn = ResNet50(include_top=False, weights='imagenet')

    elif cnn_arch == "resnet50v2":
        from keras.applications.resnet_v2 import preprocess_input
        cnn = ResNet50V2(include_top=False, weights='imagenet')
    elif cnn_arch == "inceptionv3":
        from keras.applications.inception_v3 import preprocess_input
        cnn = InceptionV3(include_top=False, weights='imagenet')
        # resize each image (not the whole batch) to InceptionV3's 299x299 input
        X = np.array([resize(x, (299, 299, 3)) for x in X])
    else:
        raise ValueError(f"Not supported cnn_arch {cnn_arch}")

    input = Input(shape=X.shape[1:], name='image_input')
    x = cnn(input)
    x = Flatten()(x)
    model = Model(inputs=input, outputs=x)

    X = preprocess_input(X)

    return model.predict(X, batch_size=64)
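A hypothetical call, with random pixels standing in for real images (batch size and image size are assumptions):

import numpy as np
X = (np.random.rand(8, 224, 224, 3) * 255).astype('float32')
features = extract_feature(X, cnn_arch='resnet50v2')
print(features.shape)  # (8, n_features) after the Flatten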
Example #8
    def load_model(self):
        FACTOR = 0.70
        HEIGHT = 137
        WIDTH = 236
        HEIGHT_NEW = int(HEIGHT * FACTOR)
        WIDTH_NEW = int(WIDTH * FACTOR)
        
        base_model=ResNet50V2(include_top=False, weights='imagenet', input_tensor=None, input_shape=(HEIGHT_NEW,WIDTH_NEW,3), pooling=None, classes=1000)
        # base_model.trainable=False
        x = base_model.output
        x = layers.GlobalAveragePooling2D()(x)
        
        grapheme_root = layers.Dense(168, activation = 'softmax', name = 'root')(x)
        vowel_diacritic = layers.Dense(11, activation = 'softmax', name = 'vowel')(x)
        consonant_diacritic = layers.Dense(7, activation = 'softmax', name = 'consonant')(x)

        model = Model(inputs=base_model.input,outputs = [grapheme_root, vowel_diacritic, consonant_diacritic])
        for layer in base_model.layers:
            layer.trainable = True
        model.compile(optimizer='adam', loss = {'root' : 'categorical_crossentropy', 
                    'vowel' : 'categorical_crossentropy', 
                    'consonant': 'categorical_crossentropy'},
                    loss_weights = {'root' : 0.5,
                            'vowel' : 0.25,
                            'consonant': 0.25},
                    metrics={'root' : 'accuracy', 
                    'vowel' : 'accuracy', 
                    'consonant': 'accuracy'})
        # print(model.summary())

        return model
Example #9
    def build_model(self, weights, includeTop, inputShape):
        baseModel = ResNet50V2(weights=weights,
                               include_top=includeTop,
                               input_shape=inputShape)

        model = K.models.Model(inputs=baseModel.input,
                               outputs=baseModel.get_layer('avg_pool').output)

        return model
Example #10
def frozen_resnet(input_size, n_classes):
    model_ = ResNet50V2(include_top=False, input_tensor=Input(shape=input_size))
    for layer in model_.layers:
        layer.trainable = False
    x = Flatten(input_shape=model_.output_shape[1:])(model_.layers[-1].output)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, activation='softmax')(x)
    frozen_model = Model(model_.input, x)

    return frozen_model
Example #11
def frozen_resnet(input_size, n_classes, local_weights="/resnets/resnet50v2_notop.h5"):
    if local_weights and path.exists(local_weights):
        print(f'Using {local_weights} as local weights.')
        model_ = ResNet50V2(
            include_top=False,
            input_tensor=Input(shape=input_size),
            weights=local_weights)
    else:
        print(
            f'Could not find local weights {local_weights} for ResNet. Using remote weights.')
        model_ = ResNet50V2(
            include_top=False,
            input_tensor=Input(shape=input_size))
    for layer in model_.layers:
        layer.trainable = False
    x = Flatten(input_shape=model_.output_shape[1:])(model_.layers[-1].output)
    x = Dense(n_classes, activation='softmax')(x)
    frozen_model = Model(model_.input, x)

    return frozen_model
Example #12
def createModel(s,
                optimizer,
                lr,
                outputNeurons=1,
                dropoutProp=0.0,
                layers=False,
                flat=False,
                max=False,
                decay=0.0,
                moment=0.9):
    """
   function that preapre the model for training
   :param s: size of the photo
   :param optimizer: string- which optimizer to use
   :param lr: learning rate of the net
   :param dropoutProp: rate for dropout configuration
   :param layers: boolean for adding 2 new layers
   :param flat: boolean that indicates if to flatten the 1 before last layer
   :param max: boolean that indicates if to use max pooling instead of average pooling
   :param decay: weight decay
   :return: model after compile
   """
    input = Input(shape=(s, s, 3))
    net = ResNet50V2(include_top=True,
                     weights='imagenet',
                     input_tensor=input,
                     input_shape=None,
                     pooling=None,
                     classes=1000)
    if flat or max:
        lastLayer = net.get_layer('post_relu').output
        if flat:
            lastLayer = Flatten()(lastLayer)
        else:
            lastLayer = GlobalMaxPooling2D()(lastLayer)
    else:
        lastLayer = net.get_layer('avg_pool').output
    if layers:
        out, y = addLayers(lastLayer, dropoutProp, outputNeurons)
    else:
        y = -1
        out = Dense(units=outputNeurons,
                    name='output_layer',
                    activation='sigmoid')(lastLayer)
    model = Model(input, out)
    for layer in model.layers[:y]:
        layer.trainable = False
    optim = defineOptimizer(optimizer, lr, decay, moment)
    model.compile(optimizer=optim,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #13
def extract_features(directory, textfile, m):
    if m == 1:
        # load the model
        model = VGG16()
        # re-structure the model
        model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
        # summarize
        print(model.summary())
        m = "vgg16"
    elif m == 2:
        # load the model
        model = InceptionV3(weights='imagenet')
        # re-structure the model
        model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
        # summarize
        print(model.summary())
        m = "IV3"
    else:
        # load the model
        model = ResNet50V2()
        # re-structure the model
        model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
        # summarize
        print(model.summary())
        m = "ResNet50V2"

    # Read Train ids as a list for feature extraction
    with open(textfile) as f:
        train_ids = f.read().splitlines()
    # extract features from each photo
    features = dict()
    for name in train_ids:
        # load an image from file
        filename = directory + name
        # InceptionV3 expects 299x299 input; the other models use 224x224
        size = 299 if m == "IV3" else 224
        image = load_img(filename, target_size=(size, size))
        # convert the image pixels to a numpy array
        image = img_to_array(image)
        # reshape data for the model
        image = image.reshape(
            (1, image.shape[0], image.shape[1], image.shape[2]))
        # preprocess the image for the selected model
        image = preprocess_input(image)
        # get features
        feature = model.predict(image, verbose=0)
        # get image id
        image_id = name.split('.')[0]
        # store feature
        features[image_id] = feature
        print('>%s' % name)
    return features, m
Example #14
def RF_createResNetModel(train_x, train_y, Params):
    '''
    1. creates a model with a single-neuron output and trains it
    2. removes the single neuron to leave an output of 50 values
    :param train_x: images
    :param train_y: labels
    :param Params: pipe parameters
    :return: trained model for feature extraction
    '''

    print('Building and fitting the ResNet')
    # creating and training a new model with a new 50-neuron layer
    base_model = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=(224, 224, 3),
                            pooling='avg')

    last_layer = base_model.output

    # add layer with 50 neurons
    new_layer = Dense(50, activation='sigmoid')(last_layer)
    # add the output layer with sigmoid activation
    predictions = Dense(1, activation='sigmoid')(new_layer)

    model = Model(inputs=base_model.input, outputs=predictions)

    for layer in base_model.layers:
        layer.trainable = False

    optimizer = SGD(learning_rate=Params['Optimizer']['SGD']['learning_rate'],
                    decay=Params['Optimizer']['SGD']['decay'],
                    momentum=Params['Optimizer']['SGD']['momentum'],
                    nesterov=Params['Optimizer']['SGD']['nesterov'])

    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])

    model.fit(train_x,
              train_y,
              batch_size=Params['Model']['batch_size'],
              epochs=Params['Model']['epochs'])

    print('finished training the ResNet')

    # drop the single-neuron output and keep the 50-neuron layer as the output;
    # popping model.layers does not rewire the graph in tf.keras, so rebuild
    # the model from the second-to-last layer's output instead
    model2 = Model(model.input, model.layers[-2].output)

    return model2
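A hypothetical shape check on the returned feature extractor (train_x, train_y, and Params are assumed to exist as described in the docstring):

extractor = RF_createResNetModel(train_x, train_y, Params)
feats = extractor.predict(train_x[:1])
print(feats.shape)  # (1, 50): one 50-value feature vector per image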
Example #15
def pearl_type_model_resnet50v2(my_training_batch_generator, my_test_batch_generator,
                                save_dir=os.path.join(os.getcwd(), 'saved_models')
                                , batch_size=32
                                , input_shape=(40, 40, 3)):
    model_name = 'trained_model_resnet50v2.h5'
    restnet = ResNet50V2(include_top=False, weights=None, input_shape=input_shape, classes=4)
    output = restnet.layers[-1].output
    output = keras.layers.Flatten()(output)
    restnet = Model(restnet.input, outputs=output)
    for layer in restnet.layers:
        layer.trainable = False
    restnet.summary()
    model = Sequential()
    model.add(restnet)
    model.add(Dense(512, activation='relu'))  # input shape is inferred from the base model
    # model.add(Dropout(0.3))
    model.add(Dense(512, activation='relu'))
    # model.add(Dropout(0.3))
    model.add(Dense(4, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(lr=0.01),
                  metrics=['accuracy'])
    model.summary()

    n_train = My_Custom_Generator.getNumber(my_training_batch_generator)
    n_test = My_Custom_Generator.getNumber(my_test_batch_generator)
    print('number of training images: ', n_train)
    print('number of val images: ', n_test)

    history = model.fit_generator(my_training_batch_generator,
                                  steps_per_epoch=int(n_train // batch_size),
                                  epochs=5,
                                  validation_data=my_test_batch_generator,
                                  validation_steps=int(n_test // batch_size),
                                  verbose=1)

    hist_df = pd.DataFrame(history.history)

    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    print('type of model name: ', model_name)
    model_path = os.path.join(save_dir, str(model_name))
    model.save(model_path)
    print('Saved trained model at %s ' % model_path)

    # save to pickle:
    hist_file = '_history'
    with open(model_path + hist_file, 'wb') as file_pi:
        pickle.dump(history.history, file_pi)  # the History object itself is not picklable
Example #16
def nn(n_classes, fc_d=512):
    model = Sequential()

    model.add(
        ResNet50V2(input_shape=(224, 224, 3),
                   include_top=False,
                   weights='imagenet'))

    model.add(Flatten())
    dense_norm_relu(model, fc_d)
    dense_norm_relu(model, fc_d)

    model.add(Dense(n_classes))
    model.add(Softmax())
    return model
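dense_norm_relu is not shown in the source; a plausible hypothetical implementation, consistent with how it is called above:

from tensorflow.keras.layers import Dense, BatchNormalization, ReLU

def dense_norm_relu(model, units):
    # hypothetical helper: fully connected layer + batch norm + ReLU
    model.add(Dense(units))
    model.add(BatchNormalization())
    model.add(ReLU())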
Example #17
def build_COVIDNet(num_classes=3, flatten=True, checkpoint=''):
    base_model = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=(224, 224, 3))
    x = base_model.output
    if flatten:
        x = Flatten()(x)
    else:
        x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(num_classes, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    if len(checkpoint):
        model.load_weights(checkpoint)
    return model
Example #18
def ResNet(input_shape, classes):

    input_tensor = Input(shape=input_shape)
    base_model = ResNet50V2(input_tensor=input_tensor,
                            include_top=False,
                            weights=None)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(classes, activation='softmax')(x)
    model = Model(inputs=input_tensor, outputs=predictions)
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer='rmsprop',
        #                       optimizer=keras.optimizers.Adadelta(),
        metrics=['accuracy'])
    return model
Example #19
def ResNet50V2modelA(no_classes, shape):
    """
        Deep CNN 50 layers
    """
    base_model = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=shape)
    # Taking the output of the last convolution block in ResNet50
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax')(x)
    model = Model(base_model.input, outputs=predictions)
    # Training only top layers i.e. the layers which we have added in the end
    for layer in base_model.layers:
        layer.trainable = False
    return model
Example #20
	def __init__(self, name, lr=0.001):
		super().__init__(name = name,  lr = lr)
		weight_decay = 0.0001
		pretrained = ResNet50V2(include_top = False, weights = 'imagenet', classes = 4)

		self.input_size = (224,224, 3)
		self.model = Sequential()
		self.model.add(pretrained)
		self.model.add(GlobalAveragePooling2D())
		self.model.add(Dropout(rate = 0.5))
		self.model.add(Dense(2048, activation='relu'))
		self.model.add(Dropout(rate = 0.5))
		self.model.add(Dense(4, activation='softmax'))

		# adam_opti = Adam(lr = learning_rate)
		sgd_opti = SGD(lr = self.lr, momentum = 0.9)
		self.model.compile(optimizer = sgd_opti, loss='categorical_crossentropy', metrics = ['accuracy'])
Example #21
def ResNet50V2model(no_classes, shape):
    """
        Deep CNN 50 layers
    """
    base_model = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=shape)
    # Freeze the base_model
    base_model.trainable = False
    inputs = Input(shape=shape)  # match the base model's input shape
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu', name='predictions')(x)
    #x = Dropout(0.2)(x)
    predictions = Dense(no_classes, activation='softmax')(x)
    model = Model(inputs, outputs=predictions)
    return model
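Calling base_model(inputs, training=False) keeps the BatchNormalization layers in inference mode, which is the documented Keras pattern for safe fine-tuning later. A hedged sketch of that follow-up step (class count and learning rate are assumptions, keras assumed imported):

model = ResNet50V2model(no_classes=4, shape=(224, 224, 3))
model.trainable = True  # later: unfreeze everything; BN stays in inference mode
model.compile(optimizer=keras.optimizers.Adam(1e-5),  # low LR to protect pretrained weights
              loss='categorical_crossentropy',
              metrics=['accuracy'])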
Example #22
    def build_model(self):
        #================= Settings =========================
        weight_decay = 0.001

        #================= Dense ============================
        base_model = ResNet50V2(weights='imagenet', input_shape=(self.input_shape), include_top=False)
        # base_model = ResNet50V2(weights='imagenet', include_top=False)
        x = base_model.output
        x = GlobalAveragePooling2D()(x)

        #================= Output - classification head ============================
        classification_output = Dense(self.num_classes, name="classification_head_before_activation")(x)
        classification_output = Activation('softmax', name="classification_head")(classification_output)

        #================= The final model ============================
        for layer in base_model.layers:
            layer.trainable = True
        model = Model(inputs=base_model.input, outputs=classification_output)
        return model
Example #23
def base_Network():
    '''
    Build a transfer-learning model from ResNet50V2 - remove the last layer and add a dense sigmoid layer
    :return: the new base model
    '''
    # Create the base model from the pre-trained model ResNet50V2
    base_model = ResNet50V2(include_top=False,
                            weights='imagenet',
                            input_shape=(224, 224, 3),
                            pooling='avg')
    #base_model = VGG16(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
    # Freeze the convolutional base
    outputX = base_model.layers[-1].output
    out = Dense(1, activation='sigmoid')(outputX)
    base_model = Model(base_model.input, outputs=out)
    for layer in base_model.layers[:-1]:
        layer.trainable = False
    #base_model.summary()
    optimizer = optimizers.RMSprop(lr=0.005)
    base_model.compile(loss='binary_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])
    return base_model
Example #24
def N_last_layers_AND_more_Network(NetworkParams):
    '''
    Build a transfer-learning model from ResNet50V2 - keep the N last layers trainable and add dropout, an FC layer, and a dense sigmoid layer.
    :return: the new base model
    '''
    # Create the base model from the pre-trained model ResNet50V2
    model = ResNet50V2(include_top=False,
                       weights='imagenet',
                       input_shape=(224, 224, 3))
    #model.summary()
    # Freeze the convolutional base
    for layer in model.layers[:-NetworkParams['N_layers']]:
        layer.trainable = False
    global_average_layer = Flatten(name='GAvgPool2D')  # despite the name, this is a Flatten, not global average pooling
    drop1 = Dropout(rate=NetworkParams['dropout'])
    fullyConnected1 = Dense(1000,
                            activation='relu',
                            name='fullyConnected1',
                            kernel_regularizer=regularizers.l2(0.05))
    drop2 = Dropout(rate=NetworkParams['dropout'])
    fullyConnected2 = Dense(1000,
                            activation='relu',
                            name='fullyConnected2',
                            kernel_regularizer=regularizers.l2(0.05))
    drop3 = Dropout(rate=NetworkParams['dropout'])
    prediction_layer = Dense(1, activation='sigmoid', name='predictions')
    #drop2, fullyConnected2, drop3,
    Improved_model = Sequential([
        model, drop1, global_average_layer, fullyConnected1, prediction_layer
    ])
    #Improved_model.summary()

    optimizer = optimizers.Adam(lr=0.005)
    Improved_model.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
    return Improved_model
Example #25
def build_baseline_model(Params):
    '''
    building a pre-trained baseline model
    :param Params: pipe parameters
    :return: pre-trained resNet model
    '''

    # using the built-in avg pooling option on the last conv layer
    model_1 = ResNet50V2(include_top=False,
                         weights='imagenet',
                         input_shape=(224, 224, 3),
                         pooling='avg')

    # Taking the output of the ResNet50 vector
    last_layer = model_1.output

    # adding the output layer using the sigmoid function to get probability
    predictions = Dense(1, activation='sigmoid')(last_layer)

    # Model to be trained
    model = Model(inputs=model_1.input, outputs=predictions)

    # Train only the layers which we have added at the end
    for layer in model_1.layers:
        layer.trainable = False

    optimizer = SGD(learning_rate=Params['Optimizer']['SGD']['learning_rate'],
                    momentum=Params['Optimizer']['SGD']['momentum'],
                    nesterov=Params['Optimizer']['SGD']['nesterov'])

    # using SGD(stochastic gradient descent)
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])

    return model
Example #26
def N_Last_Layers_Network(NetworkParams):
    '''
    Build a transfer-learning model from ResNet50V2 - freeze all but the N last layers and add a dense sigmoid layer
    :return: the new base model
    '''
    # Create the base model from the pre-trained model ResNet50V2
    model = ResNet50V2(include_top=False,
                       weights='imagenet',
                       input_shape=(224, 224, 3),
                       pooling='avg')
    model.summary()
    outputX = model.layers[-1].output
    out = Dense(1, activation='sigmoid')(outputX)
    Improved_model = Model(model.input, outputs=out)
    # Freeze the convolutional base
    for layer in Improved_model.layers[:-NetworkParams['N_layers']]:
        layer.trainable = False
    Improved_model.summary()

    optimizer = optimizers.Adam(lr=0.005)
    Improved_model.compile(loss='binary_crossentropy',
                           optimizer=optimizer,
                           metrics=['accuracy'])
    return Improved_model
Example #27
def trainModel(e, b, t):

    # Epoch = 1
    # Batch = 16
    # timeTrain = 2
    Epoch = e
    Batch = b
    timeTrain = t

    text_file = open('Chap6Result/' + str(Epoch) + "." + str(Batch) + ".txt",
                     "w")

    for i in range(timeTrain):
        print('Download Model...')
        base_model = ResNet50V2(include_top=False,
                                weights='imagenet',
                                input_shape=(224, 224, 3),
                                classes=3)
        x = base_model.output

        # Epoch = 50
        # Batch = 16
        Aug = 'True'

        x = GlobalAveragePooling2D()(x)
        # x = Dense(1024, activation='relu')(x)
        # x = Dense(1024, activation='relu')(x)
        # x = Dense(1024, activation='relu')(x)
        # x = Dense(1024, activation='relu')(x)
        # x = Dense(512, activation='relu')(x)
        predictions = Dense(3, activation='softmax')(x)

        model = Model(inputs=base_model.input, outputs=predictions)

        train_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input,
            zoom_range=0.2,
            width_shift_range=0.3,
            height_shift_range=0.3,
            shear_range=0.2,
            brightness_range=(0.5, 1.5),
            horizontal_flip=True,
            vertical_flip=True,
            fill_mode='nearest')

        print('Create dataset ...')
        train_generator = train_datagen.flow_from_directory(
            'Images/Train',
            target_size=(224, 224),
            color_mode='rgb',
            batch_size=Batch,
            class_mode='categorical',
            shuffle=True)

        model.compile(optimizer='Adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        step_size_train = train_generator.n // train_generator.batch_size

        test_datagen = ImageDataGenerator(
            preprocessing_function=preprocess_input)
        validation_generator = test_datagen.flow_from_directory(
            "Images/Test",
            target_size=(224, 224),
            color_mode='rgb',
            batch_size=Batch,
            class_mode='categorical',
            shuffle=False)

        print('Training ...')
        H = model.fit_generator(generator=train_generator,
                                validation_data=validation_generator,
                                steps_per_epoch=step_size_train,
                                epochs=Epoch)

        source = "ResNetModel/"
        finishTime = time.strftime("%Y.%m.%d_%H.%M.%S")
        os.makedirs(source + finishTime)

        N = Epoch
        plt.style.use("ggplot")
        plt.figure()
        plt.plot(np.arange(0, N), H.history["loss"], label="train_loss")
        plt.plot(np.arange(0, N), H.history["val_loss"], label="val_loss")
        plt.title("Training Loss on Dataset")
        plt.xlabel("Epoch #")
        plt.ylabel("Loss")
        plt.legend(loc="lower left")
        plt.savefig(source + finishTime + "/loss.png")

        plt.figure()
        plt.plot(np.arange(0, N), H.history["accuracy"], label="train_acc")
        plt.plot(np.arange(0, N), H.history["val_accuracy"], label="val_acc")
        plt.title('Training Accuracy on Dataset')
        plt.xlabel("Epoch #")
        plt.ylabel("Accuracy")
        plt.legend(loc="lower left")
        plt.savefig(source + finishTime + "/acurracy.png")

        print('Testing ...')
        # file validation picture
        files = []
        # r=root, d=directories, f = files
        for r, d, f in os.walk("Images/Test"):
            for file in f:
                if '.jpg' in file:
                    files.append(os.path.join(r, file))

        Y_pred = model.predict_generator(validation_generator,
                                         len(files) // Batch + 1)

        y_pred = np.argmax(Y_pred, axis=1)
        y_test = validation_generator.classes

        print(y_pred)
        print('Confusion Matrix')
        print(confusion_matrix(y_test, y_pred))
        print('Classification Report')
        target_names = train_generator.class_indices.keys()
        resultString = classification_report(y_test,
                                             y_pred,
                                             target_names=target_names)
        print(resultString)
        modelAcc = accuracy_score(y_test, y_pred)
        n = text_file.write(resultString + '\n')

        # save all model model
        # model.save(source+finishTime+'/Inception3.'+str("{:.2f}".format(modelAcc))+'.'+str(Epoch)+'.'+str(Batch)+'.'+Aug+'.h5')

        # save model weight
        model.save_weights(source + finishTime + '/Inception3.Weight.' +
                           str("{:.2f}".format(modelAcc)) + '.' + str(Epoch) +
                           '.' + str(Batch) + '.' + Aug + '.h5')

        # save model layers
        model_yaml = model.to_yaml()
        with open(
                source + finishTime + '/Inception3.Architecture.' +
                str("{:.2f}".format(modelAcc)) + '.' + str(Epoch) + '.' +
                str(Batch) + '.' + Aug + '.yaml', 'w') as yaml_file:
            yaml_file.write(model_yaml)

    text_file.close()
Example #28
                                              stratify=Y_trnval)

print('trn.shape', X_trn.shape, Y_trn.shape)
print('val.shape', X_val.shape, Y_val.shape)
print('tst.shape', X_tst.shape, Y_tst.shape)

# model construction (keras)
if model_id == 1:
    base_model = VGG19(weights='imagenet', pooling='avg', include_top=False)
elif model_id == 2:
    base_model = InceptionV3(weights='imagenet',
                             pooling='avg',
                             include_top=False)
elif model_id == 3:
    base_model = ResNet50V2(weights='imagenet',
                            pooling='avg',
                            include_top=False)
elif model_id == 4:
    base_model = InceptionResNetV2(weights='imagenet',
                                   pooling='avg',
                                   include_top=False)

predictions = Dense(8, activation='softmax')(base_model.output)

model = Model(inputs=base_model.input, outputs=predictions)
#model.summary()

# data augmentation and scaling
data_gen_args = dict(rotation_range=360,
                     width_shift_range=0.15,
                     height_shift_range=0.15,
Example #29
# continue training. If it does not exist, initialize the neural network
if os.path.isfile(args["model"]):
    while True:
        choice = input(
            "This model already exists. Do you want to continue training it? (y/n): "
        )
        if choice in ("y", "Y"):
            model = load_model(args["model"])
            break
        elif choice in ("n", "N"):
            sys.exit(0)

else:
    # initialize and compile the ResNet-50 v2 model (uses transfer learning)
    res_net = ResNet50V2(weights="imagenet",
                         input_shape=(128, 64, 3),
                         include_top=False,
                         pooling="max")
    model = Sequential()
    model.add(res_net)
    model.add(Dense(units=120, activation="relu"))
    model.add(Dense(units=120, activation="relu"))
    model.add(Dense(units=2, activation="softmax"))
    model.compile(optimizer=Adam(lr=0.0001),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    model.summary()

# prepare to save the model (the neural network) to disk
# after each training epoch and log each epoch's
# parameters to a CSV file
filepath = (os.path.basename(args["model"]) +
Example #30
bboxes, polygon_labels = process()

# Define GCN models
epochs_per_image = 10
gcn_models = [None] * epochs_per_image

# Run stochastic training
for image in range(len(bboxes)):
    print("Training image {0} of {1}".format(image + 1, len(bboxes)))
    # Process bounding box
    bbox, polygon_points = bboxes[image], polygon_labels[image]
    resized_bb = cv2.resize(bbox, (224, 224), interpolation=cv2.INTER_AREA)
    resized_bb_exp = preprocess_input(np.expand_dims(resized_bb, axis=0))

    # Compute feature map
    resnet_model = ResNet50V2(weights='imagenet')
    embedding_model = Model(inputs=resnet_model.input,
                            outputs=resnet_model.get_layer('post_relu').output)
    feature_map = embedding_model.predict(resized_bb_exp)

    # Define graph
    N = 45
    G = nx.Graph()
    G.add_nodes_from(range(N))
    for i in range(N):
        if i - 2 < 0:
            G.add_edge(N + (i - 2), i)
        else:
            G.add_edge((i - 2), i)
        if i - 1 < 0:
            G.add_edge(N + (i - 1), i)