Example #1
def load_images(data_type):
    dataset = []
    path = "dataset/" + data_type + "/images"
    categories = os.listdir(path)

    # Build the DenseNet169 feature extractor once instead of re-creating it for every category
    model = DenseNet169(include_top=False, input_shape=(224, 224, 3))

    for category in categories:
        current_path = path + "/" + category
        class_num = categories.index(category)

        for image_path in tqdm(os.listdir(current_path)):
            img = image.load_img(current_path + "/" + image_path,
                                 target_size=(224, 224))
            if data_type == "train":
                for i in augment(img):
                    img_data = image.img_to_array(i)
                    img_data = np.expand_dims(img_data, axis=0)
                    img_data = preprocess_input(img_data)
                    features = model.predict(img_data)
                    dataset.append([features, class_num])
            else:
                img_data = image.img_to_array(img)
                img_data = np.expand_dims(img_data, axis=0)
                img_data = preprocess_input(img_data)
                features = model.predict(img_data)
                dataset.append([features, class_num])

    print("Loaded " + data_type)
    if data_type == "train":
        random.shuffle(dataset)
    return dataset
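
The list returned by load_images mixes DenseNet feature tensors with integer labels, so it has to be unpacked before feeding a classifier. A minimal sketch, assuming the snippet's own imports and its augment helper are available; the small dense head below is an illustrative assumption, not part of the original code:

import numpy as np
from tensorflow.keras import layers, models

train_set = load_images("train")
# Each entry is [features, class_num]; flatten the (1, 7, 7, 1664) feature map per image
X = np.array([features.flatten() for features, _ in train_set])
y = np.array([label for _, label in train_set])

head = models.Sequential([
    layers.Dense(256, activation="relu", input_shape=(X.shape[1],)),
    layers.Dense(len(set(y)), activation="softmax"),
])
head.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
head.fit(X, y, epochs=5, batch_size=32)
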
Example #2
def loadPretrainedWeights():
    pretrained_weights={}

    pretrained_weights['vgg16']=VGG16(weights='imagenet', include_top=False,pooling='avg')
    pretrained_weights['vgg19']=VGG19(weights='imagenet', include_top=False,pooling='avg')

    pretrained_weights['resnet50']=ResNet50(weights='imagenet', include_top=False,pooling='avg')

    pretrained_weights['inceptionv3']=InceptionV3(weights='imagenet', include_top=False,pooling='avg')
    pretrained_weights['inception-resentv2']=InceptionResNetV2(weights='imagenet', include_top=False,pooling='avg')


    pretrained_weights['xception']=Xception(weights='imagenet', include_top=False,pooling='avg')

    pretrained_weights['densenet121']=DenseNet121(weights='imagenet', include_top=False,pooling='avg')
    pretrained_weights['densenet169']=DenseNet169(weights='imagenet', include_top=False,pooling='avg')
    pretrained_weights['densenet201']=DenseNet201(weights='imagenet', include_top=False,pooling='avg')
    pretrained_weights['mobilenet']=MobileNet(weights='imagenet', include_top=False,pooling='avg')


    # NASNet and MobileNetV2 variants are left commented out:
    # pretrained_weights['nasnetlarge']=NASNetLarge(weights='imagenet', include_top=False,pooling='avg',input_shape=(224, 224, 3))
    # pretrained_weights['nasnetmobile']=NASNetMobile(weights='imagenet', include_top=False,pooling='avg')
    # pretrained_weights['mobilenetV2']=MobileNetV2(weights='imagenet', include_top=False,pooling='avg')

    return pretrained_weights
Example #3
def genetic(encodings, category):
    cnn = DenseNet169(include_top=False, input_shape=(224, 224, 3))
    image_classifier = load_model('image_model.h5')

    def f(X):
        sum = X[0] * encodings[0]
        for i in range(1, len(X)):
            sum += X[i] * encodings[i]

        decoded_image = (decode_image(sum) * 255).astype(np.uint8)
        decoded_image = Image.fromarray(decoded_image)
        decoded_image = decoded_image.resize((224, 224))
        decoded_image = np.expand_dims(decoded_image, axis=0)
        decoded_image = preprocess_input(decoded_image)
        features = cnn.predict(decoded_image)
        answer = image_classifier.predict(features)[0][category]
        return -answer

    varbound = np.array([[0, 1]] * 10)

    model = ga(function=f,
               dimension=10,
               variable_type='real',
               variable_boundaries=varbound)
    model.run()
    return model.output_dict['variable']
Example #4
def densenet169():
    base_model = DenseNet169(include_top=False)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1, activation='sigmoid')(x)
    model = Model(inputs=base_model.input, outputs=x)
    model.compile(optimizer='adam', loss='binary_crossentropy')
    return model
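
Since the returned model is already compiled for binary cross-entropy with a single sigmoid output, training reduces to calling fit on image batches. A minimal sketch; the random arrays below are placeholders standing in for a real image pipeline, not part of the original snippet:

import numpy as np

model = densenet169()

# Placeholder data: 8 RGB images and binary labels
x_train = np.random.rand(8, 224, 224, 3).astype("float32")
y_train = np.random.randint(0, 2, size=(8, 1))

model.fit(x_train, y_train, epochs=1, batch_size=4)
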
Example #5
def create_model_1(trainable=False):
    model = DenseNet169(input_shape=(96, 96, 3), include_top=False)

    for layer in model.layers:
        layer.trainable = trainable

    x = model.layers[-1].output
    x = Conv2D(4, kernel_size=3, name="coords")(x)
    x = Reshape((4, ))(x)

    return Model(inputs=model.input, outputs=x)
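
The head outputs four values per image (the Reshape to (4,)), which suggests a bounding-box style regression target. A minimal sketch of compiling and running the model on dummy 96x96 input; the mse loss and the random data are assumptions for illustration, not part of the original snippet:

import numpy as np

model = create_model_1(trainable=False)
model.compile(optimizer="adam", loss="mse")

# Two random 96x96 RGB images just to check the output shape
dummy_images = np.random.rand(2, 96, 96, 3).astype("float32")
print(model.predict(dummy_images).shape)  # (2, 4)
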
Example #6
def get_model(architecture, iteracion, models_info, pipeline):

    print("="*len(architecture))
    print(architecture)
    print("="*len(architecture))

    if iteracion > 0:
        base_model = models_info[architecture]['model_memory']

    if architecture == 'InceptionV3':
        from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
        if iteracion == 0:
            base_model = InceptionV3(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'InceptionV4':
        from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
        if iteracion == 0:
            base_model = InceptionResNetV2(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'ResNet50':
        from tensorflow.keras.applications.resnet import ResNet50, preprocess_input
        if iteracion == 0:
            base_model = ResNet50(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'ResNet101':
        from tensorflow.keras.applications.resnet import ResNet101, preprocess_input
        if iteracion == 0:
            base_model = ResNet101(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'ResNet152':
        from tensorflow.keras.applications.resnet import ResNet152, preprocess_input
        if iteracion == 0:
            base_model = ResNet152(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'DenseNet121':
        from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input
        if iteracion == 0:
            base_model = DenseNet121(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'DenseNet169':
        from tensorflow.keras.applications.densenet import DenseNet169, preprocess_input
        if iteracion == 0:
            base_model = DenseNet169(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'DenseNet201': 
        from tensorflow.keras.applications.densenet import DenseNet201, preprocess_input
        if iteracion == 0:
            base_model = DenseNet201(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'NASNetLarge': 
        from tensorflow.keras.applications.nasnet import NASNetLarge, preprocess_input
        if iteracion == 0:
            base_model = NASNetLarge(weights=pipeline['weights'],include_top=False,input_shape=(pipeline['img_height'], pipeline['img_width'], 3))
    if architecture == 'Xception':
        from tensorflow.keras.applications.xception import Xception, preprocess_input
        if iteracion == 0:
            base_model = Xception(weights=pipeline['weights'], include_top=False, input_shape=(pipeline['img_height'], pipeline['img_width'], 3))

    return base_model, preprocess_input
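
The function returns both the backbone and the matching preprocess_input, keyed off a pipeline dict. A minimal usage sketch, assuming a hypothetical pipeline configuration (the keys come from the snippet, the values are invented for illustration):

pipeline = {"weights": "imagenet", "img_height": 224, "img_width": 224}
base_model, preprocess_input = get_model("DenseNet169", iteracion=0,
                                         models_info={}, pipeline=pipeline)
print(base_model.name, base_model.output_shape)
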
Example #7
def dense_net_169(input_shape) -> Model:
    inputs = Input(shape=input_shape)

    base_model = DenseNet169(
        include_top=False,
        weights=None,
        input_tensor=inputs,
        input_shape=input_shape
    )

    a = tail_block(base_model.output, "root")
    b = tail_block(base_model.output, "vowel")
    c = tail_block(base_model.output, "consonant")

    head_root = Dense(168, activation='softmax', name='root')(a)
    head_vowel = Dense(11, activation='softmax', name='vowel')(b)
    head_consonant = Dense(7, activation='softmax', name='consonant')(c)

    return Model(inputs=inputs, outputs=[head_root, head_vowel, head_consonant])
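
Because the model has three named softmax heads, it is typically compiled with one loss (and optionally a weight) per output, keyed by the head names used above. A minimal sketch, assuming the tail_block helper is defined elsewhere in the source file; the optimizer, loss weights, and metrics are assumptions, not part of the original snippet:

model = dense_net_169((128, 128, 3))
model.compile(
    optimizer="adam",
    loss={"root": "categorical_crossentropy",
          "vowel": "categorical_crossentropy",
          "consonant": "categorical_crossentropy"},
    loss_weights={"root": 2.0, "vowel": 1.0, "consonant": 1.0},
    metrics=["accuracy"],
)
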
Example #8
def combine_images(variables, encodings):
    cnn = DenseNet169(include_top=False, input_shape=(224, 224, 3))
    image_classifier = load_model('image_model.h5')

    sum = variables[0] * encodings[0]
    for i in range(1, len(variables)):
        sum += variables[i] * encodings[i]

    decoded_image = (decode_image(sum) * 255).astype(np.uint8)
    decoded_image = Image.fromarray(decoded_image)
    decoded_image = decoded_image.resize((224, 224))
    plt.imshow(decoded_image)
    plt.show()

    decoded_image = np.expand_dims(decoded_image, axis=0)
    decoded_image = preprocess_input(decoded_image)
    features = cnn.predict(decoded_image)
    prediction = image_classifier.predict(features)
    print("Prediction:", prediction)
Example #9
    def dense169(self):
        '''
        DenseNet169 (initialized with ImageNet weights)
        '''

        base_model = DenseNet169(include_top=False,
                                 weights='imagenet',
                                 input_shape=(self.h, self.w, self.ch))
        x = GlobalAveragePooling2D()(base_model.output)
        x = Dense(512, kernel_initializer='he_normal')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        if self.classes == 1:
            outputs = Dense(self.classes,
                            kernel_initializer='he_normal',
                            activation='relu')(x)
        else:
            outputs = Dense(self.classes,
                            kernel_initializer='he_normal',
                            activation='softmax')(x)
        model = Model(inputs=base_model.input, outputs=outputs)

        return model
Example #10
    def get_base_model(self, name='vgg16'):
        print('using model : ', name)
        if name == 'mobilenet_v2':
            from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
            base_model = MobileNetV2(input_shape=(self.IMAGE_SIZE,
                                                  self.IMAGE_SIZE, 3),
                                     include_top=False,
                                     weights='imagenet')
        elif name == 'mobilenet':
            from tensorflow.keras.applications.mobilenet import MobileNet
            base_model = MobileNet(input_shape=(self.IMAGE_SIZE,
                                                self.IMAGE_SIZE, 3),
                                   include_top=False,
                                   weights='imagenet')
        elif name == 'densenet121':
            from tensorflow.keras.applications.densenet import DenseNet121
            base_model = DenseNet121(input_shape=(self.IMAGE_SIZE,
                                                  self.IMAGE_SIZE, 3),
                                     include_top=False,
                                     weights='imagenet')
        elif name == 'densenet169':
            from tensorflow.keras.applications.densenet import DenseNet169
            base_model = DenseNet169(input_shape=(self.IMAGE_SIZE,
                                                  self.IMAGE_SIZE, 3),
                                     include_top=False,
                                     weights='imagenet')
        elif name == 'densenet201':
            from tensorflow.keras.applications.densenet import DenseNet201
            base_model = DenseNet201(input_shape=(self.IMAGE_SIZE,
                                                  self.IMAGE_SIZE, 3),
                                     include_top=False,
                                     weights='imagenet')
        elif name == 'inception_resnet_v2':
            from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
            base_model = InceptionResNetV2(input_shape=(self.IMAGE_SIZE,
                                                        self.IMAGE_SIZE, 3),
                                           include_top=False,
                                           weights='imagenet')
        elif name == 'inception_v3':
            from tensorflow.keras.applications.inception_v3 import InceptionV3
            base_model = InceptionV3(input_shape=(self.IMAGE_SIZE,
                                                  self.IMAGE_SIZE, 3),
                                     include_top=False,
                                     weights='imagenet')
        elif name == 'nasnet_large':
            from tensorflow.keras.applications.nasnet import NASNetLarge
            base_model = NASNetLarge(input_shape=(self.IMAGE_SIZE,
                                                  self.IMAGE_SIZE, 3),
                                     include_top=False,
                                     weights='imagenet')
        elif name == 'nasnet_mobile':
            from tensorflow.keras.applications.nasnet import NASNetMobile
            base_model = NASNetMobile(input_shape=(self.IMAGE_SIZE,
                                                   self.IMAGE_SIZE, 3),
                                      include_top=False,
                                      weights='imagenet')
        elif name == 'resnet50':
            from tensorflow.keras.applications.resnet50 import ResNet50
            base_model = ResNet50(input_shape=(self.IMAGE_SIZE,
                                               self.IMAGE_SIZE, 3),
                                  include_top=False,
                                  weights='imagenet')
        elif name == 'xception':
            from tensorflow.keras.applications.xception import Xception
            base_model = Xception(input_shape=(self.IMAGE_SIZE,
                                               self.IMAGE_SIZE, 3),
                                  include_top=False,
                                  weights='imagenet')
        elif name == 'vgg19':
            from tensorflow.keras.applications.vgg19 import VGG19
            base_model = VGG19(input_shape=(self.IMAGE_SIZE, self.IMAGE_SIZE,
                                            3),
                               include_top=False,
                               weights='imagenet')
        else:
            from tensorflow.keras.applications.vgg16 import VGG16
            # Use VGG16 as the default base model; include_top=False drops the fully connected layers so a custom head can be added. The pretrained weights are downloaded to ~/.keras/models on first use.
            base_model = VGG16(input_shape=(self.IMAGE_SIZE, self.IMAGE_SIZE,
                                            3),
                               include_top=False,
                               weights='imagenet')

        # self.preprocess_input = preprocess_input
        return base_model
Example #11
def get_model(architecture, iteracion, models_info, pipeline):

    print("=" * len(architecture))
    print(architecture)
    print("=" * len(architecture))

    if iteracion > 0 and not pipeline["restart_weights"]:
        print("USING MODELS FROM MEMORY")
        base_model = models_info[architecture]['model_memory']
        print("OK - USING MODELS FROM MEMORY")

    if architecture == 'InceptionV3':
        from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input

        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = InceptionV3(weights=pipeline['weights'],
                                     include_top=False,
                                     input_shape=(pipeline['img_height'],
                                                  pipeline['img_width'], 3))
            print(f"OK - RESTARTING WEIGHTS FROM IMAGENET FOR {architecture}")

    if architecture == 'InceptionV4':
        from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input

        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = InceptionResNetV2(weights=pipeline['weights'],
                                           include_top=False,
                                           input_shape=(pipeline['img_height'],
                                                        pipeline['img_width'],
                                                        3))
            print(f"OK - RESTARTING WEIGHTS FROM IMAGENET FOR {architecture}")

    if architecture == 'ResNet50':
        from tensorflow.keras.applications.resnet import ResNet50, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = ResNet50(weights=pipeline['weights'],
                                  include_top=False,
                                  input_shape=(pipeline['img_height'],
                                               pipeline['img_width'], 3))
    if architecture == 'ResNet101':
        from tensorflow.keras.applications.resnet import ResNet101, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = ResNet101(weights=pipeline['weights'],
                                   include_top=False,
                                   input_shape=(pipeline['img_height'],
                                                pipeline['img_width'], 3))

    if architecture == 'ResNet152':
        from tensorflow.keras.applications.resnet import ResNet152, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = ResNet152(weights=pipeline['weights'],
                                   include_top=False,
                                   input_shape=(pipeline['img_height'],
                                                pipeline['img_width'], 3))
            print(f"OK - RESTARTING WEIGHTS FROM IMAGENET FOR {architecture}")

    if architecture == 'DenseNet121':
        from tensorflow.keras.applications.densenet import DenseNet121, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = DenseNet121(weights=pipeline['weights'],
                                     include_top=False,
                                     input_shape=(pipeline['img_height'],
                                                  pipeline['img_width'], 3))
    if architecture == 'DenseNet169':
        from tensorflow.keras.applications.densenet import DenseNet169, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = DenseNet169(weights=pipeline['weights'],
                                     include_top=False,
                                     input_shape=(pipeline['img_height'],
                                                  pipeline['img_width'], 3))
    if architecture == 'DenseNet201':
        from tensorflow.keras.applications.densenet import DenseNet201, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = DenseNet201(weights=pipeline['weights'],
                                     include_top=False,
                                     input_shape=(pipeline['img_height'],
                                                  pipeline['img_width'], 3))
    if architecture == 'NASNetLarge':
        from tensorflow.keras.applications.nasnet import NASNetLarge, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = NASNetLarge(weights=pipeline['weights'],
                                     include_top=False,
                                     input_shape=(pipeline['img_height'],
                                                  pipeline['img_width'], 3))
    if architecture == 'Xception':
        from tensorflow.keras.applications.xception import Xception, preprocess_input
        if iteracion == 0 or pipeline["restart_weights"]:
            base_model = Xception(weights=pipeline['weights'],
                                  include_top=False,
                                  input_shape=(pipeline['img_height'],
                                               pipeline['img_width'], 3))

    return base_model, preprocess_input
Example #12
#create model
if not args.load_model and not args.mode == 'finetune':
  # create the base pre-trained model
  if args.pretrained_model: 
    base_model = keras.models.load_model(args.pretrained_model)
  elif args.net == 'inception_v3':
    base_model = InceptionV3(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'xception':
    base_model = Xception(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'resnet_50':
    base_model = ResNet50(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'densenet_121':
    base_model = DenseNet121(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'densenet_169':
    base_model = DenseNet169(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'densenet_201':
    base_model = DenseNet201(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'mobilenet_v2':
    base_model = MobileNetV2(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'nasnetlarge':
    base_model = NASNetLarge(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'nasnetmobile':
    base_model = NASNetMobile(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  elif args.net == 'inceptionresnet_v2':
    base_model = InceptionResNetV2(input_shape=(dim, dim, 3), weights='imagenet', include_top=False)
  else:
    print('Unsupported network type')
    sys.exit()

  x = base_model.output
Example #13
plt.plot(model_history9.history['val_auc'])
plt.title('Model AUC')
plt.ylabel('AUC')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left', bbox_to_anchor=(1,1))
plt.show()

"""# **DENSENET 169**

---


"""

base_model22 = DenseNet169(input_shape=(224,224,3), 
                         include_top=False,
                         weights="imagenet")

for layer in base_model22.layers:
    layer.trainable=False

model22=Sequential()
model22.add(base_model22)
model22.add(Dropout(0.5))
model22.add(Flatten())
model22.add(BatchNormalization())
model22.add(Dense(64,kernel_initializer='he_uniform'))
model22.add(BatchNormalization())
model22.add(Activation('relu'))
model22.add(Dropout(0.5))
model22.add(Dense(64,kernel_initializer='he_uniform'))
Example #14
##############################
train_gen = TripletImageGenerator(train_df, preprocess_input, FACE_DEFAULT_SHAPE, is_aug=True)
valid_gen = TripletImageGenerator(valid_df, preprocess_input, FACE_DEFAULT_SHAPE, is_aug=False)
train_dataset = tf.data.Dataset.from_generator(train_gen.generator, 
                                               output_types=TripletImageGenerator.OUTPUT_TYPES,
                                               output_shapes=TripletImageGenerator.OUTPUT_SHAPES
                                              ).repeat().shuffle(buffer_size=BATCH_SIZE*100).batch(batch_size=BATCH_SIZE)

valid_dataset = tf.data.Dataset.from_generator(valid_gen.generator, 
                                               output_types=TripletImageGenerator.OUTPUT_TYPES,
                                               output_shapes=TripletImageGenerator.OUTPUT_SHAPES
                                              ).repeat().batch(batch_size=BATCH_SIZE_VALID)
##############################
#  Build the model
##############################
base_model = DenseNet169(include_top=False, input_shape = FACE_DEFAULT_SHAPE + (3,))
base_model_output = GlobalAveragePooling2D()(base_model.output)
base_model_output = Dense(512)(base_model_output)
base_model_output = LeakyReLU(alpha=0.1)(base_model_output)
base_model_output = Dense(128)(base_model_output)
normalize = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='normalize')
base_model_output = normalize(base_model_output)
base_model = Model(base_model.input, base_model_output)
inp_shape = K.int_shape(base_model.input)[1:]
input_a = Input( inp_shape,  name='anchor')
input_p = Input( inp_shape,  name='positive')
input_n = Input( inp_shape,  name='negative')
encoded_anchor = base_model(input_a)
encoded_positive = base_model(input_p)
encoded_negative = base_model(input_n)
merged_vector = Concatenate(axis=-1)([encoded_anchor, encoded_positive, encoded_negative])
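
The concatenated vector stacks the three 128-d L2-normalized embeddings (anchor, positive, negative), so a triplet loss can slice it back apart inside a custom Keras loss. A minimal sketch of such a loss; the 128-dimensional embedding size comes from the snippet, while the 0.2 margin and the compile settings are assumptions for illustration:

import tensorflow as tf

def triplet_loss(y_true, y_pred, emb_size=128, margin=0.2):
    # y_pred = [anchor | positive | negative], each emb_size wide
    anchor = y_pred[:, :emb_size]
    positive = y_pred[:, emb_size:2 * emb_size]
    negative = y_pred[:, 2 * emb_size:]
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
    return tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))

triplet_model = Model(inputs=[input_a, input_p, input_n], outputs=merged_vector)
triplet_model.compile(optimizer="adam", loss=triplet_loss)
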
Example #15
def get_densenet(classes=9,
                 depth=121,
                 input_shape=(224, 224, 3),
                 base_layer_trainable=False):
    from tensorflow.keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
    assert depth in [121, 169, 201]
    if depth == 121:
        base_model = DenseNet121(include_top=False, input_shape=input_shape)
        for layer in base_model.layers:
            layer.trainable = base_layer_trainable
        head_model = KL.GlobalMaxPool2D()(base_model.output)
        head_model = KL.Dense(1024,
                              activation='relu',
                              name='0000',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        head_model = KL.Dense(1024,
                              activation='relu',
                              name='1111',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        head_model = KL.Dense(classes, activation='softmax',
                              name='3333')(head_model)
        model = KM.Model(inputs=base_model.input, outputs=head_model)
        return model

    elif depth == 169:
        base_model = DenseNet169(include_top=False, input_shape=input_shape)
        for layer in base_model.layers:
            layer.trainable = base_layer_trainable
        head_model = KL.GlobalMaxPool2D()(base_model.output)
        head_model = KL.Dense(1024,
                              activation='relu',
                              name='00',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        head_model = KL.Dense(1024,
                              activation='relu',
                              name='01',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        head_model = KL.Dense(classes, activation='softmax',
                              name='11')(head_model)
        model = KM.Model(inputs=base_model.input, outputs=head_model)
        return model

    else:
        base_model = DenseNet201(include_top=False, input_shape=input_shape)
        for layer in base_model.layers:
            layer.trainable = base_layer_trainable
        head_model = KL.GlobalMaxPool2D()(base_model.output)
        head_model = KL.Dense(1024,
                              activation='relu',
                              name='0000',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        head_model = KL.Dense(1024,
                              activation='relu',
                              name='1111',
                              kernel_initializer='he_uniform')(head_model)
        head_model = KL.Dropout(0.5)(head_model)
        head_model = KL.Dense(classes, activation='softmax',
                              name='3333')(head_model)
        model = KM.Model(inputs=base_model.input, outputs=head_model)
        return model
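
All three branches build the same two-layer head, so usage only differs by the depth argument. A minimal usage sketch; the compile settings below are assumptions, not part of the original snippet:

model = get_densenet(classes=9, depth=169, input_shape=(224, 224, 3),
                     base_layer_trainable=False)
model.compile(optimizer="adam", loss="categorical_crossentropy",
              metrics=["accuracy"])
model.summary()
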
Example #16
 def download_for_url(self, path: str, **kwargs):
     """
     Download the file at the given URL
     :param path:  the path to download
     :param kwargs:  various kwargs for customizing the underlying behavior of
     the model download and setup
     :return: the absolute path to the model
     """
     path_split = path.split('/')
     type = path_split[0]
     weights_file = path_split[1]
     include_top = 'no_top' in weights_file
     if type == 'vgg19':
         ret = VGG19(include_top=include_top, **kwargs)
     elif type == 'vgg16':
         ret = VGG16(include_top=include_top, **kwargs)
     elif type == 'resnet50':
         ret = ResNet50(include_top=include_top, **kwargs)
     elif type == 'resnet101':
         ret = ResNet101(include_top=include_top, **kwargs)
     elif type == 'resnet152':
         ret = ResNet152(include_top=include_top, **kwargs)
     elif type == 'resnet50v2':
         ret = ResNet50V2(include_top=include_top, **kwargs)
     elif type == 'resnet101v2':
         ret = ResNet101V2(include_top=include_top, **kwargs)
     elif type == 'resnet152v2':
         ret = ResNet152V2(include_top=include_top, **kwargs)
     elif type == 'densenet121':
          ret = DenseNet121(include_top=include_top, **kwargs)
     elif type == 'densenet169':
         ret = DenseNet169(include_top=include_top, **kwargs)
     elif type == 'densenet201':
         ret = DenseNet201(include_top=include_top, **kwargs)
     elif type == 'inceptionresnetv2':
         ret = InceptionResNetV2(include_top=include_top, **kwargs)
     elif type == 'efficientnetb0':
         ret = EfficientNetB0(include_top=include_top, **kwargs)
     elif type == 'efficientnetb1':
         ret = EfficientNetB1(include_top=include_top, **kwargs)
     elif type == 'efficientnetb2':
         ret = EfficientNetB2(include_top=include_top, **kwargs)
     elif type == 'efficientnetb3':
         ret = EfficientNetB3(include_top=include_top, **kwargs)
     elif type == 'efficientnetb4':
         ret = EfficientNetB4(include_top=include_top, **kwargs)
     elif type == 'efficientnetb5':
         ret = EfficientNetB5(include_top=include_top, **kwargs)
     elif type == 'efficientnetb6':
         ret = EfficientNetB6(include_top=include_top, **kwargs)
     elif type == 'efficientnetb7':
          ret = EfficientNetB7(include_top=include_top, **kwargs)
     elif type == 'mobilenet':
         ret = MobileNet(include_top=include_top, **kwargs)
     elif type == 'mobilenetv2':
          ret = MobileNetV2(include_top=include_top, **kwargs)
     #  MobileNetV3() missing 2 required positional arguments: 'stack_fn' and 'last_point_ch'
     #elif type == 'mobilenetv3':
     #    mobile_net = MobileNetV3(include_top=include_top, **kwargs)
     elif type == 'inceptionv3':
         ret = InceptionV3(include_top=include_top, **kwargs)
     elif type == 'nasnet':
         ret = NASNetLarge(include_top=include_top, **kwargs)
     elif type == 'nasnet_mobile':
         ret = NASNetMobile(include_top=include_top, **kwargs)
     elif type == 'xception':
         ret = Xception(include_top=include_top, **kwargs)
     model_path = os.path.join(keras_path, weights_file)
     ret.save(model_path)
     return model_path
Example #17
    """
    lr = args.lr  #1e-3
    print('Learning rate: ', lr)
    return lr


model = models.Sequential()

if '121' in args.model:
    base_model = DenseNet121(weights=None,
                             include_top=False,
                             input_shape=(32, 32, 3),
                             pooling='avg')
elif '169' in args.model:
    base_model = DenseNet169(weights=None,
                             include_top=False,
                             input_shape=(32, 32, 3),
                             pooling='avg')
elif '201' in args.model:
    base_model = DenseNet201(weights=None,
                             include_top=False,
                             input_shape=(32, 32, 3),
                             pooling='avg')

#base_model.summary()

#pdb.set_trace()

#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
#model.add(layers.UpSampling2D((2,2)))
model.add(base_model)
Example #18
    def model_Initializer(self):

        from tensorflow.keras.layers import Dense, Flatten
        from tensorflow.keras.models import Model
        import tensorflow as tf

        #Resources
        print("Num GPUs Available: ",
              len(tf.config.experimental.list_physical_devices('GPU')))
        print("Using Tensorflow : ", tf.__version__)

        # initializing the network model and excluding the last layer of network
        if self.MODEL == 'VGG16':
            from tensorflow.keras.applications.vgg16 import VGG16
            self.model = VGG16(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'VGG19':
            from tensorflow.keras.applications.vgg19 import VGG19
            self.model = VGG19(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'Xception':
            from tensorflow.keras.applications.xception import Xception
            self.model = Xception(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'ResNet50V2':
            from tensorflow.keras.applications.resnet_v2 import ResNet50V2
            self.model = ResNet50V2(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'ResNet101V2':
            from tensorflow.keras.applications.resnet_v2 import ResNet101V2
            self.model = ResNet101V2(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'ResNet152V2':
            from tensorflow.keras.applications.resnet_v2 import ResNet152V2
            self.model = ResNet152V2(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'InceptionV3':
            from tensorflow.keras.applications.inception_v3 import InceptionV3
            self.model = InceptionV3(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'InceptionResNetV2':
            from tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2
            self.model = InceptionResNetV2(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'MobileNetV2':
            from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2
            self.model = MobileNetV2(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'DenseNet121':
            from tensorflow.keras.applications.densenet import DenseNet121
            self.model = DenseNet121(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'DenseNet169':
            from tensorflow.keras.applications.densenet import DenseNet169
            self.model = DenseNet169(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        if self.MODEL == 'DenseNet201':
            from tensorflow.keras.applications.densenet import DenseNet201
            self.model = DenseNet201(
                input_shape=self.IMAGE_SIZE + [3],
                weights='imagenet',  #using pretrained imagenet weights
                include_top=False)  #excluding the last layer of network

        # Freezing the layers of the network
        for layer in self.model.layers:
            layer.trainable = False

        #flattening the last layer
        self.x = Flatten()(self.model.output)

        #Create a dense layer for the output
        self.outlayers = Dense(self.count_output_classes,
                               activation='softmax')(self.x)

        #Binding pretrained layers with custom output layer
        self.model = Model(inputs=self.model.input, outputs=self.outlayers)

        #Compile the Model
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
Example #19
K.set_image_data_format('channels_last')  # TF dimension ordering in this code

IMG_SIZE = 2048
BATCH_SIZE = 1
LRATE = 0.1e-4
JOBID = 'r_' + str(random.randint(1, 1000000))
WUP = 5

if len(sys.argv) == 2:
    JOBID = sys.argv[1]

ROOT = '/mnt/bb/$USERID'

#load Keras model
model = DenseNet169(weights=None,
                    include_top=False,
                    input_shape=(IMG_SIZE, IMG_SIZE, 3))

#Adding custom Layers
x = model.output
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dropout(0.5)(x)
predictions = tf.keras.layers.Dense(1, activation="sigmoid")(x)

# creating the final model
modelFinal = tf.keras.Model(inputs=model.input, outputs=predictions)

# compile the model
modelFinal.compile(loss="binary_crossentropy",
                   optimizer=hvd.DistributedOptimizer(
                       tf.keras.optimizers.Adam(lr=LRATE * hvd.size())),