示例#1
0
    """
    model = cnn_sample(in_shape=(h, w, 3), num_classes=num_classes)
    
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['categorical_accuracy'])

    bind_model(model)
    
    model = NASNetLarge(input_shape=(h, w, 3), include_top=True, classes = 4, weights=None)
    adam = optimizers.Adam(lr=learning_rate, decay=1e-5)                    # optional optimization
    sgd = optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
    bind_model(model)
    """
    ##
    
    model = Xception(input_shape=(h, w, 3), include_top=True, classes = 4, weights=None)
    adam = optimizers.Adam(lr=learning_rate, decay=1e-5)                    # optional optimization
    sgd = optimizers.SGD(lr=learning_rate, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
    bind_model(model)
    
    """
    model = cnn_sample(in_shape=(h, w, 3), num_classes=4)
    adam = optimizers.Adam(lr=learning_rate, decay=1e-5)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['categorical_accuracy'])
    bind_model(model)
    """
    if config.pause:  ## test mode일 때
        print('Inferring Start...')
        nsml.paused(scope=locals())
示例#2
0
def load_model():
    """Load a pretrained Xception network into module-level globals.

    Side effects: rebinds the module globals ``model`` (Xception with
    ImageNet weights) and ``graph`` (the backing TF graph — presumably
    cached for use from another thread; confirm against callers).
    """
    global model, graph
    model = Xception(weights="imagenet")
    graph = K.get_session().graph
示例#3
0
File: model.py  Project: stoplime/kaggle
    def create_model(self, model_type='xception', load_weights=None):
        """Build and compile ``self.model``: a pretrained backbone topped
        with a sigmoid classification head.

        Parameters
        ----------
        model_type : str or int
            Backbone selector (name or its numeric alias).  Any
            unrecognised value falls back to 'xception', matching the
            original behaviour.
        load_weights : str or None
            Optional path to a weights file loaded into the full model.
        """
        if model_type in ('resnet152', 5):
            base = ResnetBuilder().build_resnet_152(self.input_shape,
                                                    self.output_size)
            model_name = 'resnet152'
        elif model_type in ('resnet50MOD', 6):
            base = ResnetBuilder().build_resnet_50(self.input_shape,
                                                   self.output_size)
            model_name = 'resnet50MOD'
        else:
            # All remaining choices are stock Keras applications that
            # share one keyword signature; the chain of near-identical
            # elif branches is collapsed into a lookup table.
            aliases = {1: 'inceptionv3', 2: 'resnet50', 3: 'vgg19',
                       4: 'vgg16', 7: 'inceptionv3MOD'}
            constructors = {'inceptionv3': InceptionV3,
                            'resnet50': ResNet50,
                            'vgg19': VGG19,
                            'vgg16': VGG16,
                            'inceptionv3MOD': InceptionV3MOD}
            model_name = aliases.get(model_type, model_type)
            ctor = constructors.get(model_name, Xception)
            if ctor is Xception:
                model_name = 'xception'
            base = ctor(include_top=False,
                        weights='imagenet',
                        input_tensor=self.input_tensor,
                        classes=self.output_size,
                        pooling='avg')

        pred = Dense(self.output_size,
                     activation='sigmoid',
                     name='predictions')(base.output)
        self.model = Model(base.input, pred, name=model_name)

        # FIX: identity comparison with None (was `load_weights != None`).
        if load_weights is not None:
            self.model.load_weights(load_weights)

        # Fine-tune end-to-end: explicitly mark every backbone layer
        # trainable (some builders freeze layers by default).
        for layer in base.layers:
            layer.trainable = True

        self.model.compile(loss=losses.binary_crossentropy,
                           optimizer='adam',
                           metrics=[FScore2])
示例#4
0

# In[47]:


# Training the Xception model for 10% training size
p = []      # presumably filled later in the notebook — unused in this cell
acc4 = []   # accuracy history for this experiment — also filled later

start = time.time()  # wall-clock timing for the run

# Drop any graph/session state left over from earlier experiments.
K.clear_session()

# NOTE(review): with include_top=False Keras ignores the `classes`
# argument — confirm a custom head is attached further down.
model2 = Xception(include_top=False, 
                  weights='imagenet', 
                  input_tensor=None, 
                  input_shape=input_shape, 
                  pooling=None, 
                  classes=num_classes)
# Augmentation; featurewise statistics require the `fit` call below.
augs2 = ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True)

# Compute featurewise mean/std from the 10% training subset.
augs2.fit(x_train_10)

# NOTE(review): min_lr=0.001 is an unusually high floor for a plateau
# annealer — confirm the intended minimum learning rate.
annealer2 = ReduceLROnPlateau(monitor='val_loss',factor=0.2,
                            patience=3,min_lr=0.001)
tmodel2 = Sequential()
示例#5
0
    base_model = InceptionResNetV2(include_top=include_top,
                                   weights=weights,
                                   input_tensor=Input(shape=(299, 299, 3)))
    model = Model(input=base_model.input,
                  output=base_model.get_layer('custom').output)
    image_size = (299, 299)
elif model_name == "mobilenet":
    base_model = MobileNet(include_top=include_top,
                           weights=weights,
                           input_tensor=Input(shape=(224, 224, 3)),
                           input_shape=(224, 224, 3))
    model = Model(input=base_model.input,
                  output=base_model.get_layer('custom').output)
    image_size = (224, 224)
elif model_name == "xception":
    base_model = Xception(weights=weights)
    model = Model(input=base_model.input,
                  output=base_model.get_layer('avg_pool').output)
    image_size = (299, 299)
else:
    base_model = None

print("[INFO] successfully loaded base model and model...")

# path to training dataset
train_labels = os.listdir(train_path)

# encode the labels
print("[INFO] encoding labels...")
le = LabelEncoder()
le.fit([tl for tl in train_labels])
def get_test_neural_net(type):
    """Instantiate a stock ImageNet-pretrained Keras model by name.

    Imports are performed lazily inside each branch so only the
    requested architecture's module is loaded.  Returns ``None`` for
    an unrecognised ``type`` string.  (The parameter name shadows the
    builtin ``type`` but is kept for caller compatibility.)
    """
    if type == 'mobilenet_small':
        from keras.applications.mobilenet import MobileNet
        return MobileNet((128, 128, 3), depth_multiplier=1, alpha=0.25,
                         include_top=True, weights='imagenet')
    if type == 'mobilenet':
        from keras.applications.mobilenet import MobileNet
        return MobileNet((224, 224, 3), depth_multiplier=1, alpha=1.0,
                         include_top=True, weights='imagenet')
    if type == 'mobilenet_v2':
        from keras.applications.mobilenetv2 import MobileNetV2
        return MobileNetV2((224, 224, 3), depth_multiplier=1, alpha=1.4,
                           include_top=True, weights='imagenet')
    if type == 'resnet50':
        from keras.applications.resnet50 import ResNet50
        return ResNet50(input_shape=(224, 224, 3), include_top=True,
                        weights='imagenet')
    if type == 'inception_v3':
        from keras.applications.inception_v3 import InceptionV3
        return InceptionV3(input_shape=(299, 299, 3), include_top=True,
                           weights='imagenet')
    if type == 'inception_resnet_v2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        return InceptionResNetV2(input_shape=(299, 299, 3), include_top=True,
                                 weights='imagenet')
    if type == 'xception':
        from keras.applications.xception import Xception
        return Xception(input_shape=(299, 299, 3), include_top=True,
                        weights='imagenet')
    if type == 'densenet121':
        from keras.applications.densenet import DenseNet121
        return DenseNet121(input_shape=(224, 224, 3), include_top=True,
                           weights='imagenet')
    if type == 'densenet169':
        from keras.applications.densenet import DenseNet169
        return DenseNet169(input_shape=(224, 224, 3), include_top=True,
                           weights='imagenet')
    if type == 'densenet201':
        from keras.applications.densenet import DenseNet201
        return DenseNet201(input_shape=(224, 224, 3), include_top=True,
                           weights='imagenet')
    if type == 'nasnetmobile':
        from keras.applications.nasnet import NASNetMobile
        return NASNetMobile(input_shape=(224, 224, 3), include_top=True,
                            weights='imagenet')
    if type == 'nasnetlarge':
        from keras.applications.nasnet import NASNetLarge
        return NASNetLarge(input_shape=(331, 331, 3), include_top=True,
                           weights='imagenet')
    # VGG nets are used headless with average pooling (feature extractors).
    if type == 'vgg16':
        from keras.applications.vgg16 import VGG16
        return VGG16(input_shape=(224, 224, 3), include_top=False,
                     pooling='avg', weights='imagenet')
    if type == 'vgg19':
        from keras.applications.vgg19 import VGG19
        return VGG19(input_shape=(224, 224, 3), include_top=False,
                     pooling='avg', weights='imagenet')
    return None
示例#7
0
def train(pooling="avg",
          num_units=1024,
          batch_size=2,
          name="test",
          drop_prob=0.,
          bonus=False,
          freeze=False):
    """Fine-tune a pretrained backbone on the 15-class image dataset.

    Parameters
    ----------
    pooling : str
        Global pooling applied by the backbone ('avg', 'max', ...).
    num_units : int
        Width of the single hidden dense layer of the new head.
    batch_size : int
        Generator batch size.
    name : str
        Run name; logs and checkpoints go under ``models/<name>``.
    drop_prob : float
        Dropout rate between the hidden layer and the classifier.
    bonus : bool
        Use the VGG16 Places365 backbone instead of Xception.
    freeze : bool
        Freeze all backbone layers (train only the new head).
    """
    model_dir = os.path.join(DIRNAME, "models/{}".format(name))
    os.makedirs(model_dir, exist_ok=True)

    # 80/20 train/validation split with light geometric augmentation.
    datagen = ImageDataGenerator(horizontal_flip=True,
                                 width_shift_range=0.2,
                                 height_shift_range=0.2,
                                 zoom_range=0.1,
                                 validation_split=0.2)

    train_generator = datagen.flow_from_directory(TRAIN_DIR, (300, 250),
                                                  batch_size=batch_size,
                                                  subset='training')

    valid_generator = datagen.flow_from_directory(TRAIN_DIR, (300, 250),
                                                  batch_size=batch_size,
                                                  subset='validation')

    if bonus:
        base_model = VGG16_Places365(include_top=False,
                                     weights='places',
                                     input_shape=(300, 250, 3),
                                     pooling=pooling)
    else:
        base_model = Xception(include_top=False,
                              weights="imagenet",
                              input_shape=(300, 250, 3),
                              pooling=pooling)

    # New 15-way softmax head on top of the pooled backbone features.
    x = base_model.output
    x = Dense(num_units, activation="relu")(x)
    x = Dropout(drop_prob)(x)
    predictions = Dense(15, activation="softmax")(x)

    model = Model(inputs=base_model.input, outputs=predictions)

    if freeze:
        for layer in base_model.layers:
            layer.trainable = False

    optimizer = SGD(lr=0.001, momentum=0.9, clipnorm=5.)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    tensorboard = TensorBoard(log_dir=model_dir,
                              batch_size=batch_size,
                              update_freq="batch")
    saver = ModelCheckpoint("{}/model.hdf5".format(model_dir),
                            verbose=1,
                            save_best_only=True,
                            monitor="val_acc",
                            mode="max")
    stopper = EarlyStopping(patience=20,
                            verbose=1,
                            monitor="val_acc",
                            mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="loss",
                                  factor=0.5,
                                  patience=5,
                                  verbose=1,
                                  min_lr=0.0001)

    def _steps(num_samples):
        # FIX: ceiling division. The original `samples // batch_size + 1`
        # requested one surplus batch whenever batch_size divided the
        # sample count exactly, silently re-feeding duplicate samples.
        return (num_samples + batch_size - 1) // batch_size

    model.fit_generator(
        train_generator,
        steps_per_epoch=_steps(train_generator.samples),
        validation_data=valid_generator,
        validation_steps=_steps(valid_generator.samples),
        verbose=2,
        epochs=50,
        callbacks=[tensorboard, saver, stopper, reduce_lr])
    print("Modelo {} treinado!".format(name))
示例#8
0
# NOTE(review): `ImageFile` is referenced here before the import two
# lines below — this only works if an earlier (unseen) import exists.
ImageFile.LOAD_TRUNCATED_IMAGES = True

from PIL import ImageFile
# Tolerate truncated/corrupt image files instead of raising on load.
ImageFile.LOAD_TRUNCATED_IMAGES = True

import tensorflow as tf
# Enable on-demand GPU memory growth so TF does not grab all VRAM upfront.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for physical_device in physical_devices:
    tf.config.experimental.set_memory_growth(physical_device, True)

# NOTE(review): this empty list is immediately overwritten — dead code.
models = []
# Headless (include_top=False) ImageNet backbones used as extractors.
models = [
    NASNetLarge(weights='imagenet', include_top=False),
    InceptionV3(weights='imagenet', include_top=False),
    MobileNetV2(weights='imagenet', include_top=False),
    Xception(weights='imagenet', include_top=False),
    DenseNet201(weights='imagenet', include_top=False),
    InceptionResNetV2(weights='imagenet', include_top=False)
]
# Display names kept parallel to `models` by position.
models_name = [
    'NASNetLarge', 'InceptionV3', 'MobileNetV2', 'Xception', 'DenseNet201',
    'InceptionResNetV2'
]

csv_ok = pd.read_csv('d:\\dane\\HARRISON\\data_list.txt', header=None)
# Attach a global-average-pooling head to each backbone in turn.
for model_id in range(len(models)):
    base_model = models[model_id]
    #print(models_name[model_id])
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
示例#9
0
def _create_pretrained_model(config_dict, num_classes):
    """Build a transfer-learning classifier from a pretrained backbone.

    Parameters
    ----------
    config_dict : dict
        Must provide 'base_model', 'num_dense_layers',
        'num_dense_units_<i>' for each dense layer, 'activation',
        'optimizer' ('SGD' | 'Adam' | 'RMSProp') and 'learning_rate'.
    num_classes : int
        Size of the final softmax layer.

    Returns
    -------
    A compiled Keras model with all backbone layers frozen.

    Raises
    ------
    ValueError
        If 'base_model' names an unknown architecture.
    NotImplementedError
        If 'optimizer' names an unknown optimizer.
    """
    #
    # extract relevant parts of configuration
    #
    num_dense_units_list = []
    base_model = config_dict['base_model']
    num_dense_layers = config_dict['num_dense_layers']
    for i in range(num_dense_layers):
        num_dense_units_list.append(config_dict['num_dense_units_' + str(i)])
    activation = config_dict['activation']
    optimizer = config_dict['optimizer']
    learning_rate = config_dict['learning_rate']

    #
    # load pre-trained model (headless: we attach our own classifier)
    #
    if base_model == 'InceptionV3':
        pretrained_model = InceptionV3(weights='imagenet', include_top=False)
    elif base_model == 'Xception':
        pretrained_model = Xception(weights='imagenet', include_top=False)
    elif base_model == 'ResNet50':
        pretrained_model = ResNet50(weights='imagenet', include_top=False)
    elif base_model == 'MobileNet':
        # MobileNet needs an explicit input shape when include_top=False.
        pretrained_model = MobileNet(weights='imagenet',
                                     input_shape=(224, 224, 3),
                                     include_top=False)
    elif base_model == 'InceptionResNetV2':
        pretrained_model = InceptionResNetV2(weights='imagenet',
                                             include_top=False)
    else:
        # FIX: the original only printed here and then crashed with an
        # UnboundLocalError on `pretrained_model`; fail fast instead.
        raise ValueError("invalid model: {}".format(base_model))

    #
    # add fully connected layers on top of the pooled backbone output
    # (FIX: the duplicated `x = pretrained_model.output` was removed)
    #
    x = pretrained_model.output
    x = GlobalAveragePooling2D()(x)
    for i in range(num_dense_layers):
        x = Dense(num_dense_units_list[i], activation=activation)(x)
    predictions = Dense(num_classes, activation='softmax')(x)

    #
    # finish building combined model, lock parameters of pretrained part
    #
    model = Model(inputs=pretrained_model.input, outputs=predictions)
    for layer in pretrained_model.layers:
        layer.trainable = False

    if optimizer == 'SGD':
        opt = optimizers.SGD(lr=learning_rate)
    elif optimizer == 'Adam':
        opt = optimizers.Adam(lr=learning_rate)
    elif optimizer == 'RMSProp':
        opt = optimizers.RMSprop(lr=learning_rate)
    else:
        raise NotImplementedError("Unknown optimizer: {}".format(optimizer))

    # compile the model (must be done *after* setting layers to
    # non-trainable so the freeze takes effect)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Tag the model so unfreeze_layers()/train() can identify the backbone.
    # NOTE(review): assigning to `model.name` fails on newer tf.keras
    # (read-only property) — confirm the Keras version in use.
    model.name = base_model

    return model
    # Bidirectional label <-> class-index mappings for the classifier.
    label2class = dict()
    label2breed = dict()
    invertlabel2class = dict()

    for i, j in enumerate(unique_y):
        label2class[j] = i
        invertlabel2class[i] = j

    # One (LABELS, BREED) row per label, for human-readable breed names.
    df_label_breed = df_train_resampled[["LABELS","BREED"]].drop_duplicates()

    for k,v in df_label_breed.iterrows():
        label2breed[v.LABELS] = v.BREED
    # load model
    #modelVGG16 = VGG16(weights='imagenet', include_top=False,
    #                   input_shape = (224,224,3))
    # Headless Xception backbone used as a frozen feature extractor.
    modelXception= Xception(weights='imagenet', include_top=False,
                       input_shape = (224,224,3))
    # train only the last layer
    for layer in modelXception.layers:
        layer.trainable = False
    #for layer in modelVGG16.layers:
    #    layer.trainable = False

    # adapt output to our case
    x = modelXception.output
    #x = modelVGG16.output
    x = Flatten()(x)
    #x = Dropout(0.4)(x)
    # let's add two fully-connected layer
    #x = Dense(2048, activation='relu')(x)
    #x = BatchNormalization()(x)
    #x = Dropout(0.4)(x)
示例#11
0
def unet(input_shape=(None, None, 3)):
    """U-Net-style decoder built on an Xception encoder.

    Skip connections are tapped from hard-coded Xception layer indices
    (121, 31, 21, 11); the ZeroPadding2D calls realign the skip
    tensors' spatial sizes before concatenation.  The output is a
    single-channel sigmoid mask.

    NOTE(review): the fixed layer indices assume a particular Keras
    Xception build — verify against the installed Keras version.
    """

    backbone = Xception(input_shape=input_shape,
                        weights='imagenet',
                        include_top=False)
    input = backbone.input  # shadows the builtin; kept for compatibility
    start_neurons = 16  # base channel width; stages use multiples of it

    # Deepest encoder feature used as a skip, then downsampled once more.
    conv4 = backbone.layers[121].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.1)(pool4)

    # Middle
    convm = Conv2D(start_neurons * 32, (3, 3), activation=None,
                   padding="same")(pool4)
    convm = residual_block(convm, start_neurons * 32)
    convm = residual_block(convm, start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # 10 -> 20
    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3),
                              strides=(2, 2),
                              padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.1)(uconv4)

    uconv4 = Conv2D(start_neurons * 16, (3, 3),
                    activation=None,
                    padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    # 10 -> 20
    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv4)
    conv3 = backbone.layers[31].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.1)(uconv3)

    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None,
                    padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    # 20 -> 40
    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv3)
    conv2 = backbone.layers[21].output
    # Pad the skip tensor by one row/column so shapes match for concat.
    conv2 = ZeroPadding2D(((1, 0), (1, 0)))(conv2)
    uconv2 = concatenate([deconv2, conv2])

    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None,
                    padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    # 40 -> 80
    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv2)
    conv1 = backbone.layers[11].output
    # Pad by three rows/columns to realign this shallower skip tensor.
    conv1 = ZeroPadding2D(((3, 0), (3, 0)))(conv1)
    uconv1 = concatenate([deconv1, conv1])

    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None,
                    padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # 80 -> 160 (final upsample back to the input resolution)
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3),
                             strides=(2, 2),
                             padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None,
                    padding="same")(uconv0)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)

    uconv0 = Dropout(0.1 / 2)(uconv0)
    output_layer = Conv2D(1, (1, 1), padding="same",
                          activation="sigmoid")(uconv0)

    model = Model(input, output_layer)
    # NOTE(review): assigning to `model.name` is rejected by newer
    # tf.keras (read-only property) — confirm the Keras version in use.
    model.name = 'u-xception'

    return model
示例#12
0
    def create_model():
        """Build a frozen pretrained backbone plus a configurable dense head.

        Reads the architecture choice and the head layout from the
        enclosing ``params`` dict (keys ``"model"``, ``"dense_num"``,
        ``"dense<i>"``, ``"loss"``, ``"phase1_optimizer"``,
        ``"metrics"``) and returns a compiled Keras model.  Also writes
        ``params["train_threshold"]`` — the index of the first layer to
        unfreeze in a later fine-tuning phase.

        Raises
        ------
        ValueError
            If ``params["model"]`` names an unknown architecture.
        """
        shape = (params["img_width"], params["img_height"], 3)
        if params["model"] == "InceptionV3":
            params["train_threshold"] = 249
            base_model = InceptionV3(weights='imagenet', include_top=False,
                                     input_tensor=None, input_shape=shape)
        elif params["model"] == "xception":
            params["train_threshold"] = 106
            base_model = Xception(weights='imagenet', include_top=False,
                                  input_tensor=None, input_shape=shape)
        elif params["model"] == "InceptionResNetV2":
            params["train_threshold"] = 727
            base_model = InceptionResNetV2(weights='imagenet',
                                           include_top=False,
                                           input_tensor=None,
                                           input_shape=shape)
        elif params["model"] == "DenseNet121":
            params["train_threshold"] = 403
            base_model = DenseNet121(weights='imagenet', include_top=False,
                                     input_tensor=None, input_shape=shape)
        elif params["model"] == "DenseNet169":
            params["train_threshold"] = 571
            base_model = DenseNet169(weights='imagenet', include_top=False,
                                     input_tensor=None, input_shape=shape)
        elif params["model"] == "DenseNet201":
            params["train_threshold"] = 683
            base_model = DenseNet201(weights='imagenet', include_top=False,
                                     input_tensor=None, input_shape=shape)
        elif params["model"] == "ResNet50":
            params["train_threshold"] = 140
            base_model = ResNet50(weights='imagenet', include_top=False,
                                  input_tensor=None, pooling=None,
                                  input_shape=shape)
        else:
            # FIX: the original printed "unknown model" and then crashed
            # on `base_model.output` (AttributeError on None).
            raise ValueError("unknown model: {}".format(params["model"]))

        modelx = base_model.output

        # Assemble the dense head described by params["dense1".."denseN"].
        for count in range(1, params["dense_num"] + 1):
            spec = params["dense" + str(count)]

            if "pool" in spec and spec["pool"] == "avg_poolx":
                modelx = GlobalAveragePooling2D(name=spec["pool"])(modelx)

            modelx = Dense(spec["num"],
                           activation=spec["activation"])(modelx)

            if "dropout" in spec:
                modelx = Dropout(spec["dropout"])(modelx)

        # FIX: use the supported plural ``outputs=`` keyword; the
        # singular ``output=`` is a long-deprecated alias.
        model = Model(inputs=base_model.input, outputs=modelx)

        # Phase 1: train only the new head; the backbone stays frozen.
        for layer in base_model.layers:
            layer.trainable = False

        model.compile(loss=params["loss"],
                      optimizer=params["phase1_optimizer"],
                      metrics=params["metrics"])

        return model
示例#13
0
    for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])


nb_train_samples = get_nb_files(train_dir)  # 训练样本个数
nb_classes = len(glob.glob(train_dir + "/*"))  # 分类数
nb_val_samples = get_nb_files(test_dir)  # 验证集样本个数

if __name__ == "__main__":
    '''图片预处理(包括数据扩增)'''
    train_generator, validation_generator = image_preprocess()
    base_model = Xception(weights='imagenet',
                          include_top=False,
                          input_shape=(IM_WIDTH, IM_HEIGHT,
                                       3))  # 预先要下载no_top模型
    base_model.output
    '''加载base_model'''
    # 使用带有预训练权重的InceptionV3模型,但不包括顶层分类器
    len(base_model.layers)
    '''添加顶层分类器'''
    model = add_new_last_layer(base_model, nb_classes)  # 从基本no_top模型上添加新层
    '''训练顶层分类器'''
    setup_to_transfer_learn(model, base_model)
    #setup_to_transfer_learn(model)
    history_tl = model.fit_generator(
        train_generator,
        epochs=epoch_frezz,
        steps_per_epoch=nb_train_samples // batch_size,
        validation_data=validation_generator,
示例#14
0
# Scale pixel values into [0, 1].
X_test = X_test.astype('float32') / 255

# Split a validation set off the training data (fixed seed, 20% holdout).
X_train, X_valid, y_train, y_valid = train_test_split(X_train,
                                                      y_train,
                                                      random_state=0,
                                                      test_size=0.2)
print(X_train.shape, y_train.shape, X_valid.shape, y_valid.shape)
# (Translation of the note below: "Model construction — uses Keras'
# Xception; include_top=False removes the fully connected output
# layers; early_stopping is a function to prevent overfitting.")
"""モデルの構築
KerasのXceptionを使用する。
Argments:
    include_top: ネットワーク出力層の全結合層を除去(False)。
    early_stopping: 過学習を防ぐ関数。

"""
# Headless Xception backbone; GAP + ReLU dense layer + one linear
# output unit (regression-style head — no activation on final Dense).
base_model = Xception(include_top=False, weights="imagenet", input_shape=None)
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(x)
predictions = Dense(1)(x)
# Augmentation: small shifts and horizontal flips only.
datagen = ImageDataGenerator(featurewise_center=False,
                             samplewise_center=False,
                             featurewise_std_normalization=False,
                             samplewise_std_normalization=False,
                             zca_whitening=False,
                             rotation_range=0,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             horizontal_flip=True,
                             vertical_flip=False)
示例#15
0
from keras.layers import *


class FrozenBatchNormalization(layers.BatchNormalization):
    """BatchNormalization subclass that always runs in inference mode."""

    def call(self, inputs, training=None):
        # Ignore the caller-supplied `training` flag: always use the
        # stored moving statistics (keeps pretrained BN layers frozen).
        return super().call(inputs=inputs, training=False)


# Save the real class, then monkey-patch the layers module so the
# Xception builder below instantiates frozen batch-norm layers.
BatchNormalization = layers.BatchNormalization
layers.BatchNormalization = FrozenBatchNormalization

#Prepare the Model
HEIGHT = 299
WIDTH = 299
base_model = Xception(layers=layers,
                      weights='imagenet',
                      include_top=False,
                      input_shape=(HEIGHT, WIDTH, 3))

#undo the patch
layers.BatchNormalization = BatchNormalization


def build_finetune_model(base_model, dropout, fc_layers, num_classes):
    #for layer in base_model.layers:
    #    layer.trainable = False
    for layer in base_model.layers[:126]:
        layer.trainable = False
    for layer in base_model.layers[126:]:
        layer.trainable = True

    x = base_model.output
示例#16
0
#!/usr/bin/env python3
import os
import sys
from keras.applications.xception import Xception
from keras.applications.xception import preprocess_input, decode_predictions
from keras.preprocessing import image
import numpy as np
import json
from glob import glob

model = Xception(include_top=True, weights='imagenet')


def predict(img_path, pred_threshold):
    """Classify one image and print the top-3 labels above a threshold.

    Each qualifying prediction is printed as
    ``<img_path>|<description>|<probability>`` on its own line.
    """
    img = image.load_img(img_path, target_size=(299, 299))
    batch = np.expand_dims(image.img_to_array(img), axis=0)
    batch = preprocess_input(batch)

    # Top-3 (class id, description, probability) tuples for this image.
    top3 = decode_predictions(model.predict(batch), top=3)[0]
    for class_, desc, prob in top3:
        if prob > pred_threshold:
            print("|".join([img_path, desc, str(prob)]))


def main():
示例#17
0
def extract_Xception(tensor):
    """Return Xception bottleneck features for a batch of images.

    Runs the batch through Keras' Xception preprocessing and a
    headless, ImageNet-pretrained Xception instantiated per call.
    """
    from keras.applications.xception import Xception, preprocess_input

    extractor = Xception(weights="imagenet", include_top=False)
    return extractor.predict(preprocess_input(tensor))
示例#18
0
# Run the shared prediction helper over several stock ImageNet models.
vgg19_model = VGG19(weights=vgg19_weights)
_get_predictions(vgg19_model)
from keras.applications.inception_v3 import InceptionV3

inception_weights = '../input/inceptionv3/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
inception_model = InceptionV3(weights=inception_weights)
_get_predictions(inception_model)
from keras.applications.resnet50 import ResNet50

resnet_weights = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
resnet_model = ResNet50(weights=resnet_weights)
_get_predictions(resnet_model)
from keras.applications.xception import Xception

xception_weights = '../input/xception/xception_weights_tf_dim_ordering_tf_kernels.h5'
xception_model = Xception(weights=xception_weights)
# Headless ResNet50 used below as a fixed feature extractor.
resnet50 = ResNet50(weights='imagenet', include_top=False)


def _get_features(img_path):
    """Return raw ResNet50 bottleneck features for one image file."""
    img = image.load_img(img_path, target_size=(224, 224))
    img_data = image.img_to_array(img)
    img_data = np.expand_dims(img_data, axis=0)
    img_data = preprocess_input(img_data)
    resnet_features = resnet50.predict(img_data)
    return resnet_features


img_path = "../input/dogs-vs-cats-redux-kernels-edition/train/dog.2811.jpg"
resnet_features = _get_features(img_path)
features_representation_1 = resnet_features.flatten()
import cv2
from keras.models import Model
from keras.applications.xception import Xception
from keras.applications.xception import preprocess_input as xception_preprocess_input
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input as vgg16_preprocess_input
from keras.applications.vgg19 import VGG19
from keras.applications.vgg19 import preprocess_input as vgg19_preprocess_input
from preprocessing import list_pictures


#Organizing the dataset
image_dir = os.path.join('..', 'data', 'image_processed')
magnification_factors = ['40', '100', '200', '400']

# Three feature extractors, each paired with its own preprocessing fn
# in the `models` dict keyed by model name.
model1 = Xception(weights='imagenet', include_top=False) #imports the Xception model and discards the last classification layer.
model2 = VGG16(weights='imagenet', include_top=False) #imports the VGG16 model and discards the last classification layer.
base_model = VGG19(weights='imagenet')
model3 = Model(inputs=base_model.input, outputs=base_model.get_layer('block4_pool').output) #imports the VGG19 model and discards the last classification layer.
models = {'xception': (model1, xception_preprocess_input),
          'vgg16': (model2, vgg16_preprocess_input),
          'vgg19': (model3, vgg19_preprocess_input)
          }


# Define a function to extract features using pre-trained network
def feature_extraction(model_name, img_path, magnification_factor, input_shape, out_path):
    features = []
    labels = []
    patients = []
    for img_dir in list_pictures(img_path, magnification_factor, ext='.npy'):
示例#20
0
  """
    #for layer in model.layers[:NB_IV3_LAYERS_TO_FREEZE]:
    #  layer.trainable = False
    for layer in model.layers[NB_IV3_LAYERS_TO_FREEZE:]:
        layer.trainable = True
    model.compile(optimizer=SGD(lr=0.01, momentum=0.99),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])


vgg_weights = 'vgg16_weights.h5'
top_model_weights_path = 'fc_model.h5'

#inputs = Input(shape=(150,150,3))
# Headless Xception backbone at the configured image size.
base_model = Xception(weights='imagenet',
                      include_top=False,
                      input_shape=(img_width, img_height, 3))
#base_model = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width,img_width,3))
print('Model loaded.')
# Classifier head: flatten -> BN -> L2-regularized Dense(120) -> BN -> softmax.
# NOTE(review): W_regularizer is the Keras 1 spelling (kernel_regularizer in Keras 2).
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(BatchNormalization())
top_model.add(Dense(120, W_regularizer=regularizers.l2(0.02)))
top_model.add(BatchNormalization())
top_model.add(Activation('softmax'))
#top_model.load_weights(top_model_weights_path)

# add the model on top of the convolutional base
#model.add(top_model)
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
示例#21
0
# from keras.applications.resnet50 import ResNet50
from keras.applications.xception import Xception
from keras.preprocessing import image
from keras.applications.xception import preprocess_input, decode_predictions
from keras.utils.vis_utils import plot_model
import numpy as np
# import matplotlib.pyplot as plt

# model = ResNet50(weights='imagenet')
# Build Xception once: the default constructor already uses ImageNet weights,
# so the original's second `Xception(weights='imagenet')` call only repeated
# the weight-loading and memory allocation.
model = Xception(weights='imagenet')
print(model.summary())
# plot_model(model, to_file='xception.png')
img_path = 'elephant.jpg'
# img_path = 'parrot.jpg'
img = image.load_img(img_path, target_size=(299, 299))  # Xception's native input size
x = image.img_to_array(img)
# plt.imshow(x / 255.)
# plt.show()
x = np.expand_dims(x, axis=0)  # add the batch dimension -> (1, 299, 299, 3)
x = preprocess_input(x)

preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch); decode once and reuse below.
top_preds = decode_predictions(preds, top=8)[0]
print('Predicted:', top_preds)
# img.show()
for pred in top_preds:
    print(pred)
# NOTE(review): this fragment begins inside an if/elif chain whose opening
# branches are not visible here; `base_model`/`predictions` come from the
# missing branch above.
    model = Model(input=base_model.input, output=predictions)
    image_size = (299, 299)
elif cfg.model_name == "mobilenet":
    # MobileNet backbone with global average pooling as the feature output.
    base_model = MobileNet(
        include_top=cfg.include_top,
        weights=cfg.weights,
        input_tensor=Input(shape=(224, 224, 3)),
        input_shape=(224, 224, 3),
    )
    x = base_model.output
    predictions = GlobalAveragePooling2D()(x)
    model = Model(input=base_model.input, output=predictions)
    image_size = (224, 224)

elif cfg.model_name == "xception":
    # Full Xception; expose its built-in avg_pool layer as the feature output.
    base_model = Xception(weights=cfg.weights)
    model = Model(input=base_model.input,
                  output=base_model.get_layer("avg_pool").output)
    model.summary()
    image_size = (299, 299)
else:
    base_model = None  # unknown model name: fall through with no model

print("[INFO] successfully loaded base model and model...")

# path to training dataset
train_labels = os.listdir(cfg.train_path)

# encode the labels
print("[INFO] encoding labels...")
le = LabelEncoder()
示例#23
0
def build_model(start_neurons):
    """Build a U-Net-style segmentation model on an Xception backbone.

    Args:
        start_neurons: channel-width multiplier for the decoder blocks.

    Returns:
        A Keras ``Model`` mapping a (prm.img_h, prm.img_w, 3) image to a
        single-channel sigmoid mask.
    """
    backbone = Xception(input_shape=(prm.img_h, prm.img_w, 3),
                        weights='imagenet',
                        include_top=False)
    net_input = backbone.input  # renamed from `input` to stop shadowing the builtin

    # Encoder taps: intermediate backbone activations reused as skip connections.
    conv4 = backbone.layers[121].output
    conv4 = LeakyReLU(alpha=0.1)(conv4)
    pool4 = MaxPooling2D((2, 2))(conv4)
    pool4 = Dropout(0.1)(pool4)

    # middle
    convm = Conv2D(start_neurons * 32, (3, 3), activation=None,
                   padding="same")(pool4)
    convm = residual_block(convm, start_neurons * 32)
    convm = residual_block(convm, start_neurons * 32)
    convm = LeakyReLU(alpha=0.1)(convm)

    # 8 -> 16
    deconv4 = Conv2DTranspose(start_neurons * 16, (3, 3),
                              strides=(2, 2),
                              padding="same")(convm)
    uconv4 = concatenate([deconv4, conv4])
    uconv4 = Dropout(0.1)(uconv4)

    uconv4 = Conv2D(start_neurons * 16, (3, 3),
                    activation=None,
                    padding="same")(uconv4)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = residual_block(uconv4, start_neurons * 16)
    uconv4 = LeakyReLU(alpha=0.1)(uconv4)

    # 16 -> 32
    deconv3 = Conv2DTranspose(start_neurons * 8, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv4)
    conv3 = backbone.layers[31].output
    uconv3 = concatenate([deconv3, conv3])
    uconv3 = Dropout(0.1)(uconv3)

    uconv3 = Conv2D(start_neurons * 8, (3, 3), activation=None,
                    padding="same")(uconv3)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = residual_block(uconv3, start_neurons * 8)
    uconv3 = LeakyReLU(alpha=0.1)(uconv3)

    # 32 -> 64
    deconv2 = Conv2DTranspose(start_neurons * 4, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv3)
    conv2 = backbone.layers[21].output
    # Zero-pad so the skip tensor matches the upsampled spatial size.
    conv2 = ZeroPadding2D(((1, 0), (1, 0)))(conv2)
    uconv2 = concatenate([deconv2, conv2])

    uconv2 = Dropout(0.1)(uconv2)
    uconv2 = Conv2D(start_neurons * 4, (3, 3), activation=None,
                    padding="same")(uconv2)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = residual_block(uconv2, start_neurons * 4)
    uconv2 = LeakyReLU(alpha=0.1)(uconv2)

    # 64 -> 128
    deconv1 = Conv2DTranspose(start_neurons * 2, (3, 3),
                              strides=(2, 2),
                              padding="same")(uconv2)
    conv1 = backbone.layers[11].output
    conv1 = ZeroPadding2D(((3, 0), (3, 0)))(conv1)
    uconv1 = concatenate([deconv1, conv1])

    uconv1 = Dropout(0.1)(uconv1)
    uconv1 = Conv2D(start_neurons * 2, (3, 3), activation=None,
                    padding="same")(uconv1)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = residual_block(uconv1, start_neurons * 2)
    uconv1 = LeakyReLU(alpha=0.1)(uconv1)

    # 128 -> 256
    uconv0 = Conv2DTranspose(start_neurons * 1, (3, 3),
                             strides=(2, 2),
                             padding="same")(uconv1)
    uconv0 = Dropout(0.1)(uconv0)
    uconv0 = Conv2D(start_neurons * 1, (3, 3), activation=None,
                    padding="same")(uconv0)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = residual_block(uconv0, start_neurons * 1)
    uconv0 = LeakyReLU(alpha=0.1)(uconv0)

    uconv0 = Dropout(0.1 / 2)(uconv0)
    output_layer_noActi = Conv2D(1, (1, 1), padding="same",
                                 activation=None)(uconv0)
    output_layer = Activation('sigmoid')(output_layer_noActi)

    # BUG FIX: the original built the model from the pre-activation logits and
    # left `output_layer` unused; wire the sigmoid output into the model.
    model = Model(inputs=net_input, outputs=output_layer)

    return model
# Sequence of backbone configurations for feature extraction; the actual
# extraction/metadata calls are left commented out.
model_dict = {}
model_dict['model name'] = "ResNet50"
model_dict['model'] = ResNet50(include_top=False, weights='imagenet')
model_dict['preprocess input'] = preprocess_input
# ExtractFeatures(model_dict, some_train_img_list, (224, 224, -1), "train")
# ExtractFeatures(model_dict, some_test_image_list, (224, 224, -1), "test")
# SaveMetaData(all_df, some_train_img_list, "train")
# SaveMetaData(all_df, some_test_image_list, "test")
model_dict.clear()  # reuse the same dict object for the next backbone

# NOTE(review): each of the imports below rebinds the module-level name
# `preprocess_input`; order matters, and earlier uses are only safe because
# the function object was stored in model_dict before the rebinding.
from keras.applications.xception import Xception
from keras.applications.xception import preprocess_input

model_dict['model name'] = "Xception"
model_dict['model'] = Xception(include_top=False, weights='imagenet')
model_dict['preprocess input'] = preprocess_input
# ExtractFeatures(model_dict, some_train_img_list, (299, 299, -1), "train")
# ExtractFeatures(model_dict, some_test_image_list, (299, 299, -1), "test")
model_dict.clear()

from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input

model_dict['model name'] = "InceptionV3"
model_dict['model'] = InceptionV3(include_top=False, weights='imagenet')
model_dict['preprocess input'] = preprocess_input
# ExtractFeatures(model_dict, some_train_img_list, (299, 299, -1), "train")
# ExtractFeatures(model_dict, some_test_image_list, (299, 299, -1), "test")
model_dict.clear()
示例#25
0
from keras.models import Model
from pickle import load
from keras.preprocessing.sequence import pad_sequences
import numpy as np

from tkinter import *
from PIL import ImageTk, Image
from tkinter import filedialog
from tkinter import messagebox
import keras

import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)  # silence TF warnings

fileName = 'xxx'  # placeholder; replaced when the user picks a file
# Image encoder: Xception with its last layer dropped (layers[-2] output).
m = Xception()
imageModel = Model(inputs=m.layers[0].input, outputs=m.layers[-2].output)
mainModel = load_model('./Xception_model_4.h5')

# BUG FIX: the original opened tokenizer.pkl without ever closing it; a
# context manager releases the file handle deterministically.
with open('tokenizer.pkl', 'rb') as tokenizer_file:
    tokenizer = load(tokenizer_file)

w2i = tokenizer.word_index              # word -> index
i2w = {j: i for i, j in w2i.items()}    # index -> word (inverse vocabulary)

# Main application window.
root = Tk()
root.title("Image Loader")
root.resizable(width=True, height=True)
root.minsize(1000, 700)
def getCaption(fileName):
示例#26
0
    def __init__(self):
        """Set up an ImageNet-pretrained, headless Xception feature extractor."""
        self.batch_size = 32
        self.model = Xception(weights='imagenet', include_top=False)
示例#27
0
def SSDXception_BN(input_shape, num_classes=2):
    """SSD300 + XCeption architecture.

    # Arguments
        input_shape: Shape of the input image,
            expected to be either (300, 300, 3) or (3, 300, 300)(not tested).
        num_classes: Number of classes including background.

    # Returns
        A Keras model mapping the input image to concatenated
        (mbox_loc, mbox_conf, mbox_priorbox) predictions over all scales.

    # References
        https://arxiv.org/abs/1512.02325
    """
    # NOTE(review): written against the legacy Keras 1 API (Convolution2D,
    # border_mode=, subsample=, merge(), AtrousConvolution2D,
    # K.image_dim_ordering, layer.outbound_nodes); it will not run unmodified
    # on Keras 2+.
    net = {}
    img_size = (input_shape[1], input_shape[0])
    input_tensor = Input(shape=input_shape)
    net['input'] = input_tensor

    # Frozen Xception backbone; record each connected layer's output by name.
    xception_head = Xception(include_top=False, input_tensor=input_tensor)
    activation_layer_names = []

    for layer in xception_head.layers:
        # print(layer.name)
        if layer.outbound_nodes:
            net[layer.name] = layer.output
            layer.trainable = False
            # print(layer.name)
            if layer.name.startswith('activation'):
                activation_layer_names.append(layer.name)

    # print(activation_layer_names)

    # prev_size_layer_name = activation_layer_names[-10]
    # Extra SSD feature layers on top of the backbone output.
    net['pool5'] = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same',
                                name='pool5')(xception_head.output)
    # FC6
    net['fc6'] = AtrousConvolution2D(512, 3, 3, atrous_rate=(3, 3),
                                     activation='relu', border_mode='same',
                                     name='fc6')(net['pool5'])
    # x = Dropout(0.5, name='drop6')(x)
    # FC7
    # net['fc7'] = Convolution2D(1024, 1, 1, activation='relu',
    #                            border_mode='same', name='fc7')(net['fc6'])
    # fc7 is aliased to fc6 (the 1x1 conv above is disabled).
    net['fc7'] = net['fc6']

    # net['fc7'] = xception_head.output

    # x = Dropout(0.5, name='drop7')(x)
    # Block 6
    net['conv6_1'] = wrap_with_bn(
        net,
        Convolution2D(256, 1, 1, activation='linear',
                      border_mode='same',
                      name='conv6_1')(net['fc7']),
        name='6_1')

    net['conv6_2'] = Convolution2D(512, 3, 3, subsample=(2, 2),
                                   activation='relu', border_mode='same',
                                   name='conv6_2')(net['conv6_1'])
    # Block 7
    net['conv7_1'] = wrap_with_bn(
        net,
        Convolution2D(128, 1, 1, activation='linear',
                                   border_mode='same',
                                   name='conv7_1')(net['conv6_2']),
        name='7_1')

    net['conv7_2'] = ZeroPadding2D()(net['conv7_1'])
    net['conv7_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
                                   activation='relu', border_mode='valid',
                                   name='conv7_2')(net['conv7_2'])
    # Block 8
    net['conv8_1'] = wrap_with_bn(
        net,
        Convolution2D(128, 1, 1, activation='relu',
                                   border_mode='same',
                                   name='conv8_1')(net['conv7_2']),
        name='8_1')

    net['conv8_2'] = Convolution2D(256, 3, 3, subsample=(2, 2),
                                   activation='relu', border_mode='same',
                                   name='conv8_2')(net['conv8_1'])
    # Last Pool
    net['pool6'] = GlobalAveragePooling2D(name='pool6')(net['conv8_2'])
    # Prediction from fc7
    num_priors = 6
    net['fc7_mbox_loc'] = Convolution2D(num_priors * 4, 3, 3,
                                        border_mode='same',
                                        name='fc7_mbox_loc')(net['fc7'])
    flatten = Flatten(name='fc7_mbox_loc_flat')
    net['fc7_mbox_loc_flat'] = flatten(net['fc7_mbox_loc'])
    # Conf layer names carry the class count when it differs from VOC's 21 so
    # pretrained 21-class weights are not loaded into mismatched layers.
    name = 'fc7_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    net['fc7_mbox_conf'] = Convolution2D(num_priors * num_classes, 3, 3,
                                         border_mode='same',
                                         name=name)(net['fc7'])
    flatten = Flatten(name='fc7_mbox_conf_flat')
    net['fc7_mbox_conf_flat'] = flatten(net['fc7_mbox_conf'])
    priorbox = PriorBox(img_size, min_size=180, max_size=180.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='fc7_mbox_priorbox')
    net['fc7_mbox_priorbox'] = priorbox(net['fc7'])
    # Prediction from conv6_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
                      name='conv6_2_mbox_loc')(net['conv6_2'])
    net['conv6_2_mbox_loc'] = x
    flatten = Flatten(name='conv6_2_mbox_loc_flat')
    net['conv6_2_mbox_loc_flat'] = flatten(net['conv6_2_mbox_loc'])
    name = 'conv6_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
                      name=name)(net['conv6_2'])
    net['conv6_2_mbox_conf'] = x
    flatten = Flatten(name='conv6_2_mbox_conf_flat')
    net['conv6_2_mbox_conf_flat'] = flatten(net['conv6_2_mbox_conf'])
    priorbox = PriorBox(img_size, 280.0, max_size=280.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv6_2_mbox_priorbox')
    net['conv6_2_mbox_priorbox'] = priorbox(net['conv6_2'])
    # Prediction from conv7_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
                      name='conv7_2_mbox_loc')(net['conv7_2'])
    net['conv7_2_mbox_loc'] = x
    flatten = Flatten(name='conv7_2_mbox_loc_flat')
    net['conv7_2_mbox_loc_flat'] = flatten(net['conv7_2_mbox_loc'])
    name = 'conv7_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
                      name=name)(net['conv7_2'])
    net['conv7_2_mbox_conf'] = x
    flatten = Flatten(name='conv7_2_mbox_conf_flat')
    net['conv7_2_mbox_conf_flat'] = flatten(net['conv7_2_mbox_conf'])
    priorbox = PriorBox(img_size, 420.0, max_size=420.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv7_2_mbox_priorbox')
    net['conv7_2_mbox_priorbox'] = priorbox(net['conv7_2'])
    # Prediction from conv8_2
    num_priors = 6
    x = Convolution2D(num_priors * 4, 3, 3, border_mode='same',
                      name='conv8_2_mbox_loc')(net['conv8_2'])
    net['conv8_2_mbox_loc'] = x
    flatten = Flatten(name='conv8_2_mbox_loc_flat')
    net['conv8_2_mbox_loc_flat'] = flatten(net['conv8_2_mbox_loc'])
    name = 'conv8_2_mbox_conf'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Convolution2D(num_priors * num_classes, 3, 3, border_mode='same',
                      name=name)(net['conv8_2'])
    net['conv8_2_mbox_conf'] = x
    flatten = Flatten(name='conv8_2_mbox_conf_flat')
    net['conv8_2_mbox_conf_flat'] = flatten(net['conv8_2_mbox_conf'])
    priorbox = PriorBox(img_size, 640.0, max_size=640.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='conv8_2_mbox_priorbox')
    net['conv8_2_mbox_priorbox'] = priorbox(net['conv8_2'])
    # Prediction from pool6 (globally pooled, so Dense replaces Conv here).
    num_priors = 6
    x = Dense(num_priors * 4, name='pool6_mbox_loc_flat')(net['pool6'])
    net['pool6_mbox_loc_flat'] = x
    name = 'pool6_mbox_conf_flat'
    if num_classes != 21:
        name += '_{}'.format(num_classes)
    x = Dense(num_priors * num_classes, name=name)(net['pool6'])
    net['pool6_mbox_conf_flat'] = x
    priorbox = PriorBox(img_size, 900.0, max_size=900.0, aspect_ratios=[2, 3],
                        variances=[0.1, 0.1, 0.2, 0.2],
                        name='pool6_mbox_priorbox')
    # PriorBox needs a spatial tensor, so reshape the pooled vector to 1x1.
    if K.image_dim_ordering() == 'tf':
        target_shape = (1, 1, 256)
    else:
        target_shape = (256, 1, 1)
    net['pool6_reshaped'] = Reshape(target_shape,
                                    name='pool6_reshaped')(net['pool6'])
    net['pool6_mbox_priorbox'] = priorbox(net['pool6_reshaped'])
    # Gather all predictions
    net['mbox_loc'] = merge([
                             net['fc7_mbox_loc_flat'],
                             net['conv6_2_mbox_loc_flat'],
                             net['conv7_2_mbox_loc_flat'],
                             net['conv8_2_mbox_loc_flat'],
                             net['pool6_mbox_loc_flat']],
                            mode='concat', concat_axis=1, name='mbox_loc')
    net['mbox_conf'] = merge([
                              net['fc7_mbox_conf_flat'],
                              net['conv6_2_mbox_conf_flat'],
                              net['conv7_2_mbox_conf_flat'],
                              net['conv8_2_mbox_conf_flat'],
                              net['pool6_mbox_conf_flat']],
                             mode='concat', concat_axis=1, name='mbox_conf')
    net['mbox_priorbox'] = merge([
                                  net['fc7_mbox_priorbox'],
                                  net['conv6_2_mbox_priorbox'],
                                  net['conv7_2_mbox_priorbox'],
                                  net['conv8_2_mbox_priorbox'],
                                  net['pool6_mbox_priorbox']],
                                 mode='concat', concat_axis=1,
                                 name='mbox_priorbox')
    # NOTE(review): if neither attribute check below matches, num_boxes is
    # unbound and the following Reshape raises NameError — this relies on one
    # branch always firing.
    if hasattr(net['mbox_loc'], '_keras_shape'):
        num_boxes = net['mbox_loc']._keras_shape[-1] // 4
    elif hasattr(net['mbox_loc'], 'int_shape'):
        num_boxes = K.int_shape(net['mbox_loc'])[-1] // 4
    net['mbox_loc'] = Reshape((num_boxes, 4),
                              name='mbox_loc_final')(net['mbox_loc'])
    net['mbox_conf'] = Reshape((num_boxes, num_classes),
                               name='mbox_conf_logits')(net['mbox_conf'])
    net['mbox_conf'] = Activation('softmax',
                                  name='mbox_conf_final')(net['mbox_conf'])
    net['predictions'] = merge([net['mbox_loc'],
                               net['mbox_conf'],
                               net['mbox_priorbox']],
                               mode='concat', concat_axis=2,
                               name='predictions')
    model = Model(net['input'], net['predictions'])
    return model
 def loadModel(self, model):
     # NOTE(review): ignores the `model` argument and always returns a fresh
     # default Xception (ImageNet weights, include_top=True) — confirm intent.
     return Xception()
示例#29
0
def main(arguments):
    """Run an MWU adversarial-noise experiment over an ensemble of models.

    Parses the CLI arguments, creates a per-experiment results directory and
    log file, loads either the MNIST ensemble or five ImageNet classifiers
    (sharing one raw-pixel input tensor), then runs the multiplicative-
    weights-update loop with a gradient-descent attack and saves the
    resulting arrays as .npy files.

    Args:
        arguments: sequence of CLI tokens (e.g. sys.argv[1:]).
    """
    parser = argparse.ArgumentParser(
        description="deep learning classification experiments argument parser")
    parser.add_argument("-exp_type",
                        help="mnist or imagenet experiments",
                        choices=["mnist", "imagenet"],
                        required=True)
    parser.add_argument("-noise_type",
                        help="targeted or untargeted noise",
                        choices=["targeted", "untargeted"],
                        type=str,
                        required=True)
    parser.add_argument("-holdout",
                        help='index of held out model',
                        choices=range(N_ClASSIFIERS),
                        type=int,
                        required=False)
    parser.add_argument("-data_path",
                        help="directory with experiment data",
                        type=str,
                        required=True)
    parser.add_argument("-model_path",
                        help="directory with model weights",
                        type=str,
                        required=False)
    parser.add_argument("-mwu_iters",
                        help="number of iterations for the MWU",
                        type=int,
                        required=True)
    parser.add_argument("-alpha",
                        help="noise budget",
                        type=float,
                        required=True)
    parser.add_argument("-opt_iters",
                        help="number of iterations to run optimizer",
                        type=int,
                        required=True)
    parser.add_argument("-learning_rate",
                        help="learning rate for the optimizer",
                        type=float,
                        required=True)
    parser.add_argument("-log_level",
                        help='level of info for the logger',
                        choices=['INFO', 'DEBUG'],
                        required=True)
    parser.add_argument(
        "-purpose",
        help='short string (1 word) to describe purpose of experiment',
        type=str,
        required=True)
    args = parser.parse_args(arguments)

    # Results directory name embeds type, purpose, noise, budget and date.
    date = datetime.datetime.now()
    holdout_str = "HoldOut{}".format(
        args.holdout) if args.holdout is not None else "FullSet"
    exp_name = "deepLearning_{}_{}_{}_{}_{}_{}_{}".format(
        args.exp_type, args.purpose, args.noise_type, args.alpha, date.month,
        date.day, holdout_str)
    if not os.path.exists('experiment_results/'):
        os.mkdir('experiment_results/')

    exp_dir = 'experiment_results/' + exp_name
    if not os.path.exists(exp_dir):
        os.mkdir(exp_dir)

    log_file = exp_name + ".log"
    log_level = log.DEBUG if args.log_level == 'DEBUG' else log.INFO
    log.basicConfig(format='%(asctime)s: %(message)s',
                    level=log_level,
                    datefmt='%m/%d/%Y %I:%M:%S %p',
                    filename=exp_dir + "/" + log_file,
                    filemode='w')

    log.info("Experiment Type {}".format(args.exp_type))
    log.info("Hold Out Model {}".format(holdout_str))
    log.info("Noise Type {}".format(args.noise_type))
    log.info("Num Classifiers {}".format(N_ClASSIFIERS))
    log.info("MWU Iters {} ".format(args.mwu_iters))
    log.info("Alpha {}".format(args.alpha))
    log.info("Learning Rate {}".format(args.learning_rate))
    log.info("Optimization Iters {}".format(args.opt_iters))
    log.info("Data path : {}".format(args.data_path))
    log.info("Model path {}".format(args.model_path))

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        log.debug("\nbeginning to load models...")

        # setup models
        if args.exp_type == "mnist":
            models = [
                load_model(i,
                           '{}/model_{}_weights.h5'.format(args.model_path, i))
                for i in range(N_ClASSIFIERS)
            ]
            dataset_params = [28, 1, 10, (0.0, 1.0)]
        else:
            # All five ImageNet models share one raw-pixel input tensor;
            # per-model Lambda layers apply 'tf' or 'caffe' preprocessing so a
            # single noise tensor can attack the whole ensemble.
            input_tensor = Input(shape=(224, 224, 3))
            tf_inputs = Lambda(lambda x: preprocess_input(x, mode='tf'))(
                input_tensor)
            caffe_inputs = Lambda(lambda x: preprocess_input(x, mode='caffe'))(
                input_tensor)

            base_inception = InceptionV3(input_tensor=input_tensor,
                                         weights="imagenet",
                                         include_top=True)
            inception = Model(inputs=input_tensor,
                              outputs=base_inception(tf_inputs))

            base_densenet = DenseNet121(input_tensor=input_tensor,
                                        weights="imagenet",
                                        include_top=True)
            densenet = Model(inputs=input_tensor,
                             outputs=base_densenet(tf_inputs))

            base_resnet = ResNet50(input_tensor=input_tensor,
                                   weights="imagenet",
                                   include_top=True)
            resnet = Model(inputs=input_tensor,
                           outputs=base_resnet(caffe_inputs))

            base_vgg = VGG16(input_tensor=input_tensor,
                             weights="imagenet",
                             include_top=True)
            vgg = Model(inputs=input_tensor, outputs=base_vgg(caffe_inputs))

            base_xception = Xception(input_tensor=input_tensor,
                                     weights="imagenet",
                                     include_top=True)
            xception = Model(inputs=input_tensor,
                             outputs=base_xception(tf_inputs))

            models = [inception, xception, resnet, densenet, vgg]

            for model in models:
                model.compile(optimizer='adam',
                              loss='categorical_crossentropy',
                              metrics=['accuracy'])

            dataset_params = [224, 3, 1000, (0.0, 255.0)]

        if args.holdout is not None:
            del models[args.holdout]
            # BUG FIX: this was a Python 2 print *statement*, which is a
            # SyntaxError under Python 3; now a print() call, same output.
            print("ASDASDASDASDAS Length of Models {}".format(len(models)))

        log.debug("finished loading models!\n")

        # Cap the experiment at the first 100 samples.
        X_exp = np.load(args.data_path + "/" + "X_exp.npy")[:100]
        Y_exp = np.load(args.data_path + "/" + "Y_exp.npy")[:100]

        log.info("Num Points {}".format(X_exp.shape[0]))
        target_bool = args.noise_type == "targeted"

        # initialize the attack object
        attack_obj = GradientDescentDL(sess,
                                       models,
                                       args.alpha,
                                       dataset_params,
                                       targeted=target_bool,
                                       batch_size=1,
                                       max_iterations=args.opt_iters,
                                       learning_rate=args.learning_rate,
                                       confidence=0)

        noise_func = partial(gradientDescentFunc, attack=attack_obj)

        # NOTE(review): run_mwu is hard-coded untargeted even though the
        # attack object above honors -noise_type; confirm whether this
        # should be `targeted = target_bool`.
        targeted = False
        weights, noise, loss_history, acc_history, action_loss = run_mwu(
            models,
            args.mwu_iters,
            X_exp,
            Y_exp,
            args.alpha,
            noise_func,
            targeted=targeted,
            dl=True,
            use_ray=False)  # TODO

        np.save(exp_dir + "/" + "weights.npy", weights)
        np.save(exp_dir + "/" + "noise.npy", noise)
        np.save(exp_dir + "/" + "loss_history.npy", loss_history)
        np.save(exp_dir + "/" + "acc_history.npy", acc_history)
        np.save(exp_dir + "/" + "action_loss.npy", action_loss)
        log.info("Success")
示例#30
0
    def __init__(self, top_model):
        """Build an ImageNet-pretrained, headless backbone with 'avg' pooling.

        Args:
            top_model: backbone name — one of 'mobilenet', 'inception',
                'vgg', 'xception', 'resnet' or 'inception-resnet'.

        Raises:
            Exception: if `top_model` is not one of the supported names.
        """
        self.top_model = top_model

        # Each branch imports its backbone lazily so only the selected
        # architecture's module gets loaded.
        if self.top_model == 'mobilenet':
            from keras.applications.mobilenet_v2 import MobileNetV2
            self.input_shape = (224, 224)
            self.inp = Input((self.input_shape[0], self.input_shape[1], 3))
            self.initial_model = MobileNetV2(include_top=False,
                                             input_shape=(self.input_shape[0],
                                                          self.input_shape[1],
                                                          3),
                                             input_tensor=self.inp,
                                             pooling='avg')

        elif self.top_model == 'inception':
            from keras.applications.inception_v3 import InceptionV3
            self.input_shape = (299, 299)
            self.inp = Input((self.input_shape[0], self.input_shape[1], 3))
            self.initial_model = InceptionV3(include_top=False,
                                             input_shape=(self.input_shape[0],
                                                          self.input_shape[1],
                                                          3),
                                             input_tensor=self.inp,
                                             pooling='avg')

        elif self.top_model == 'vgg':
            # NOTE: 'vgg' maps to VGG19, not VGG16.
            from keras.applications.vgg19 import VGG19
            self.input_shape = (224, 224)
            self.inp = Input((self.input_shape[0], self.input_shape[1], 3))
            self.initial_model = VGG19(include_top=False,
                                       input_shape=(self.input_shape[0],
                                                    self.input_shape[1], 3),
                                       input_tensor=self.inp,
                                       pooling='avg')

        elif self.top_model == 'xception':
            from keras.applications.xception import Xception
            self.input_shape = (299, 299)
            self.inp = Input((self.input_shape[0], self.input_shape[1], 3))
            self.initial_model = Xception(include_top=False,
                                          input_shape=(self.input_shape[0],
                                                       self.input_shape[1], 3),
                                          input_tensor=self.inp,
                                          pooling='avg')

        elif self.top_model == 'resnet':
            from keras.applications.resnet50 import ResNet50
            self.input_shape = (224, 224)
            self.inp = Input((self.input_shape[0], self.input_shape[1], 3))
            self.initial_model = ResNet50(include_top=False,
                                          input_shape=(self.input_shape[0],
                                                       self.input_shape[1], 3),
                                          input_tensor=self.inp,
                                          pooling='avg')

        elif self.top_model == 'inception-resnet':
            from keras.applications.inception_resnet_v2 import InceptionResNetV2
            self.input_shape = (299, 299)
            self.inp = Input((self.input_shape[0], self.input_shape[1], 3))
            self.initial_model = InceptionResNetV2(
                include_top=False,
                input_shape=(self.input_shape[0], self.input_shape[1], 3),
                input_tensor=self.inp,
                pooling='avg')

        else:
            raise Exception(
                "Values allowed for model parameter are - mobilenet, inception, vgg, xception, resnet and inception-resnet. Value passed was: {}"
                .format(self.top_model))