def createModelGrayscale():
    # EfficientNet-B5 backbone for single-channel 256x256 inputs, trained from scratch.
    net = EfficientNetB5((256, 256, 1), weights=None, include_top=False, pooling='avg')
    # 14 independent sigmoid outputs for multi-label classification.
    x = Dense(14, kernel_initializer=EfficientNetDenseInitializer())(net.output)
    x = Activation('sigmoid')(x)
    model = Model(net.input, x)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy', auc_roc])
    model.summary()
    return model
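# A minimal usage sketch for createModelGrayscale(), assuming EfficientNetB5,
# EfficientNetDenseInitializer and the auc_roc metric referenced above are
# importable from elsewhere in the project; the random arrays below are
# placeholders, not real data.
import numpy as np

model = createModelGrayscale()
x_batch = np.random.rand(8, 256, 256, 1).astype('float32')         # dummy grayscale images
y_batch = np.random.randint(0, 2, size=(8, 14)).astype('float32')  # dummy multi-label targets
model.fit(x_batch, y_batch, batch_size=8, epochs=1)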
def model_fn(FLAGS, objective, optimizer, metrics):
    # EfficientNet-B5 backbone without the top classifier; pooling is left off
    # because global average pooling is applied explicitly below.
    model = EfficientNetB5(weights=None,
                           include_top=False,
                           input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                           classes=FLAGS.num_classes,
                           pooling=None)
    model.load_weights('/home/work/user-job-dir/src/efficientnet-b5_notop.h5')
    # Intended swap of BatchNormalization for GroupNormalization; note that
    # reassigning model.layers[i] does not rewire the already-built graph.
    for i, layer in enumerate(model.layers):
        if "batch_normalization" in layer.name:
            model.layers[i] = GroupNormalization(groups=32, axis=-1, epsilon=0.00001)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.4)(x)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(x)  # alternative: activation='linear'
    model = Model(inputs=model.input, outputs=predictions)
    model = multi_gpu_model(model, 4)  # set to the number of GPUs available; 4 replicates the model across 4 GPUs
    # model.load_weights('/home/work/user-job-dir/src/weights_004_0.9223.h5')
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
def model_fn(FLAGS, objective, optimizer, metrics):
    # EfficientNet-B5 backbone with ImageNet weights; pooling is left off
    # because global average pooling is applied explicitly below.
    model = EfficientNetB5(weights='imagenet',
                           include_top=False,
                           input_shape=(FLAGS.input_size, FLAGS.input_size, 3),
                           classes=FLAGS.num_classes,
                           pooling=None)
    # model.load_weights('/home/yons/code/tmp_garbage/model_snapshots/weights_001_0.6548.h5')
    # Intended swap of BatchNormalization for GroupNormalization; note that
    # reassigning model.layers[i] does not rewire the already-built graph.
    for i, layer in enumerate(model.layers):
        if "batch_normalization" in layer.name:
            model.layers[i] = GroupNormalization(groups=32, axis=-1, epsilon=0.00001)
    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.4)(x)
    predictions = Dense(FLAGS.num_classes, activation='softmax')(x)  # alternative: activation='linear'
    model = Model(inputs=model.input, outputs=predictions)
    # model = multi_gpu_model(model, 4)  # set to the number of GPUs available; 4 replicates the model across 4 GPUs
    # model.load_weights('/home/yons/code/tmp_garbage/model_snapshots_3_有用的异常模型/weights_009_0.7229.h5')
    model.compile(loss=objective, optimizer=optimizer, metrics=metrics)
    return model
def cnn_model(model_name, img_size):
    """Model definition using the selected backbone architecture."""
    input_size = (img_size, img_size, 3)
    if model_name == "xception":
        print("Loading Xception wts...")
        baseModel = Xception(weights="imagenet", include_top=False,
                             input_shape=(img_size, img_size, 3))
    elif model_name == "iv3":
        baseModel = InceptionV3(weights="imagenet", include_top=False,
                                input_shape=(img_size, img_size, 3))
    elif model_name == "irv2":
        baseModel = InceptionResNetV2(weights="imagenet", include_top=False,
                                      input_shape=(img_size, img_size, 3))
    elif model_name == "resnet":
        baseModel = ResNet50(weights="imagenet", include_top=False,
                             input_shape=(img_size, img_size, 3))
    elif model_name == "nasnet":
        baseModel = NASNetLarge(weights="imagenet", include_top=False,
                                input_shape=(img_size, img_size, 3))
    elif model_name == "ef0":
        baseModel = EfficientNetB0(input_size, weights="imagenet", include_top=False)
    elif model_name == "ef5":
        baseModel = EfficientNetB5(input_size, weights="imagenet", include_top=False)

    # Classification head on top of the backbone.
    headModel = baseModel.output
    headModel = GlobalAveragePooling2D()(headModel)
    headModel = Dense(512, activation="relu", kernel_initializer="he_uniform")(headModel)
    headModel = Dropout(0.4)(headModel)
    # headModel = Dense(512, activation="relu", kernel_initializer="he_uniform")(headModel)
    # headModel = Dropout(0.5)(headModel)
    predictions = Dense(5, activation="softmax", kernel_initializer="he_uniform")(headModel)
    model = Model(inputs=baseModel.input, outputs=predictions)

    # Freeze the backbone so only the new head is trained initially.
    for layer in baseModel.layers:
        layer.trainable = False

    optimizer = Nadam(lr=0.002, beta_1=0.9, beta_2=0.999,
                      epsilon=1e-08, schedule_decay=0.004)
    model.compile(loss="categorical_crossentropy", optimizer=optimizer,
                  metrics=["accuracy"])
    return model
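# A brief usage sketch for cnn_model(), assuming the backbone classes referenced
# above are importable and ImageNet weights can be downloaded; "xception" and the
# 299x299 input size are illustrative choices, not values fixed by the snippet.
model = cnn_model("xception", img_size=299)
model.summary()

# Fine-tuning later would mean unfreezing part of the backbone and re-compiling, e.g.:
# for layer in model.layers[-30:]:
#     layer.trainable = True
# model.compile(loss="categorical_crossentropy", optimizer=Nadam(lr=2e-4), metrics=["accuracy"])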
def main():
    args = parse_args()

    if args.name is None:
        args.name = 'WideResNet%s-%s' % (args.depth, args.width)
    if args.cutout:
        args.name += '_wCutout'
    if args.auto_augment:
        args.name += '_wAutoAugment'
    if not os.path.exists('models/%s' % args.name):
        os.makedirs('models/%s' % args.name)

    print('Config -----')
    for arg in vars(args):
        print('%s: %s' % (arg, getattr(args, arg)))
    print('------------')

    with open('models/%s/args.txt' % args.name, 'w') as f:
        for arg in vars(args):
            print('%s: %s' % (arg, getattr(args, arg)), file=f)
    joblib.dump(args, 'models/%s/args.pkl' % args.name)

    # create model
    # input_layer = Input(shape=(28, 28, 1))
    # input_image_ = Lambda(lambda x: K.repeat_elements(K.expand_dims(x, 3), 3, 3))(input_layer)
    # model = ResNext(img_dim, depth=depth, cardinality=cardinality, width=width, weights=None, classes=nb_classes)
    # se_resnext = SEResNeXt()
    # model1 = se_resnext.build_model(inputs=input_layer, num_classes=10, include_top=False)  # wrn
    # model1 = Dense(10)(model1)
    # model1.load_weights("", by_name=True)
    # model1 = NASNetMobile(input_tensor=input_image_, include_top=False, pooling='avg', weights="./model/nasnet.hdf5")
    # model2 = create_shakeshake_cifar(n_classes=10, include_top=False, x_in=input_layer)
    # model2 = Dense(10)(model2)

    # Grayscale 56x56 input, repeated to 3 channels for the ImageNet-pretrained backbone.
    input_layer = Input(shape=(56, 56, 1))
    input_image_ = Lambda(lambda x: K.repeat_elements(x, 3, 3))(input_layer)
    print(input_image_.shape)
    model = EfficientNetB5(input_shape=(56, 56, 3), input_tensor=input_image_,
                           classes=10, include_top=False, weights='imagenet')
    x = keras.layers.GlobalAveragePooling2D()(model.output)
    x = keras.layers.Dropout(0.5)(x)
    x = keras.layers.Dense(10)(x)
    x = keras.layers.Activation('softmax')(x)
    model = keras.models.Model(input_layer, x)

    # model = WideResNet(args.depth, args.width, num_classes=10)
    # model3 = Dense(10)(model3)
    # t = keras.layers.Concatenate(axis=1)([model1, model2])
    # first fully connected layer
    # top_model = Dense(units=128, activation="relu")(t)
    # top_model = Dropout(rate=0.5)(top_model)
    # top_model = Dense(units=10, activation="softmax")(top_model)
    # model = Model(inputs=input_layer, outputs=top_model)

    model.summary()

    # model = WideResNet(args.depth, args.width, num_classes=10)
    model.compile(
        # loss=[focal_loss(alpha=.25, gamma=2)],
        loss=keras.losses.mean_absolute_error,
        # loss='categorical_crossentropy',
        optimizer=SGD(lr=0.001, momentum=0.9),
        # optimizer=Adam(1e-3),
        # optimizer=RectifiedAdam(1e-3),
        metrics=[keras.metrics.mean_absolute_error]
        # metrics=['accuracy']
    )

    # (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    mnist = read_data_sets(
        './data/fashion',
        reshape=False,
        validation_size=0,
        source_url='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
    )
    x_train = mnist.train.images
    y_train = mnist.train.labels
    x_test = mnist.test.images
    y_test = mnist.test.labels

    # Upscale the 28x28 Fashion-MNIST images to 56x56.
    height, width = 56, 56
    x_train = x_train.reshape((-1, 28, 28))
    x_train = np.array([
        misc.imresize(x, (height, width)).astype(float)
        for x in tqdm(iter(x_train))
    ]) / 255.
    x_train = x_train.reshape((-1, height, width, 1))
    x_test = x_test.reshape((-1, 28, 28))
    x_test = np.array([
        misc.imresize(x, (height, width)).astype(float)
        for x in tqdm(iter(x_test))
    ]) / 255.
    x_test = x_test.reshape((-1, height, width, 1))
    x_train = np.uint8(x_train * 255)
    x_test = np.uint8(x_test * 255)

    datagen = Cifar10ImageDataGenerator(args)
    x_test = datagen.standardize(x_test)

    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)

    weights_file = "models/merge.h5"
    model.load_weights(weights_file)

    # Reduce the learning rate when the monitored metric stops improving.
    lr_reducer = ReduceLROnPlateau(monitor='val_loss',
                                   factor=0.9,
                                   cooldown=0,
                                   patience=20,
                                   min_lr=1e-8)
    model_checkpoint = ModelCheckpoint(weights_file,
                                       monitor="val_acc",
                                       save_best_only=True,
                                       mode='auto')
    callbacks = [
        # ModelCheckpoint('models/%s/model.hdf5' % args.name, verbose=1, save_best_only=True),
        lr_reducer,
        model_checkpoint,
        CSVLogger('models/%s/log.csv' % args.name),
        CosineAnnealingScheduler(T_max=args.epochs, eta_max=0.05, eta_min=4e-4)
    ]

    model.fit_generator(datagen.flow(x_train, y_train, batch_size=args.batch_size),
                        steps_per_epoch=len(x_train) // args.batch_size,
                        validation_data=(x_test, y_test),
                        epochs=args.epochs,
                        verbose=1,
                        callbacks=callbacks)

    scores = model.evaluate(x_test, y_test, verbose=1)
    print('Test loss:', scores[0])
    print('Test accuracy:', scores[1])

    # Shut down the (Windows) machine 59 seconds after training finishes.
    os.system('shutdown -s -f -t 59')
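# CosineAnnealingScheduler and Cifar10ImageDataGenerator are project-specific
# helpers that are not shown in this snippet. Below is a minimal sketch of what a
# cosine-annealing learning-rate callback with the constructor signature used
# above could look like; the project's actual implementation may differ.
import math
from keras import backend as K
from keras.callbacks import Callback

class CosineAnnealingScheduler(Callback):
    """Anneal the learning rate from eta_max down to eta_min over T_max epochs."""

    def __init__(self, T_max, eta_max, eta_min=0.0, verbose=0):
        super(CosineAnnealingScheduler, self).__init__()
        self.T_max = T_max
        self.eta_max = eta_max
        self.eta_min = eta_min
        self.verbose = verbose

    def on_epoch_begin(self, epoch, logs=None):
        # lr(t) = eta_min + 0.5 * (eta_max - eta_min) * (1 + cos(pi * t / T_max))
        lr = self.eta_min + 0.5 * (self.eta_max - self.eta_min) * \
            (1 + math.cos(math.pi * epoch / self.T_max))
        K.set_value(self.model.optimizer.lr, lr)
        if self.verbose > 0:
            print('Epoch %d: setting learning rate to %.6f.' % (epoch + 1, lr))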
def model_confirm(self, choosed_model):
    if choosed_model == 'VGG16':
        model = MODEL(self.config).VGG16()
    elif choosed_model == 'VGG19':
        model = MODEL(self.config).VGG19()
    elif choosed_model == 'AlexNet':
        model = MODEL(self.config).AlexNet()
    elif choosed_model == 'LeNet':
        model = MODEL(self.config).LeNet()
    elif choosed_model == 'ZF_Net':
        model = MODEL(self.config).ZF_Net()
    elif choosed_model == 'ResNet18':
        model = ResnetBuilder().build_resnet18(self.config)
    elif choosed_model == 'ResNet34':
        model = ResnetBuilder().build_resnet34(self.config)
    elif choosed_model == 'ResNet101':
        model = ResnetBuilder().build_resnet101(self.config)
    elif choosed_model == 'ResNet152':
        model = ResnetBuilder().build_resnet152(self.config)
    elif choosed_model == 'mnist_net':
        model = MODEL(self.config).mnist_net()
    elif choosed_model == 'TSL16':
        model = MODEL(self.config).TSL16()
    elif choosed_model == 'ResNet50':
        model = keras.applications.ResNet50(include_top=True, weights=None, input_tensor=None,
                                            input_shape=(self.normal_size, self.normal_size, self.channles),
                                            pooling='max', classes=self.classNumber)
    elif choosed_model == 'InceptionV3':
        model = keras.applications.InceptionV3(include_top=True, weights=None, input_tensor=None,
                                               input_shape=(self.normal_size, self.normal_size, self.channles),
                                               pooling='max', classes=self.classNumber)
    elif choosed_model == 'Xception':
        model = keras.applications.Xception(include_top=True, weights=None, input_tensor=None,
                                            input_shape=(self.normal_size, self.normal_size, self.channles),
                                            pooling='max', classes=self.classNumber)
    elif choosed_model == 'MobileNet':
        model = keras.applications.MobileNet(include_top=True, weights=None, input_tensor=None,
                                             input_shape=(self.normal_size, self.normal_size, self.channles),
                                             pooling='max', classes=self.classNumber)
    elif choosed_model == 'InceptionResNetV2':
        model = keras.applications.InceptionResNetV2(include_top=True, weights=None, input_tensor=None,
                                                     input_shape=(self.normal_size, self.normal_size, self.channles),
                                                     pooling='max', classes=self.classNumber)
    elif choosed_model == 'SEResNetXt':
        model = SEResNetXt(self.config).model
    elif choosed_model == 'DenseNet':
        depth = 40
        nb_dense_block = 3
        growth_rate = 12
        nb_filter = 12
        bottleneck = False
        reduction = 0.0
        dropout_rate = 0.0
        # Build the input shape according to the backend's image data format.
        img_dim = (self.channles, self.normal_size, self.normal_size) \
            if K.image_data_format() == 'channels_first' \
            else (self.normal_size, self.normal_size, self.channles)
        model = densenet.DenseNet(img_dim,
                                  classNumber=self.classNumber,
                                  depth=depth,
                                  nb_dense_block=nb_dense_block,
                                  growth_rate=growth_rate,
                                  nb_filter=nb_filter,
                                  dropout_rate=dropout_rate,
                                  bottleneck=bottleneck,
                                  reduction=reduction,
                                  weights=None)
    elif choosed_model == 'SENet':
        model = sm.Unet('senet154',
                        input_shape=(self.normal_size, self.normal_size, self.channles),
                        classes=4, activation='softmax', encoder_weights=None)
        # model.summary()
    elif choosed_model == 'EfficientNetB5':
        model = EfficientNetB5(input_shape=(self.normal_size, self.normal_size, self.channles),
                               classes=4, weights=None)
    elif choosed_model == 'EfficientNetB4':
        model = EfficientNetB4(input_shape=(self.normal_size, self.normal_size, self.channles),
                               classes=4, weights=None)
    elif choosed_model == 'EfficientNetB3':
        model = EfficientNetB3(input_shape=(self.normal_size, self.normal_size, self.channles),
                               classes=4, weights=None)
    elif choosed_model == 'EfficientNetB2':
        model = EfficientNetB2(input_shape=(self.normal_size, self.normal_size, self.channles),
                               classes=4, weights=None)
    elif choosed_model == 'EfficientNetB1':
        model = EfficientNetB1(input_shape=(self.normal_size, self.normal_size, self.channles),
                               classes=4, weights=None)
    elif choosed_model == 'EfficientNetB0':
        model = EfficientNetB0(input_shape=(self.normal_size, self.normal_size, self.channles),
                               classes=4, weights=None)
    elif choosed_model == 'MobileNetV3_Large':
        model = MobileNetV3_Large(shape=(self.normal_size, self.normal_size, self.channles),
                                  n_class=4).build()
    elif choosed_model == 'MobileNetV3_Small':
        model = MobileNetV3_Small(shape=(self.normal_size, self.normal_size, self.channles),
                                  n_class=4).build()
    elif choosed_model == 'NASNetLarge':
        model = NASNetLarge(input_shape=(self.normal_size, self.normal_size, self.channles),
                            weights=None, use_auxiliary_branch=False, classes=4)
    elif choosed_model == 'NASNetMobile':
        model = NASNetMobile(input_shape=(self.normal_size, self.normal_size, self.channles),
                             weights=None, use_auxiliary_branch=False, classes=4)
    elif choosed_model == 'NASNetMiddle':
        model = NASNetMiddle(input_shape=(self.normal_size, self.normal_size, self.channles),
                             weights=None, use_auxiliary_branch=False, classes=4)
    elif choosed_model == 'ShuffleNet':
        model = ShuffleNet(input_shape=(self.normal_size, self.normal_size, self.channles),
                           classes=4)
    elif choosed_model == 'ShuffleNetV2':
        model = ShuffleNetV2(input_shape=(self.normal_size, self.normal_size, self.channles),
                             classes=4)
    return model
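# Note that model_confirm() returns an uncompiled model, so the caller is expected
# to compile it before training; a hypothetical follow-up could look like:
# model = self.model_confirm('EfficientNetB0')
# model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])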
adam = keras.optimizers.Adam(lr=0.0001)
effb0_model.compile(optimizer=adam, loss='binary_crossentropy', metrics=['binary_accuracy'])
effb0_model.load_weights('ml_models/efficient0_2_model.h5')
eff_prob = effb0_model.predict(img)

################################################################################
################################################################################

from keras_efficientnets import EfficientNetB5

effb5_model = EfficientNetB5(include_top=False,
                             weights='ml_models/efficientnet-b5_notop.h5',
                             pooling='avg',
                             input_shape=(96, 96, 3))
x = effb5_model.output
x = Dense(32)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.25)(x)
x = Dense(16)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.25)(x)
x = Dense(8)(x)
x = BatchNormalization()(x)
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
from keras import backend as K
from PIL import Image

img_width, img_height = 224, 224

"""
if K.image_data_format() == 'channels_first':
    input_shape = (1, img_width, img_height)
else:
    input_shape = (img_width, img_height, 1)
"""

from keras_efficientnets import EfficientNetB5

model = EfficientNetB5(include_top=False, weights='imagenet', pooling='avg', input_shape=(224, 224, 3))
# model = keras.applications.nasnet.NASNetLarge(include_top=False, weights='model_weights/NASNet-large-no-top.h5', input_shape=input_shape, pooling='avg')
# model = keras.applications.xception.Xception(include_top=False, weights='imagenet', pooling='avg', input_shape=(299, 299, 3))

for layer in model.layers[0:-1]:
    layer.trainable = True

x = model.output
x = Dense(64)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.3)(x)