def mobilenet_32s(train_encoder=True, final_layer_activation='sigmoid', prep=True):
    """Build a MobileNet-backed FCN-32s model with pretrained encoder weights.

    NOTE(review): a second `mobilenet_32s` with different weight-file names is
    defined later in this file and will shadow this one — confirm which
    definition is intended.

    Args:
        train_encoder: if False, freeze every MobileNet encoder layer.
        final_layer_activation: activation applied to the 32x-upsampling
            output layer (default 'sigmoid' for binary masks).
        prep: selects which preprocessing weight file to load.

    Returns:
        A Model mapping the MobileNet input to a single-channel map
        upsampled 32x by one transposed convolution.
    """
    net = MobileNet(include_top=False, weights=None)
    # by_name=True matches weights layer-by-layer, so the missing top is fine.
    if prep:  # was `prep == True` — compare truthiness, not identity to True
        net.load_weights(os.path.join('.', 'keras_preprocessing_weights.h5'),
                         by_name=True)
    else:
        net.load_weights(os.path.join('.', 'wences_preprocessing_weights.h5'),
                         by_name=True)
    for layer in net.layers:
        layer.trainable = train_encoder
    # build decoder: 1x1 scoring conv, then a single 32x transposed conv
    predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output)
    deconv32 = Conv2DTranspose(filters=1,
                               kernel_size=64,
                               strides=32,
                               padding='same',
                               use_bias=False,
                               activation=final_layer_activation)(predict)
    return Model(inputs=net.input, outputs=deconv32)
def mobilenet_32s(train_encoder=True, final_layer_activation="sigmoid", prep=True):
    """Build a MobileNet-backed FCN-32s model with pretrained encoder weights.

    Args:
        train_encoder: if False, freeze every MobileNet encoder layer.
        final_layer_activation: activation applied to the 32x-upsampling
            output layer (default "sigmoid" for binary masks).
        prep: selects which pretrained weight file to load.

    Returns:
        A Model mapping the MobileNet input to a single-channel map
        upsampled 32x by one transposed convolution.
    """
    net = MobileNet(include_top=False, weights=None)
    # by_name=True matches weights layer-by-layer, so the missing top is fine.
    if prep:  # was `prep == True` — compare truthiness, not identity to True
        net.load_weights(os.path.join(".", "mn_classification_weights.h5"), by_name=True)
    else:
        net.load_weights(os.path.join(".", "test_preprocessing_weights.h5"), by_name=True)
    for layer in net.layers:
        layer.trainable = train_encoder
    # build decoder: 1x1 scoring conv, then a single 32x transposed conv
    predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output)
    deconv32 = Conv2DTranspose(
        filters=1,
        kernel_size=64,
        strides=32,
        padding="same",
        use_bias=False,
        activation=final_layer_activation,
    )(predict)
    return Model(inputs=net.input, outputs=deconv32)
def mobilenet_8s(train_encoder=True, final_layer_activation="sigmoid", prep=True):
    """Build a MobileNet-backed FCN-8s model with skip connections.

    Fuses scored features from `conv_pw_11_relu` and `conv_pw_5_relu` with
    progressively upsampled decoder outputs, finishing with an 8x upsample.

    Args:
        train_encoder: if False, freeze every MobileNet encoder layer.
        final_layer_activation: activation applied to the final 8x-upsampling
            layer (default "sigmoid" for binary masks).
        prep: selects which pretrained weight file to load.

    Returns:
        A Model mapping the MobileNet input to a single-channel mask map.
    """
    net = MobileNet(include_top=False, weights=None)
    # by_name=True matches weights layer-by-layer, so the missing top is fine.
    if prep:  # was `prep == True` — compare truthiness, not identity to True
        net.load_weights(os.path.join(".", "mn_classification_weights.h5"), by_name=True)
    else:
        net.load_weights(os.path.join(".", "test_preprocessing_weights.h5"), by_name=True)
    for layer in net.layers:
        layer.trainable = train_encoder
    # build decoder: score, upsample 2x, fuse with two skip branches, then 8x
    predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output)
    deconv2 = Conv2DTranspose(filters=1, kernel_size=4, strides=2, padding="same", use_bias=False)(predict)
    pred_conv_pw_11_relu = Conv2D(filters=1, kernel_size=1, strides=1)(
        net.get_layer("conv_pw_11_relu").output)
    fuse1 = Add()([deconv2, pred_conv_pw_11_relu])
    pred_conv_pw_5_relu = Conv2D(filters=1, kernel_size=1, strides=1)(
        net.get_layer("conv_pw_5_relu").output)
    deconv2fuse1 = Conv2DTranspose(filters=1, kernel_size=4, strides=2, padding="same", use_bias=False)(fuse1)
    fuse2 = Add()([deconv2fuse1, pred_conv_pw_5_relu])
    deconv8 = Conv2DTranspose(
        filters=1,
        kernel_size=16,
        strides=8,
        padding="same",
        use_bias=False,
        activation=final_layer_activation,
    )(fuse2)
    return Model(inputs=net.input, outputs=deconv8)
def __call__(self):
    """Build the two-headed (gender, age) MobileNet classifier and load weights.

    Returns a Model whose outputs are [gender_preds, age_preds]; weights are
    restored from `self.weights_file`.
    """
    base = MobileNet(weights='imagenet', include_top=False,
                     input_shape=(self.img_size, self.img_size, 3))
    features = GlobalAveragePooling2D()(base.output)
    # Two dropout+dense stages, then a final dropout shared by both heads.
    for _ in range(2):
        features = Dropout(0.15)(features)
        features = Dense(1024, activation='relu')(features)
    features = Dropout(0.15)(features)
    age_preds = Dense(7, activation='softmax', name="pred_age")(features)
    gender_preds = Dense(2, activation='softmax', name="pred_gender")(features)
    net = Model(inputs=base.input, outputs=[gender_preds, age_preds])
    net.load_weights(self.weights_file)
    return net
# Compile with Adam; track cross-entropy plus top-1 and top-3 accuracy.
model.compile(optimizer=Adam(lr=learning_rate),
              loss='categorical_crossentropy',
              metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy])
print(model.summary())
model.fit_generator(
    train_datagen,
    steps_per_epoch=STEPS,
    # initial_epoch = initial_epoch,
    epochs=EPOCHS,
    verbose=1,
    validation_data=(x_valid, y_valid),
    callbacks = callbacks
)
# Reload the checkpoint written during training before evaluating
# (presumably the best-scoring weights saved by the callbacks — TODO confirm).
model.load_weights(MODEL_WEIGHTS_FILE)
valid_predictions = model.predict(x_valid, batch_size=128, verbose=1)
# MAP@3 over the validation labels vs. the top-3 predicted categories.
map3 = mapk(valid_df[['y']].values, preds2catids(valid_predictions).values)
print('Map3: {:.3f}'.format(map3))
# Score the test set and keep each sample's top-3 category ids.
test = pd.read_csv(os.path.join(INPUT_DIR, 'test_simplified.csv'))
test.head()
x_test = df_to_image_array_xd(test, size)
print(test.shape, x_test.shape)
print('Test array memory {:.2f} GB'.format(x_test.nbytes / 1024.**3 ))
test_predictions = model.predict(x_test, batch_size=128, verbose=1)
top3 = preds2catids(test_predictions)
top3.head()
# Resize the raw images to the 200x200 input the network expects and
# one-hot encode the binary labels.
x_train_resized = image_crop.resize_imgs(x_train)
x_test_resized = image_crop.resize_imgs(x_test)
y_train = to_categorical(y_train, num_classes=2)
y_test = to_categorical(y_test, num_classes=2)
print(np.shape(x_train_resized))
print(np.shape(x_test_resized))
# Two-class MobileNet built from scratch, then initialized from a weight file.
model = MobileNet(include_top=True, weights=None, classes=2, pooling='max', input_shape=(200, 200, 3))
model.load_weights(weight_path)
# NOTE(review): the checkpoint filename says "MobileNetV2" but the model
# above is MobileNet (v1) — confirm which name is intended.
checkpoint = ModelCheckpoint(filepath=os.path.join(
    save_dir,
    'MobileNetV2_weight.{epoch:02d}-{loss:.2f}-{categorical_accuracy:.2f}.hdf5'
),
                             verbose=1,
                             monitor='categorical_accuracy',
                             save_best_only=True)
# Very low learning rate — presumably for fine-tuning from the loaded weights.
opt = Adam(lr=5e-6)
model.compile(optimizer=opt,
              loss=losses.categorical_crossentropy,
              metrics=[metrics.categorical_accuracy])
# model.fit(x_train_resized,y_train,epochs=20,batch_size=6,callbacks=[checkpoint]) #
from mycbk import *
import os
# Pin the CUDA device used by this process.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "12"


def draw_predict(img, y_pred):
    """Plot a 5x5 grid of images annotated with their decoded predictions.

    Saves the figure to the module-level `address_predict` path and shows it.
    `pred2text` (from the star import) converts a prediction vector to text.
    """
    plt.rcParams['figure.figsize'] = [16, 10]
    plt.rcParams['font.size'] = 14
    n = 5
    fig, axs = plt.subplots(nrows=n, ncols=n, sharex=True, sharey=True, figsize=(16, 6))
    for i in range(n**2):
        ax = axs[i // n, i % n]
        ax.imshow(img[i].astype(np.uint8))
        # Overlay the decoded prediction text near the top-right of each tile.
        ax.text(130, 6, pred2text(y_pred[i]), fontsize=15, color='blue',
                bbox=dict(boxstyle="square", facecolor='wheat'))
        ax.axis('off')
    plt.tight_layout()
    fig.savefig(address_predict, dpi=300)
    plt.show()


# Build a MobileNet classifier sized for the captcha alphabet
# (CHAR_NUM classes per character position, `length` positions),
# load trained weights, and visualize predictions on test data.
length = get_char_length()
input_shape = (IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL)
model = MobileNet(input_shape=input_shape, alpha=1., weights=None, classes=CHAR_NUM*length)
model.load_weights(address_model)
[img, x, y] = Generate_Data().test()
y_pred = model.predict(x)
draw_predict(img, y_pred)
import os
import keras
import json
import matplotlib.pyplot as plt
from keras.layers import Dense, GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
import cv2

# MobileNet backbone without its 1000-class top; weights loaded from a
# local "no_top" file instead of downloading.
base_model = MobileNet(
    weights=None, include_top=False
)  # imports the mobilenet model and discards the last 1000 neuron layer.
base_model.load_weights('mobilenet_1_0_224_tf_no_top.h5')
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation='relu')(
    x
)  # we add dense layers so that the model can learn more complex functions and classify for better results.
x = Dense(1024, activation='relu')(x)  # dense layer 2
x = Dense(512, activation='relu')(x)  # dense layer 3
preds = Dense(2, activation='softmax')(x)  # final layer with softmax activation
model = Model(inputs=base_model.input,
              outputs=preds)  # specify the inputs
# specify the outputs
# now a model has been created based on our architecture
# NOTE(review): this loop freezes EVERY layer, including the new Dense head,
# so nothing is trainable as written — presumably some layers are unfrozen
# later before compiling/fitting; confirm.
for layer in model.layers:
    layer.trainable = False
def run(args):
    """Train a MobileNet-based FCN-8s mask-segmentation network.

    Args:
        args: parsed CLI namespace providing lr, epochs, decay, momentum,
            model (output .h5 path), test (CSV path for the held-out split)
            and hist (CSV path for the training history).

    Side effects: reads the dataset CSV, trains the network, saves the model
    to `args.model`, and writes two CSVs (`args.test`, `args.hist`).
    """
    lr = args.lr
    epochs = args.epochs
    decay = args.decay
    momentum = args.momentum
    h5file = args.model
    test_set_path = args.test
    hist = args.hist

    dataset = pd.read_csv(
        os.path.join('/home', 'wvillegas', 'dataset-mask', 'full_masks.csv'))

    from utils_fcn import DataGeneratorMobileNet
    from sklearn.model_selection import train_test_split

    # Fixed random_state keeps the train/test split reproducible across runs.
    X_train, X_test, Y_train, Y_test = train_test_split(dataset['orig'],
                                                        dataset['mask'],
                                                        test_size=0.2,
                                                        random_state=1)
    partition = {'train': list(X_train), 'test': list(X_test)}
    # Map every image filename to its mask filename for the generator.
    img_list = list(X_train) + list(X_test)
    mask_list = list(Y_train) + list(Y_test)
    labels = dict(zip(img_list, mask_list))
    img_path = os.path.join('/home', 'wvillegas', 'dataset-mask',
                            'dataset_resize', 'images_resize')
    masks_path = os.path.join('/home', 'wvillegas', 'dataset-mask',
                              'dataset_resize', 'masks_resize')
    batch_size = 4
    train_generator = DataGeneratorMobileNet(batch_size=batch_size,
                                             img_path=img_path,
                                             labels=labels,
                                             list_IDs=partition['train'],
                                             n_channels=3,
                                             n_channels_label=1,
                                             shuffle=True,
                                             mask_path=masks_path)

    from keras.applications import MobileNet
    from keras.layers import Conv2DTranspose, Conv2D, Add
    from keras import Model

    # Encoder: MobileNet initialized from detection weights, fully trainable.
    net = MobileNet(include_top=False, weights=None)
    net.load_weights(
        '/home/wvillegas/DLProjects/BudClassifier/cmdscripts/modelosV2/mobilenet_weights_detection.h5',
        by_name=True)
    for layer in net.layers:
        layer.trainable = True

    # FCN-8s decoder: score, upsample 2x, fuse two skip branches, upsample 8x.
    predict = Conv2D(filters=1, kernel_size=1, strides=1)(net.output)
    deconv2 = Conv2DTranspose(filters=1, kernel_size=4, strides=2,
                              padding='same', use_bias=False)(predict)
    pred_conv_dw_11_relu = Conv2D(filters=1, kernel_size=1, strides=1)(
        net.get_layer('conv_dw_11_relu').output)
    fuse1 = Add()([deconv2, pred_conv_dw_11_relu])
    pred_conv_pw_5_relu = Conv2D(filters=1, kernel_size=1, strides=1)(
        net.get_layer('conv_pw_5_relu').output)
    deconv2fuse1 = Conv2DTranspose(filters=1, kernel_size=4, strides=2,
                                   padding='same', use_bias=False)(fuse1)
    fuse2 = Add()([deconv2fuse1, pred_conv_pw_5_relu])
    deconv8 = Conv2DTranspose(filters=1, kernel_size=16, strides=8,
                              padding='same', use_bias=False)(fuse2)
    fcn = Model(inputs=net.input, outputs=deconv8)

    from keras.optimizers import SGD
    sgd = SGD(lr=lr, momentum=momentum, decay=decay)
    fcn.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    history = fcn.fit_generator(generator=train_generator,
                                use_multiprocessing=True,
                                workers=6,
                                epochs=epochs)
    # Fix: os.path.join(h5file) with a single argument was a no-op wrapper.
    fcn.save(h5file)

    # Persist the held-out split and the training history; distinct names
    # fix the original's reuse of `test_csv` for both DataFrames.
    test_df = pd.DataFrame({'x': X_test, 'y': Y_test})
    test_df.to_csv(test_set_path, header=None)
    hist_df = pd.DataFrame(history.history)
    hist_df.to_csv(hist)
import os
from keras.applications import MobileNet
from keras.backend import categorical_crossentropy
from keras.callbacks import TensorBoard, ModelCheckpoint
from dataset import SmokeGifSequence

if __name__ == '__main__':
    input_shape = (299, 299, 3)

    # hdf = "m1.h5"
    hdf = "temporal_vg_smoke_v1.h5"
    # NOTE(review): the model input is (299, 299, 20) — presumably 20 stacked
    # temporal frames — while `input_shape` above is (299, 299, 3) and is only
    # passed to the data sequence; confirm the two are meant to differ.
    m = MobileNet(input_shape=(299, 299, 20), weights=None, classes=2)
    # load_model(hdf)
    m.load_weights(hdf)
    m.compile("adam", categorical_crossentropy, metrics=["accuracy"])
    # plot_model(m, show_shapes=True)
    m.summary()

    # data_dir = "/blender/storage/datasets/smoking/gifs/"
    data_dir = "/blender/storage/datasets/vg_smoke"
    # Temporal-only sequence built from positive/negative GIF lists.
    train_seq = SmokeGifSequence(data_dir, neg_txt='negatives.txt', pos_txt='positives.txt',
                                 input_shape_hwc=input_shape,
                                 only_temporal=True)
    # val_seq = SmokeGifSequence(data_dir, neg_txt='validate_neg.txt', pos_txt='validate_pos.txt',
    #                            input_shape_hwc=input_shape,
    #                            only_temporal=True)