def save_model12(new_model_path, conv_model_path):
    """Build a NASNetLarge-based classifier and save it plus its conv base.

    Relies on module-level configuration: img_width, img_height, pretrained,
    num_fc_layers, num_fc_neurons, dropout, num_classes.

    :param new_model_path: file path where the full classifier is saved
    :param conv_model_path: file path where the convolutional base is saved
    """
    # Build the backbone ONCE. The original constructed a throwaway
    # random-weight NASNetLarge and then a second pretrained one when
    # `pretrained` was set — wasteful (NASNetLarge is ~90M params).
    model = NASNetLarge(
        input_shape=(img_width, img_height, 3),
        include_top=False,
        weights='imagenet' if pretrained else None,
    )
    model.summary()

    # TODO(review): '?' is clearly a placeholder layer name and will raise
    # ValueError in get_layer — fill in the real transfer-layer name.
    transfer_layer = model.get_layer('?')
    conv_model = Model(inputs=model.input, outputs=transfer_layer.output)

    # Classifier head: global pooling, up to num_fc_layers dense layers
    # (dropout before the 2nd and 3rd), then a softmax output.
    new_model = Sequential()
    new_model.add(conv_model)
    new_model.add(GlobalAveragePooling2D())
    if num_fc_layers >= 1:
        new_model.add(Dense(num_fc_neurons, activation='relu'))
    if num_fc_layers >= 2:
        new_model.add(Dropout(dropout))
        new_model.add(Dense(num_fc_neurons, activation='relu'))
    if num_fc_layers >= 3:
        new_model.add(Dropout(dropout))
        new_model.add(Dense(num_fc_neurons, activation='relu'))
    new_model.add(Dense(num_classes, activation='softmax'))
    print(new_model.summary())

    new_model.save(new_model_path)
    conv_model.save(conv_model_path)
    return
def evaluation(args):
    """Measure top-1 accuracy of a pretrained ImageNet model on ILSVRC2012 val."""
    path_img_val = '../datasets/ilsvrc2012/images/val/'
    path_val_info = '../datasets/ilsvrc2012/images/val.txt'

    # Dispatch table replaces the original if/elif chain of constructors.
    constructors = {
        'vgg16': VGG16,
        'resnet152': ResNet152,
        'resnet152v2': ResNet152V2,
        'inceptionresnetv2': InceptionResNetV2,
        'densenet201': DenseNet201,
        'nasnetlarge': NASNetLarge,
    }
    if args.model in constructors:
        model = constructors[args.model](weights='imagenet')
        model.summary()

    name, label = load_header_imagenet(load_file(path_val_info))

    pred = []
    for idx, img_name in enumerate(name):
        batch = preprocessing_imagenet(path_img_val + img_name, args)
        pred.append(np.argmax(model.predict(batch), axis=1)[0])
        if idx % 1000 == 0:
            print(img_name)

    correct = sum(1 for p, l in zip(pred, label) if p == l)
    print('Accuracy of the IMAGENET dataset using model %s: %.4f' %
          (args.model, correct / len(label)))
def extract_features(directory, ids, model):
    """Extract penultimate-layer CNN features for every image id in `ids`.

    :param directory: folder containing "<id>.jpg" images
    :param ids: argument accepted by load_list, yielding image ids
    :param model: selector; 1 -> ResNet50 (224px input), else NASNetLarge (331px)
    :return: list of per-image feature arrays, in `ids` order
    """
    if int(model) == 1:
        print("1")
        # load ResNet50 model
        net = ResNet50()
        input_size = 224
    else:
        print("2")
        # load NASNetLarge model
        net = NASNetLarge(input_shape=(331, 331, 3), include_top=True,
                          weights='imagenet', input_tensor=None, pooling=None)
        input_size = 331

    # Strip the classification layer so the model emits features.
    # NOTE(review): layers.pop() then layers[-2] removes the last layer AND
    # taps one layer further back — confirm this is the intended tap point.
    net.layers.pop()
    net = Model(inputs=net.inputs, outputs=net.layers[-2].output)
    net.summary()
    print(len(net.layers))
    # model characteristics
    plot_model(net, to_file='model.png')

    imgs = load_list(ids)
    print('Dataset: %d' % len(imgs))
    N = len(imgs)
    print(N)

    results = []
    i = 0
    batch_size = 1  # this can be 8 for a GTX 1080 Ti and 32G of RAM
    while i < N:
        if i % 1024 == 0:
            print('{} from {} images.'.format(i, N))
        batch = imgs[i:i + batch_size]
        i += batch_size
        images = [
            load_img(os.path.join(directory, img + ".jpg"),
                     target_size=(input_size, input_size))
            for img in batch
        ]
        images = [preprocess_input(img_to_array(img)) for img in images]
        images = np.stack(images)
        r = net.predict(images)
        # Fixed: iterate the ACTUAL batch length, not batch_size — the final
        # batch is shorter whenever N is not a multiple of batch_size, and
        # range(batch_size) would index past the end of the predictions.
        for ind in range(len(batch)):
            results.append(r[ind])
    return results
def apply_Feature_Extractor_model(params):
    """
    Apply a previously trained model.

    Loads a pretrained ImageNet CNN (selected by params['MODEL_TYPE']),
    runs the matching per-image feature extractor over every image listed
    for each set in params['EXTRACT_ON_SETS'], and saves one .npy feature
    file per image under params['STORE_PATH'].

    :param params: Hyperparameters (dict-like; keys used: MODEL_TYPE,
        DATA_ROOT_PATH, EXTRACT_ON_SETS, SPLIT_OUTPUT, STORE_PATH,
        IMG_FILES, SPATIAL_LAST)
    :return:
    """
    model = None
    # Note: the 'ResNet152' option actually instantiates ResNet152V2.
    if params['MODEL_TYPE'] == 'InceptionV3':
        model = InceptionV3(weights='imagenet', include_top=False)
    elif params['MODEL_TYPE'] == 'NASNetLarge':
        model = NASNetLarge(weights='imagenet', include_top=False)
    elif params['MODEL_TYPE'] == 'ResNet152':
        model = ResNet152V2(weights='imagenet', include_top=False)
    print(model.summary())

    base_path = params['DATA_ROOT_PATH']
    for s in params['EXTRACT_ON_SETS']:
        # NOTE(review): if SPLIT_OUTPUT is falsy, path_general is never
        # assigned and the writes below would raise NameError — the caller
        # presumably always sets SPLIT_OUTPUT; verify.
        if params['SPLIT_OUTPUT']:
            path_general = params['STORE_PATH'] + '/' + params.get(
                'MODEL_TYPE', 'features') + '/' + s + '/'
            if not os.path.isdir(
                    path_general):  # create dir if it doesn't exist
                os.makedirs(path_general)
        list_filepath = base_path + '/' + params['IMG_FILES'][s]
        image_list = file2list(list_filepath)
        eta = -1
        start_time = time.time()
        n_images = len(image_list)
        for n_sample, imname in list(enumerate(image_list)):
            # One forward pass per image via the model-specific helper.
            if params['MODEL_TYPE'] == 'InceptionV3':
                features = inceptionV3(model, imname)
            elif params['MODEL_TYPE'] == 'NASNetLarge':
                features = nasNetLarge(model, imname)
            elif params['MODEL_TYPE'] == 'ResNet152':
                features = resNet152(model, imname)
            # Keras puts the spatial dimensions at the start. We may want to put them at the end
            if params.get('SPATIAL_LAST', True):
                features = features.transpose(0, 3, 1, 2)
            # Strip a .jpg/.png extension (if present) before appending .npy.
            filepath = path_general + imname.split(
                '/')[-1][:-4] + '.npy' if imname.split(
                    '/')[-1][-4:] == '.jpg' or imname.split('/')[-1][
                        -4:] == '.png' else path_general + imname.split(
                            '/')[-1] + '.npy'
            numpy2file(filepath, features, permission='wb', split=False)
            # NOTE(review): the printed ETA is one sample stale — eta is
            # recomputed AFTER it is written to stdout.
            sys.stdout.write('\r')
            sys.stdout.write("\t Processed %d/%d - ETA: %ds " %
                             (n_sample, n_images, int(eta)))
            sys.stdout.flush()
            eta = (n_images - n_sample) * (time.time() - start_time) / max(
                n_sample, 1)
        print("Features saved in", path_general)
def cnn_experiment(n):
    """Fine-tune an ImageNet-pretrained NASNetLarge with a dense head.

    Loads the RGB image dataset, attaches Flatten -> 4096 -> 4096 -> 62-way
    softmax on top of the NASNetLarge backbone, and hands the model to cnn()
    for training. (Earlier layer-sweep and ResNet50 experiments were
    disabled in this function.)
    """
    x, y = load_images(grayScale=False)

    print("\n\nNASNet:")
    backbone = NASNetLarge(include_top=False,
                           weights='imagenet',
                           input_shape=(n, n, 3))
    print(backbone.summary())

    # Dense classification head on top of the convolutional features.
    head = Flatten()(backbone.output)
    head = Dense(4096, activation='relu')(head)
    head = Dense(4096, activation='relu')(head)
    head = Dense(62, activation='softmax')(head)

    classifier = Model(inputs=backbone.input, outputs=head)
    cnn(x, y, n, classifier, pretrained=True)
# Train a from-scratch (weights=None) NASNetLarge on CIFAR-10.
batch_size = 16
nb_epoch = 10

# Load Cifar10 data. Please implement your own load_data() module for your own dataset
X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)

# Load our model
# model = densenet169_model(img_rows=img_rows, img_cols=img_cols, color_type=channel, num_classes=num_classes)

# load keras model
model = NASNetLarge(weights=None, classes=10)
sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()

# Start Fine-tuning
model.fit(
    X_train,
    Y_train,
    batch_size=batch_size,
    epochs=nb_epoch,
    shuffle=True,
    verbose=1,
    validation_data=(X_valid, Y_valid),
)

# Make predictions
# NOTE(review): this statement is truncated in the source — the closing
# arguments/parenthesis of the predict() call are missing.
predictions_valid = model.predict(X_valid, batch_size=batch_size,
def main():
    """Train a transfer-learning classifier (selected by `model_select`) on a
    pickled image dataset, logging per-epoch metrics and per-class accuracy.

    Side effects: creates log/ and acc_class/ dirs, writes a CSV training log,
    a best-val_acc model checkpoint under /HDD/joshua/models/, and a
    per-class-accuracy text file.
    """
    os_config()
    if os.path.exists('log') == False:
        os.mkdir('log')
    if os.path.exists('acc_class') == False:
        os.mkdir('acc_class')

    # Run configuration.
    batch_size = 32
    epochs = 200
    target_class_num = 2000
    model_select = 'Xception'  #InceptionV3, Xception, InceptionResNetV2, NASNetLarge VGG16
    LogFileName = model_select + '_e' + str(epochs) + 'c' + str(
        target_class_num)
    model_name = model_select + '_e' + str(epochs) + 'c' + str(
        target_class_num) + '.h5'

    train_data, train_label, test_data, test_label = read_pickle(
        "/HDD/joshua/DemoTop" + str(target_class_num), "data.pickle")
    train_data = np.array(train_data)
    train_label = np.array(train_label)
    test_data = np.array(test_data)
    test_label = np.array(test_label)
    # train_data, train_label, organ_label, test_data, test_label, organ_test_label = read_pickle("OrganTop" + str(target_class_num), "data.pickle")
    train_label = one_hot_encode(train_label, target_class_num)
    test_label = one_hot_encode(test_label, target_class_num)
    print("training data shape: {}".format(np.shape(train_data)))
    print("training label shape: {}".format(np.shape(train_label)))
    print("test data shape: {}".format(np.shape(test_data)))
    print("test label shape: {}".format(np.shape(test_label)))

    # normalize inputs from 0-255 to 0.0-1.0
    train_data = train_data.astype('float32')
    test_data = test_data.astype('float32')
    train_data = train_data / 255.0
    test_data = test_data / 255.0

    # NOTE(review): `input` shadows the builtin; default input is 227x227
    # (the NASNetLarge branch rebinds it to 331x331 and resizes the data).
    input = Input(shape=(227, 227, 3), name='image_input')

    if model_select == 'VGG16':
        from keras.applications.vgg16 import VGG16
        transfer_model = VGG16(weights='imagenet', include_top=False)
        transfer_model.summary()
        # Fine-tune every backbone layer.
        for layer in transfer_model.layers:
            layer.trainable = True
        output_transfer = transfer_model(input)
        output_transfer = Flatten(name='flatten')(output_transfer)
        output_transfer = Dense(4096, name='my_dense1')(output_transfer)
        output_transfer = Dense(4096, name='my_dense2')(output_transfer)

    if model_select == 'NASNetLarge':
        # model
        from keras.applications.nasnet import NASNetLarge
        input = Input(shape=(331, 331, 3), name='image_input')
        # NASNetLarge expects 331x331 input, so resize the dataset.
        train_data = group_imresize(train_data, 331)
        test_data = group_imresize(test_data, 331)
        print("New training data shape: {}".format(np.shape(train_data)))
        print("New test data shape: {}".format(np.shape(test_data)))
        transfer_model = NASNetLarge(weights='imagenet', include_top=False)
        # transfer_model.summary()
        for layer in transfer_model.layers:
            layer.trainable = True
        output_transfer = transfer_model(input)
        output_transfer = GlobalAveragePooling2D(
            name='my_GlobalAveragePooling')(output_transfer)

    if model_select == 'InceptionV3':
        # model
        from keras.applications.inception_v3 import InceptionV3
        transfer_model = InceptionV3(weights='imagenet', include_top=False)
        # transfer_model = InceptionV3(weights=None, include_top=False)
        transfer_model.summary()
        for layer in transfer_model.layers:
            layer.trainable = True
        output_transfer = transfer_model(input)
        output_transfer = AveragePooling2D(
            pool_size=(5, 5), strides=1,
            name='my_averagePooling')(output_transfer)
        output_transfer = Dropout(0.4, name='my_dropOut')(output_transfer)
        output_transfer = Flatten(name='flatten')(output_transfer)

    if model_select == 'Xception':
        from keras.applications.xception import Xception
        transfer_model = Xception(weights='imagenet', include_top=False)
        for layer in transfer_model.layers:
            layer.trainable = True
        output_transfer = transfer_model(input)
        output_transfer = GlobalAveragePooling2D(
            name='my_global_pool')(output_transfer)

    if model_select == 'InceptionResNetV2':
        from keras.applications.inception_resnet_v2 import InceptionResNetV2
        transfer_model = InceptionResNetV2(weights='imagenet',
                                           include_top=False)
        # transfer_model = InceptionResNetV2(weights=None, include_top=False)
        transfer_model.summary()
        for layer in transfer_model.layers:
            layer.trainable = True
        output_transfer = transfer_model(input)
        output_transfer = GlobalAveragePooling2D(
            name='my_globalAveragePooling')(output_transfer)
        # output_transfer = AveragePooling2D(pool_size=(5, 5), strides=1)(output_transfer)
        output_transfer = Dropout(0.2, name='my_dropOut')(output_transfer)
        # output_transfer = Flatten(name='flatten')(output_transfer)

    # Shared softmax classification layer over all backbone choices.
    out = Dense(target_class_num, activation='softmax',
                name='predictions')(output_transfer)

    # Create your own model
    # NOTE(review): `input=`/`output=` kwargs are the old Keras 1.x spelling;
    # Keras 2 uses `inputs=`/`outputs=`.
    my_model = Model(input=input, output=out)
    my_model.summary()
    sgd = SGD(lr=0.045, decay=1e-6, momentum=0.9, nesterov=True)
    # my_model = multi_gpu_model(my_model, gpus=2)
    my_model.compile(optimizer=sgd,
                     loss='categorical_crossentropy',
                     metrics=['accuracy', top_3_acc, top_5_acc])

    # Light augmentation for training; none for validation.
    train_datagen = ImageDataGenerator(
        # width_shift_range = 0.10,
        # height_shift_range = 0.10,
        rotation_range=20,
        # shear_range = 0.10,
        zoom_range=0.10,
        horizontal_flip=True,
        fill_mode='nearest')
    val_datagen = ImageDataGenerator()
    train_datagen.fit(train_data)
    train_generator = train_datagen.flow(train_data,
                                         train_label,
                                         batch_size=batch_size)
    val_datagen.fit(test_data)
    val_generator = val_datagen.flow(test_data,
                                     test_label,
                                     batch_size=batch_size)

    # Keep only the best model by validation accuracy; log every epoch to CSV.
    checkpoint = ModelCheckpoint('/HDD/joshua/models/' + 'models_' +
                                 model_name,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    csv_logger = CSVLogger('log/' + LogFileName + '.log')

    # steps_per_epoch should be (number of training images total / batch_size)
    # validation_steps should be (number of validation images total / batch_size)
    my_model.fit_generator(
        train_generator,
        steps_per_epoch=np.shape(train_data)[0] / batch_size,
        validation_data=val_generator,
        validation_steps=np.shape(test_data)[0] / batch_size,
        epochs=epochs,
        callbacks=[csv_logger, checkpoint])
    # my_model.fit_generator(train_generator,
    #                        steps_per_epoch= np.shape(train_data)[0] / batch_size,
    #                        validation_data= val_generator,
    #                        validation_steps= np.shape(test_data)[0] / batch_size,
    #                        epochs=epochs,
    #                        callbacks = [csv_logger, checkpoint]
    #                        )
    # my_model.fit(train_data, train_label,
    #              nb_epoch=40,
    #              batch_size=64,
    #              validation_data=(test_data, test_label),
    #              callbacks = [csv_logger]
    #              )

    test_pred = my_model.predict(test_data, batch_size=batch_size)
    # test_pred = my_model.predict(test_data, batch_size = batch_size)
    # Per-class accuracy = confusion-matrix diagonal / per-class sample count.
    cmat = confusion_matrix(np.argmax(test_label, axis=1),
                            np.argmax(test_pred, axis=1))
    acc_per_class = cmat.diagonal() / cmat.sum(axis=1)
    print(acc_per_class)
    with open('acc_class/' + LogFileName + '.txt', 'w') as f:
        for item in acc_per_class:
            f.write('{}, '.format(item))
def NASNet_large_FCN(input_image, weights=None):
    """Build an FCN-8s-style segmentation model on a NASNetLarge backbone.

    Taps the backbone at three depths (normal_concat_18 plus two fused groups
    of six activations each), scores each with a 1x1 conv (6 channels), and
    decodes with 2x / 2x / 8x transposed-conv upsampling with skip additions.

    :param input_image: input shape tuple, e.g. (H, W, 3)
    :param weights: weight spec forwarded to NASNetLarge ('imagenet' or None)
    :return: keras Model mapping the input tensor to a 6-channel score map
    """
    input_tensor = Input(shape=(input_image))
    model = NASNetLarge(input_shape=input_image,
                        input_tensor=input_tensor,
                        include_top=False,
                        weights=weights)

    # NOTE(review): taps are addressed by auto-generated layer names
    # ('activation_118', ...) — these indices are fragile across Keras
    # versions; confirm they still point at the intended normal-cell outputs.
    normal_18 = model.get_layer(name='normal_concat_18').output

    # Mid-depth activations (cells 7..12 region), fused by element-wise Add.
    mid_taps = [model.get_layer(name='activation_%d' % idx).output
                for idx in (118, 130, 142, 154, 166, 178)]
    fuse_activation_7_10 = Add()(mid_taps)

    # Early activations (cells 0..5 region), fused likewise.
    early_taps = [model.get_layer(name='activation_%d' % idx).output
                  for idx in (35, 47, 59, 71, 83, 95)]
    fuse_activation_0_5 = Add()(early_taps)

    # Deepest score map, upsampled 2x.
    conv_normal_18 = Conv2D(filters=6, kernel_size=(1, 1))(normal_18)
    upscore_normal_18 = Conv2DTranspose(filters=6,
                                        kernel_size=(4, 4),
                                        strides=(2, 2),
                                        padding='same')(conv_normal_18)

    # Mid-depth skip: score, add upsampled deep scores, upsample 2x.
    conv_fuse_7_10 = Conv2D(filters=6,
                            kernel_size=(1, 1))(fuse_activation_7_10)
    conv_fuse_7_10 = Add()([conv_fuse_7_10, upscore_normal_18])
    upscore_fuse_7_10 = Conv2DTranspose(filters=6,
                                        kernel_size=(4, 4),
                                        strides=(2, 2),
                                        padding='same')(conv_fuse_7_10)

    # Early skip: score, add, then final 8x upsample to input resolution.
    conv_fuse_0_5 = Conv2D(filters=6, kernel_size=(1, 1))(fuse_activation_0_5)
    conv_fuse_0_5 = Add()([conv_fuse_0_5, upscore_fuse_7_10])
    upscore = Conv2DTranspose(filters=6,
                              kernel_size=(16, 16),
                              strides=(8, 8),
                              padding='same')(conv_fuse_0_5)

    model = Model(inputs=input_tensor, outputs=upscore)
    # Fixed: was Python-2 `print model.summary()` — a SyntaxError on
    # Python 3, which the rest of this file targets (print() calls).
    print(model.summary())
    return model