def nasnet_retinanet(num_classes, backbone='nasnet', inputs=None, modifier=None, **kwargs):
    k.clear_session()

    # choose default input
    if inputs is None:
        if keras.backend.image_data_format() == 'channels_first':
            inputs = keras.layers.Input(shape=(3, None, None))
        else:
            inputs = keras.layers.Input(shape=(None, None, 3))

    # create the NASNet backbone
    if backbone == 'nasnet':
        nasnet_model = NASNetLarge(weights=None, include_top=False, input_tensor=inputs)
    else:
        raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))

    # invoke modifier if given
    if modifier:
        nasnet_model = modifier(nasnet_model)

    # backbone feature maps fed into the feature pyramid
    concatenated_features = [
        nasnet_model.get_layer('add_4').output,
        nasnet_model.get_layer('activation_204').output,
        nasnet_model.output
    ]

    # create the full model
    return retinanet.retinanet(inputs=inputs, num_classes=num_classes,
                               backbone_layers=concatenated_features, **kwargs)
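# Hedged usage sketch for nasnet_retinanet() above: num_classes=80 is an illustrative
# assumption, and `retinanet.retinanet` plus the 'add_4'/'activation_204' layer names
# come from the surrounding keras-retinanet-style codebase and the Keras version that
# built NASNetLarge, so this is only a sketch, not the project's training entry point.
detector = nasnet_retinanet(num_classes=80, backbone='nasnet')
detector.summary()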
def evaluation(args):
    path_img_val = '../datasets/ilsvrc2012/images/val/'
    path_val_info = '../datasets/ilsvrc2012/images/val.txt'

    if args.model == 'vgg16':
        model = VGG16(weights='imagenet')
    elif args.model == 'resnet152':
        model = ResNet152(weights='imagenet')
    elif args.model == 'resnet152v2':
        model = ResNet152V2(weights='imagenet')
    elif args.model == 'inceptionresnetv2':
        model = InceptionResNetV2(weights='imagenet')
    elif args.model == 'densenet201':
        model = DenseNet201(weights='imagenet')
    elif args.model == 'nasnetlarge':
        model = NASNetLarge(weights='imagenet')
    model.summary()

    name, label = load_header_imagenet(load_file(path_val_info))
    pred = list()
    for i, n in enumerate(name):
        x = preprocessing_imagenet(path_img_val + n, args)
        pred.append(np.argmax(model.predict(x), axis=1)[0])
        if i % 1000 == 0:
            print(n)

    correct = len([p for p, l in zip(pred, label) if p == l])
    print('Accuracy of the IMAGENET dataset using model %s: %.4f' % (args.model, correct / len(label)))
def predict(image):
    model = NASNetLarge()
    pred = model.predict(image)
    decoded_predictions = decode_predictions(pred, top=10)
    response = 'NASNetLarge predictions: ' + str(decoded_predictions[0][0:5])
    print(response)
    np.argmax(pred[0])
    return response
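# Hedged sketch of how an input for predict() above can be prepared: NASNetLarge's
# default ImageNet input size is 331x331 and keras.applications.nasnet provides the
# matching preprocess_input; 'example.jpg' is a placeholder path, not a project file.
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.nasnet import preprocess_input
import numpy as np

img = load_img('example.jpg', target_size=(331, 331))
x = preprocess_input(np.expand_dims(img_to_array(img), axis=0))
print(predict(x))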
def apply_Feature_Extractor_model(params):
    """
    Apply a previously trained model.
    :param params: Hyperparameters
    :return:
    """
    model = None
    if params['MODEL_TYPE'] == 'InceptionV3':
        model = InceptionV3(weights='imagenet', include_top=False)
    elif params['MODEL_TYPE'] == 'NASNetLarge':
        model = NASNetLarge(weights='imagenet', include_top=False)
    elif params['MODEL_TYPE'] == 'ResNet152':
        model = ResNet152V2(weights='imagenet', include_top=False)
    print(model.summary())

    base_path = params['DATA_ROOT_PATH']
    for s in params['EXTRACT_ON_SETS']:
        if params['SPLIT_OUTPUT']:
            path_general = params['STORE_PATH'] + '/' + params.get('MODEL_TYPE', 'features') + '/' + s + '/'
            if not os.path.isdir(path_general):  # create dir if it doesn't exist
                os.makedirs(path_general)
        list_filepath = base_path + '/' + params['IMG_FILES'][s]
        image_list = file2list(list_filepath)
        eta = -1
        start_time = time.time()
        n_images = len(image_list)

        for n_sample, imname in list(enumerate(image_list)):
            if params['MODEL_TYPE'] == 'InceptionV3':
                features = inceptionV3(model, imname)
            elif params['MODEL_TYPE'] == 'NASNetLarge':
                features = nasNetLarge(model, imname)
            elif params['MODEL_TYPE'] == 'ResNet152':
                features = resNet152(model, imname)

            # Keras puts the spatial dimensions at the start. We may want to put them at the end
            if params.get('SPATIAL_LAST', True):
                features = features.transpose(0, 3, 1, 2)

            imfile = imname.split('/')[-1]
            if imfile[-4:] == '.jpg' or imfile[-4:] == '.png':
                filepath = path_general + imfile[:-4] + '.npy'
            else:
                filepath = path_general + imfile + '.npy'
            numpy2file(filepath, features, permission='wb', split=False)

            sys.stdout.write('\r')
            sys.stdout.write("\t Processed %d/%d - ETA: %ds " % (n_sample, n_images, int(eta)))
            sys.stdout.flush()
            eta = (n_images - n_sample) * (time.time() - start_time) / max(n_sample, 1)

        print("Features saved in", path_general)
def nasNetLarge(weights=None, include_top=False, input_shape=(224, 224, 3)):
    nasNet = NASNetLarge(weights=weights, include_top=include_top, input_shape=input_shape)
    if include_top:
        WRN_WEIGHTS_PATH = "https://www.flyai.com/m/v0.8|NASNet-large.h5"
    else:
        WRN_WEIGHTS_PATH = "https://www.flyai.com/m/v0.8|NASNet-large-no-top.h5"
    filename = WRN_WEIGHTS_PATH.split('|')[-1]
    fpath = get_file(filename, WRN_WEIGHTS_PATH, cache_subdir=MODEL_PATH)
    nasNet.load_weights(fpath)
    print('NASNetLarge weights loaded!')
    return nasNet
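# Hedged usage sketch for the nasNetLarge() loader above: MODEL_PATH and get_file are
# assumed to come from the surrounding module, and the dummy input matches the default
# no-top input_shape of (224, 224, 3). The commented shape is what NASNetLarge would
# typically produce at this resolution.
import numpy as np

feature_extractor = nasNetLarge(include_top=False, input_shape=(224, 224, 3))
dummy = np.random.rand(1, 224, 224, 3).astype('float32')
print(feature_extractor.predict(dummy).shape)  # e.g. (1, 7, 7, 4032)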
def build(config):
    image_width = config['image_processing']['image_width']
    image_height = config['image_processing']['image_height']
    image_channels = config['image_processing']['image_channels']
    number_of_classes = config['dataset']['number_of_classes']
    model_file = config['model'].get('model_file', None)
    regularization = config['hyper_parameters'].get('activity_regularizer', None)
    weights = config['model'].get('weights', None)
    print("weights:", weights)

    if weights == 'imagenet':
        base_model = NASNetLarge(input_shape=(image_width, image_height, image_channels),
                                 weights='imagenet', include_top=False)
    else:
        base_model = NASNetLarge(input_shape=(image_width, image_height, image_channels),
                                 weights=None, include_top=False)

    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    if regularization is not None:
        regularization = getattr(importlib.import_module('keras.regularizers'),
                                 regularization['name'])
        regularization = regularization(**config['hyper_parameters']['activity_regularizer']['params'])
    predictions = Dense(activity_regularizer=regularization, units=number_of_classes,
                        activation='softmax', name='predictions')(x)

    model = Model(inputs=base_model.input, outputs=predictions)
    if weights != 'imagenet' and weights is not None:
        print(weights, weights != 'imagenet')
        model.load_weights(weights)
    return model
def nasnet_scratch(model_type: Text, in_shape: Union[None, Tuple[int, int, int]], out_units: int):
    '''NASNet Large (331x331 by default) or Mobile (224x224 by default)

    :param model_type: "Large" or "Mobile"
    :param in_shape: input shape in format (H, W, D), or None
    :param out_units: number of classes
    '''
    LARGE = 'LARGE'
    MOBILE = 'MOBILE'
    if model_type.upper() == LARGE:
        model = NASNetLarge(input_shape=in_shape, include_top=True, weights=None,
                            input_tensor=None, classes=out_units)
    elif model_type.upper() == MOBILE:
        model = NASNetMobile(input_shape=in_shape, include_top=True, weights=None,
                             input_tensor=None, classes=out_units)
    else:
        raise RuntimeError('invalid model type argument, "{}". "Large" and "Mobile" are supported.'
                           .format(model_type))
    return model
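# Hedged usage sketch for nasnet_scratch(): builds a randomly initialised NASNetMobile
# classifier at its default 224x224 input; the 10-class output and the optimizer/loss
# choice are illustrative assumptions.
small_model = nasnet_scratch('Mobile', in_shape=(224, 224, 3), out_units=10)
small_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])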
def build_nasnetlarge(n_classes):
    # Clear memory for new model
    K.clear_session()

    # Take NASNetLarge without its classifier and put our custom classifier on top
    base_model = NASNetLarge(weights='imagenet', include_top=False,
                             input_tensor=Input(shape=(200, 200, 3)))
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(4096)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Dropout(.5)(x)
    predictions = Dense(n_classes, kernel_initializer='glorot_uniform',
                        kernel_regularizer=l2(.001), activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)

    # i.e. freeze all convolutional NASNetLarge layers
    for layer in base_model.layers:
        layer.trainable = False

    # opt = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=1e-6)
    opt = SGD(lr=0.01, momentum=0.1, decay=1e-6, nesterov=True)
    model.compile(optimizer=opt, loss='categorical_crossentropy',
                  metrics=['accuracy', 'top_k_categorical_accuracy'])
    return model
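# Hedged sketch of a possible second fine-tuning phase for build_nasnetlarge(): after
# the new head has been trained with the backbone frozen, the top layers can be
# unfrozen and the model recompiled at a lower learning rate. The class count, the
# "last 50 layers" cutoff and the SGD settings are illustrative assumptions; SGD is
# assumed to be imported as in the snippet above.
clf = build_nasnetlarge(n_classes=5)
for layer in clf.layers[-50:]:
    layer.trainable = True
clf.compile(optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True),
            loss='categorical_crossentropy',
            metrics=['accuracy', 'top_k_categorical_accuracy'])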
class Model:
    def __init__(self, debug=False):
        self.debug = debug
        if debug:
            return
        model_path = "{}/model/model.h5".format(os.getcwd())
        if not os.path.isfile(model_path):
            print("no model file")
            exit(1)
        print("loading model")
        self.classifier = load_model(model_path)
        self.nas = NASNetLarge(input_shape=None, include_top=True, weights='imagenet',
                               input_tensor=None, pooling=None, classes=1000)
        # drop the final classification layer so NASNetLarge outputs 4032-d feature vectors
        self.nas.layers.pop()
        self.nas.outputs = [self.nas.layers[-1].output]
        self.nas.layers[-1].outbound_nodes = []

    def predict(self, segment):
        if self.debug:
            return 1
        features = self.nas.predict(np.array(segment))
        result = self.classifier.predict(np.array(features).reshape(1, FRAME_INTERVAL, 4032))
        return np.argmax(result[0], axis=0)
def _define_model(self):
    """Defines the CNN used to transform the data
    """
    if self._model_name == "xception":
        model = Xception(include_top=False, pooling="max",
                         input_shape=(self._height, self._width, self._n_channel))
    elif self._model_name == "densenet":
        model = DenseNet201(include_top=False, pooling="max",
                            input_shape=(self._height, self._width, self._n_channel))
    elif self._model_name == "inception":
        model = InceptionV3(include_top=False, pooling="max",
                            input_shape=(self._height, self._width, self._n_channel))
    elif self._model_name == "nasnet":
        model = NASNetLarge(include_top=False, pooling="max",
                            input_shape=(self._height, self._width, self._n_channel))
    else:
        model = InceptionResNetV2(include_top=False, pooling="max",
                                  input_shape=(self._height, self._width, self._n_channel))

    # Sometimes we only have one GPU so Keras will automatically detect
    # this; otherwise we have to specify this setting
    if self._ngpu <= 1:
        return model
    else:
        return multi_gpu_model(model=model, gpus=self._ngpu)
def nasnetlarge_model(lr, class_num, img_rows=299, img_cols=299, frozen_layer_index=-1, epoch=5, opt='adam'):
    nasnet_model = NASNetLarge(include_top=False, input_tensor=None,
                               input_shape=(img_rows, img_cols, 3),
                               weights='imagenet')
    for layer in nasnet_model.layers[:frozen_layer_index]:
        layer.trainable = False

    x = nasnet_model.output
    x = Dropout(0.7)(x)
    x = Flatten()(x)
    predictions = Dense(units=class_num, activation='softmax')(x)

    # decay the learning rate by 10x every 5 epochs
    if epoch >= 5 and epoch % 5 == 0:
        lr = lr * 0.1

    model = Model(inputs=nasnet_model.inputs, outputs=predictions)
    optimizers = get_optimizer(lr, opt)
    model.summary()
    model.compile(optimizer=optimizers, loss='categorical_crossentropy', metrics=['accuracy'])
    return model
def nasenet(image):
    file_name = 'object'
    image = load_img(file_name, target_size=(331, 331))
    image = np.expand_dims(image, axis=0)
    image = nas(image)

    model = NASNetLarge()
    prediction = model.predict(image)
    predicted = imagenet_utils.decode_predictions(prediction)
    predicted = predicted[0][0]
    predicted = {
        "model": 'nasenet',
        "object_detected": predicted[1],
        "accuracy": predicted[2] * 100
    }
    return predicted
def finetune_nasnet_large(weights=None, input_shape=None, num_classes=None):
    base_weight = None
    fine_weight = None
    if weights:
        if weights == 'imagenet':
            base_weight = weights
        else:
            fine_weight = weights

    basenet = NASNetLarge(weights=base_weight, include_top=False, input_shape=input_shape)

    # unfreeze everything from this layer onwards (roughly the last 9% of the network)
    finetune_layer_after = "normal_concat_16"
    trainable = False
    for layer in basenet.layers:
        if layer.name == finetune_layer_after:
            trainable = True
        layer.trainable = trainable

    _input = basenet.input
    x = basenet.output
    x = layers.GlobalAveragePooling2D(name='final_gap')(x)
    _output = layers.Dense(num_classes, activation='softmax',
                           kernel_initializer='he_normal',
                           name='final_dense_mapping')(x)
    model = Model(inputs=_input, outputs=_output)

    if fine_weight:
        model.load_weights(fine_weight)
    return model
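# Hedged usage sketch for finetune_nasnet_large(): passing 'imagenet' starts from the
# pretrained backbone, while passing a file path reloads previously fine-tuned weights.
# The input shape, class count, checkpoint path and compile settings are illustrative
# assumptions.
model = finetune_nasnet_large(weights='imagenet', input_shape=(331, 331, 3), num_classes=5)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# resume from an earlier fine-tuned checkpoint instead:
# model = finetune_nasnet_large(weights='checkpoints/nasnet_finetuned.h5',
#                               input_shape=(331, 331, 3), num_classes=5)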
def nas_plus_conv(x_shape, y_shape):
    nas = NASNetLarge(weights=None, include_top=False)
    for layer in nas.layers:
        layer.trainable = False

    input_2 = Input(shape=x_shape, name='x_train')
    # Use the generated model
    nas_features = nas(input_2)

    x2 = Conv2D(64, (3, 3), padding='same')(nas_features)
    x2 = BatchNormalization()(x2)
    x2 = Activation('relu')(x2)
    # x2 = MaxPooling2D(pool_size=(2, 2))(x2)
    x2 = Dropout(0.30)(x2)

    x3 = Conv2D(128, (3, 3), padding='same')(x2)
    x3 = BatchNormalization()(x3)
    x3 = Activation('relu')(x3)
    # x3 = MaxPooling2D(pool_size=(2, 2))(x3)
    x3 = Dropout(0.30)(x3)

    x4 = Conv2D(128, (3, 3), padding='same')(x3)
    x4 = BatchNormalization()(x4)
    x4 = Activation('relu')(x4)
    # x4 = MaxPooling2D(pool_size=(2, 2))(x4)
    x4 = Dropout(0.30)(x4)

    x4 = Flatten(name='flatten_2')(x4)
    x4 = Dense(256, activation='relu', name='fc2')(x4)
    x4 = Dropout(0.30)(x4)
    x4 = Dense(y_shape, activation='softmax', name='fc3')(x4)

    model = Model(inputs=input_2, outputs=x4)
    return model
def Build_Model_NasNet(px_train, pSessionParameters, pTrainingParameters):
    from keras.layers import Input
    from keras.applications.nasnet import NASNetLarge, NASNetMobile
    # Builds a new model
    '''
    Input parameters:
        px_train: training data used to set the input shape of the model
        pSessionParameters: dict of parameters (ModelBuildParameters defines how the model is built)

    Return parameters:
        model
    '''
    BatchNormFlag = pSessionParameters['BatchNorm']
    NoClasses = pSessionParameters['ModelBuildParameters']['NoClasses']
    Activation = pSessionParameters['ModelBuildParameters']['Activation']
    IncludeTopFlag = pSessionParameters['ModelBuildParameters']['IncludeTop']
    Model = pSessionParameters['ModelBuildParameters']['Model']

    if IncludeTopFlag:
        if Model == 'NasNetLarge':
            print('Building Model: NASNetLarge')
            conv_base = NASNetLarge(input_shape=(px_train.shape[1:]), weights=None,
                                    include_top=True, classes=NoClasses, pooling='avg')
        elif Model == 'NasNetMobile':
            print('Building Model: NASNetMobile')
            conv_base = NASNetMobile(input_shape=(px_train.shape[1:]), weights=None,
                                     include_top=True, classes=NoClasses, pooling='avg')
        model = models.Sequential()
        model.add(conv_base)
    else:
        # without the built-in classifier; a new head is added below
        if Model == 'NasNetLarge':
            print('Building Model: NASNetLarge')
            conv_base = NASNetLarge(input_shape=(px_train.shape[1:]), weights=None,
                                    include_top=False, pooling='avg')
        elif Model == 'NasNetMobile':
            print('Building Model: NASNetMobile')
            conv_base = NASNetMobile(input_shape=(px_train.shape[1:]), weights=None,
                                     include_top=False, pooling='avg')
        model = models.Sequential()
        model.add(conv_base)
        model = Build_Model_AddLayers(model, pSessionParameters)
        model.add(layers.Dense(NoClasses, activation=Activation, name='dense_class'))

    return model
def train(path, BATCH_SIZE, IMG_SIZE, n_classes, save_path):
    datagenerator = helpers.generate_data(BATCH_SIZE, IMG_SIZE, preprocess_input, path)
    base_model = NASNetLarge(input_shape=(IMG_SIZE, IMG_SIZE, 3), include_top=False, weights='imagenet')
    compiled_model = helpers.transfer_learning(base_model, n_classes)
    helpers.fit_model(compiled_model, datagenerator, save_path)
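# The helpers module used above is project-specific. A minimal sketch, assuming its
# transfer_learning() freezes the backbone and adds a softmax head; this is an
# assumption for illustration, not the project's actual implementation, so the
# function is given a distinct name here.
from keras.models import Model
from keras.layers import GlobalAveragePooling2D, Dense

def transfer_learning_sketch(base_model, n_classes):
    base_model.trainable = False                      # freeze the pretrained backbone
    x = GlobalAveragePooling2D()(base_model.output)   # pool feature maps to a vector
    out = Dense(n_classes, activation='softmax')(x)   # new classification head
    model = Model(inputs=base_model.input, outputs=out)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model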
def create_model_NASNetLarge(include_top=False):
    '''
    Load a NASNetLarge network pretrained on ImageNet.

    :param include_top: include the classification layer?
    :return: NASNetLarge network
    '''
    my_model = NASNetLarge(weights='imagenet', include_top=include_top)
    return my_model
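# Hedged usage sketch for create_model_NASNetLarge(): without the top, the network acts
# as an ImageNet feature extractor; the random input below uses NASNetLarge's native
# 331x331 resolution, and the commented shape is what that resolution would typically
# produce.
import numpy as np

extractor = create_model_NASNetLarge(include_top=False)
feats = extractor.predict(np.random.rand(1, 331, 331, 3).astype('float32'))
print(feats.shape)  # spatial feature maps, e.g. (1, 11, 11, 4032)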
def cnn_experiment(n):
    x, y = load_images(grayScale=False)

    # Try 3 layers with increasing size - 128, 256, 512
    '''print("\n\nCNN with increasing layers and filter size:")
    for layer in range(0, 3):
        filter_size = 2 ** (layer + 7)
        print("%s Layers, Filter size of %s" % (str(layer + 1), str(filter_size)))
        model = getModel_Filters(n, layer + 1)
        print(model.summary())
        cnn(x, y, n, model)

    print("\n\nCNN with increasing layers and constant filter size:")
    # 128 to 512
    for pwr in range(7, 10):
        size = 2 ** pwr
        print("Filter size of %s" % str(size))
        for layer in range(0, 3):
            print("%s Layers" % str(layer + 1))
            model = getModel_Layers(n, layer + 1, size)
            cnn(x, y, n, model)
    '''

    '''
    print("\n\nResNet:")
    base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(64, 64, 3))
    print(base_model.summary())
    out = base_model.output
    out = Flatten()(out)
    out = Dense(4096, activation='relu')(out)
    out = Dense(4096, activation='relu')(out)
    out = Dense(62, activation='softmax')(out)
    model = Model(inputs=base_model.input, outputs=out)
    cnn(x, y, n, model, pretrained=True)
    '''

    print("\n\nNASNet:")
    base_model = NASNetLarge(include_top=False, weights='imagenet', input_shape=(n, n, 3))
    print(base_model.summary())
    out = base_model.output
    out = Flatten()(out)
    out = Dense(4096, activation='relu')(out)
    out = Dense(4096, activation='relu')(out)
    out = Dense(62, activation='softmax')(out)
    model = Model(inputs=base_model.input, outputs=out)
    cnn(x, y, n, model, pretrained=True)
def save_model12(new_model_path, conv_model_path):
    model = NASNetLarge(input_shape=(img_width, img_height, 3), include_top=False, weights=None)
    if pretrained:
        model = NASNetLarge(input_shape=(img_width, img_height, 3), include_top=False, weights='imagenet')
    model.summary()

    # '?' is a placeholder for the name of the layer to transfer from
    transfer_layer = model.get_layer('?')
    conv_model = Model(inputs=model.input, outputs=transfer_layer.output)

    new_model = Sequential()
    new_model.add(conv_model)
    new_model.add(GlobalAveragePooling2D())
    if num_fc_layers >= 1:
        new_model.add(Dense(num_fc_neurons, activation='relu'))
    if num_fc_layers >= 2:
        new_model.add(Dropout(dropout))
        new_model.add(Dense(num_fc_neurons, activation='relu'))
    if num_fc_layers >= 3:
        new_model.add(Dropout(dropout))
        new_model.add(Dense(num_fc_neurons, activation='relu'))
    new_model.add(Dense(num_classes, activation='softmax'))
    print(new_model.summary())

    new_model.save(new_model_path)
    conv_model.save(conv_model_path)
    return
def __init__(self):
    self.base_model = NASNetLarge(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
                                  include_top=False, weights=None, pooling='avg')
    x = Dropout(0.75)(self.base_model.output)
    x = Dense(10, activation='softmax', name='toplayer')(x)
    self.model = Model(self.base_model.input, x)
def createModelNasNet(size):
    nas = NASNetLarge(weights='imagenet', include_top=False, pooling=None, input_shape=size)
    for layer in nas.layers:
        layer.trainable = False
    pool = AveragePooling2D(pool_size=(3, 3))(nas.output)
    return Model(inputs=nas.input, outputs=pool)
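# Hedged usage sketch for createModelNasNet(): the frozen backbone plus 3x3 average
# pooling yields a compact spatial feature map. The 331x331 size matches the ImageNet
# weights' native resolution, the batch is random data, and the commented shape is
# what that resolution would typically produce.
import numpy as np

feature_model = createModelNasNet((331, 331, 3))
feats = feature_model.predict(np.random.rand(1, 331, 331, 3).astype('float32'))
print(feats.shape)  # e.g. (1, 3, 3, 4032) after pooling the 11x11 feature maps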
def extract_features(directory, ids, model):
    if int(model) == 1:
        print("1")
        # load ResNet50 model
        model = ResNet50()
        input_size = 224
    else:
        print("2")
        # load NASNetLarge model
        model = NASNetLarge(input_shape=(331, 331, 3), include_top=True,
                            weights='imagenet', input_tensor=None, pooling=None)
        input_size = 331

    # pops the last layer to get the features
    model.layers.pop()
    model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
    # model.summary()
    print(len(model.layers))

    # model characteristics
    plot_model(model, to_file='model.png')

    imgs = load_list(ids)
    print('Dataset: %d' % len(imgs))
    N = len(imgs)
    print(N)

    results = []
    i = 0
    batch_size = 1  # this can be 8 for a GTX 1080 Ti and 32G of RAM
    while i < N:
        if i % 1024 == 0:
            print('{} from {} images.'.format(i, N))
        batch = imgs[i:i + batch_size]
        i += batch_size
        images = [load_img(os.path.join(directory, img + ".jpg"),
                           target_size=(input_size, input_size)) for img in batch]
        images = [preprocess_input(img_to_array(img)) for img in images]
        images = np.stack(images)
        r = model.predict(images)
        for ind in range(batch_size):
            results.append(r[ind])
    return results
def __init__(self, debug=False):
    self.debug = debug
    if debug:
        return
    model_path = "{}/model/model.h5".format(os.getcwd())
    if not os.path.isfile(model_path):
        print("no model file")
        exit(1)
    print("loading model")
    self.classifier = load_model(model_path)
    self.nas = NASNetLarge(input_shape=None, include_top=True, weights='imagenet',
                           input_tensor=None, pooling=None, classes=1000)
    self.nas.layers.pop()
    self.nas.outputs = [self.nas.layers[-1].output]
    self.nas.layers[-1].outbound_nodes = []
def get_model_nasnet_large():
    from keras.models import Model
    from keras.applications.nasnet import NASNetLarge
    from keras.layers.core import Dense

    model = NASNetLarge(input_shape=(299, 299, 3), weights=None)
    # replace the 1000-way ImageNet classifier with a multi-label sigmoid head
    x = model.layers[-2].output
    x = Dense(7178, activation='sigmoid', name='predictions')(x)
    model = Model(inputs=model.input, outputs=x)
    return model
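# Hedged usage sketch for get_model_nasnet_large(): with a 7178-way sigmoid head the
# model is set up for multi-label classification, so binary cross-entropy is the
# natural loss; the optimizer choice is an illustrative assumption.
model = get_model_nasnet_large()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])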
def nas_2(x_shape, y_shape):
    nas = NASNetLarge(weights=None, include_top=False, input_shape=x_shape,
                      pooling='max', classes=y_shape)
    for layer in nas.layers[:-4]:
        # if layer.name in ['block5_conv1', 'block4_conv1']:
        #     layer.trainable = True
        # else:
        layer.trainable = False

    model = Sequential()
    model.add(nas)
    model.add(Dense(y_shape))
    model.add(Activation('softmax'))
    return model
def get_model(model_name, input_tensor=Input(shape=(96, 96, 3)), num_class=2):  ##Modified by Dooman
    # `input_shape` and `IMAGE_SIZE` are expected to be defined at module level
    inputs = Input(input_shape)
    if model_name == "Xception":
        base_model = Xception(include_top=False, input_shape=input_shape)
    elif model_name == "ResNet50":
        base_model = ResNet50(include_top=False, input_shape=input_shape)
    elif model_name == "ResNet101":
        base_model = ResNet101(include_top=False, input_shape=input_shape)
    elif model_name == "InceptionV3":
        base_model = InceptionV3(include_top=False, input_shape=input_shape)
    elif model_name == "InceptionResNetV2":
        base_model = InceptionResNetV2(include_top=False, input_shape=input_shape)
    elif model_name == "DenseNet201":
        base_model = DenseNet201(include_top=False, input_shape=input_shape)
    elif model_name == "NASNetMobile":
        base_model = NASNetMobile(include_top=False, input_tensor=input_tensor)  ##Modified by Dooman
    elif model_name == "NASNetLarge":
        base_model = NASNetLarge(include_top=False, input_tensor=input_tensor)
    if model_name == "VGG16":
        base_model = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

    for layer in base_model.layers:
        layer.trainable = False

    x = base_model(inputs)
    output1 = GlobalMaxPooling2D()(x)
    output2 = GlobalAveragePooling2D()(x)
    output3 = Flatten()(x)
    outputs = Concatenate(axis=-1)([output1, output2, output3])
    outputs = Dropout(0.5)(outputs)
    outputs = BatchNormalization()(outputs)
    if num_class > 1:
        outputs = Dense(num_class, activation="softmax")(outputs)
    else:
        outputs = Dense(1, activation="sigmoid")(outputs)
    model = Model(inputs, outputs)
    model.summary()
    return model
def build_model_flat(input_image):
    input_tensor = Input(shape=(input_image))
    base_model = NASNetLarge(input_shape=input_image, input_tensor=input_tensor,
                             include_top=False, weights=None)
    flat = Flatten()(base_model.output)
    fc_1 = Dense(4096, activation='relu')(flat)
    output = Dense(10 * 10, activation='sigmoid')(fc_1)
    model = Model(inputs=input_tensor, outputs=output)
    model.compile(optimizer='adam', loss=losses.binary_crossentropy)
    return model
def model_load():
    # With weights='imagenet', NASNetLarge is loaded with the pretrained parameters as its initial values.
    base_model = NASNetLarge(weights='imagenet', include_top=False,
                             input_tensor=Input(shape=(img_size, img_size, 3)))
    # base_model.summary()
    x = base_model.output
    # flatten the feature maps
    x = Flatten()(x)
    # dropout to reduce overfitting
    x = Dropout(.4)(x)
    return (x, base_model)
def nasnet_large(input_image_size, number_of_output_categories):
    base_model = NASNetLarge(include_top=False, weights="imagenet",
                             input_shape=(input_image_size, input_image_size, 3))
    base_model.trainable = False

    model = Sequential()
    model.add(base_model)
    model.add(Flatten())
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(1024, activation='relu'))
    model.add(Dense(number_of_output_categories, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer=Adam(), metrics=['accuracy'])
    return model
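# Hedged usage sketch for nasnet_large(): 331 is NASNetLarge's native ImageNet input
# size and 28 output categories is an illustrative assumption; the sigmoid head and
# binary cross-entropy make this a multi-label classifier.
model = nasnet_large(input_image_size=331, number_of_output_categories=28)
model.summary()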
def NASNet(self):
    base_model = NASNetLarge(include_top=False, weights="imagenet",
                             input_shape=(self.height, self.width, self.depth))
    model = Sequential()
    model.add(base_model)
    model.add(GlobalAveragePooling2D())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(self.classes, activation='softmax'))
    return model