def train_classifier(train_gt, train_img_dir, fast_train=False):
    """Fine-tune an ImageNet-pretrained Xception on the labelled images.

    train_gt maps image filename -> integer class label; train_img_dir holds
    the image files themselves.

    NOTE(review): `fast_train` is currently unused — both short training
    phases run regardless of its value; confirm the intended semantics.
    """
    filenames = os.listdir(train_img_dir)
    frame = pd.DataFrame({'filename': filenames})
    frame['class'] = [train_gt[fname] for fname in frame['filename']]

    batch_size = 10
    augmenter = ImageDataGenerator(
        rescale=1. / 255,
        rotation_range=10,
        zoom_range=0.2,
        horizontal_flip=True,
    )
    batches = augmenter.flow_from_dataframe(
        directory=train_img_dir,
        dataframe=frame,
        target_size=(224, 224),
        batch_size=batch_size,
        class_mode='sparse',
        x_col='filename',
        y_col='class',
    )

    num_classes = 50
    backbone = Xception()
    # Phase 1: freeze the whole backbone and optimize only the new head.
    for layer in backbone.layers:
        layer.trainable = False
    features = backbone.get_layer('block14_sepconv2_act').output
    head = L.GlobalMaxPooling2D()(features)
    head = L.Dropout(0.5)(head)
    head = L.Dense(200, activation='relu')(head)
    head = L.Dense(num_classes, activation='softmax')(head)
    model = keras.models.Model(inputs=backbone.inputs, outputs=head)
    model.compile(
        optimizer=Adam(lr=0.001),
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'],
    )
    # Locally this was run until convergence.
    model.fit_generator(batches, steps_per_epoch=10, verbose=1)

    # Phase 2: unfreeze everything and fine-tune at a lower learning rate.
    for layer in model.layers:
        layer.trainable = True
    model.compile(
        optimizer=Adam(lr=0.0001),
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'],
    )
    # Locally this was run until convergence.
    model.fit_generator(batches, steps_per_epoch=10, verbose=1)
def predict_xception(imgs, n=20):
    """Extract Xception 'avg_pool' features for `imgs` and dump them to CSV.

    imgs: batch of preprocessed images accepted by `Model.predict`.
    n: prediction batch size.
    Returns the (num_images, 2048) feature array.
    """
    print("start doing xception_dresses ...")
    # NOTE(review): `image_input`, `Model` and `my_path` are module-level
    # names not defined in this function — confirm they exist at call time.
    xception = Xception(include_top=True, weights='imagenet',
                        input_tensor=image_input)
    last_layer = xception.get_layer('avg_pool').output
    custom_xception = Model(image_input, last_layer)
    # Inference only — freeze every layer.
    for layer in custom_xception.layers:
        layer.trainable = False
    custom_xception.summary()
    # Fix: the result used to be stored in a local named `predict_xception`,
    # shadowing the function itself; use a distinct name.
    features = custom_xception.predict(imgs, batch_size=n)
    pd.DataFrame(features).to_csv(
        os.path.join(
            my_path, "../../data/marks/models/xception/predict_xception.csv"),
        index=False)
    print("finish xception_dresses ...")
    return features
def xception_enc_model(wanted_layers=(0, 5, 6, 24, 27)):
    """Xception based encoder model with preselected output layers.

    wanted_layers: indices into the list of matching batch-norm layers, or
        layer names directly; an empty sequence selects every matching layer.
        (Default changed from a mutable list literal to a tuple — same values.)
    Returns a Model mapping the Xception input to the chosen layer outputs.
    """
    from keras.applications import Xception
    xception = Xception(include_top=False, weights='imagenet')
    # Names of the conv1_bn / conv2_bn layers, in network order.  The original
    # condition mixed `and`/`or` without parentheses; this grouping is the
    # intended one ("a block layer whose name ends in conv1_bn or conv2_bn").
    bn_names = [
        l.name for l in xception.layers
        if 'block' in l.name
        and ('conv2_bn' in l.name or 'conv1_bn' in l.name)
    ]
    wanted_layers = list(wanted_layers)
    if not wanted_layers:
        # Fix: keep *names* here, not Layer objects, so get_layer below works.
        wanted_layers = bn_names
    elif isinstance(wanted_layers[0], int):
        wanted_layers = [
            name for n, name in enumerate(bn_names) if n in wanted_layers
        ]
    # Fix: Model outputs must be tensors, not Layer objects — take `.output`.
    return Model(
        inputs=xception.input,
        outputs=[xception.get_layer(name).output for name in wanted_layers])
def build_model():
    """Build a 12-class classifier on a frozen Xception backbone.

    Uses the module-level `im_size` as the input shape.  Returns the compiled
    functional Model (uncompiled — caller compiles it).
    """
    backbone = Xception(
        input_shape=im_size,
        include_top=False,
        weights='imagenet',
        pooling="avg",
    )
    # Fix: the original called x_model.get_layer(l) with a Layer *object*
    # (get_layer expects a name or index), so every call raised ValueError,
    # the bare `except: pass` swallowed it, and no layer was ever frozen.
    # Freeze the layers directly instead.
    for layer in backbone.layers:
        layer.trainable = False
    x = backbone.output
    x = BatchNormalization()(x)
    x = Dropout(.5)(x)
    x = Dense(512, activation="elu")(x)
    x = Dropout(.5)(x)
    x = Dense(512, activation="elu")(x)
    x = Dropout(.5)(x)
    x = Dense(12, activation='softmax')(x)
    # `inputs`/`outputs` are the supported keyword names; the legacy
    # `input=`/`output=` spelling was removed from the Model constructor.
    return Model(inputs=[backbone.input], outputs=x)
# NOTE(review): this chunk opens mid-expression — the line below is the tail
# of a callback/optimizer call (mode/epsilon/cooldown/min_lr kwargs, e.g. a
# ReduceLROnPlateau-style call) whose opening parenthesis is outside view.
mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)

# Reuse a previously saved model if present; otherwise build a two-backbone
# (Xception + InceptionV3) feature extractor sharing one image input.
if os.path.exists('dog_single_xception.h5'):
    model = load_model('dog_single_xception.h5')
else:
    # create the base pre-trained model
    # NOTE(review): input_tensor is never used below — both backbones are
    # built with input_tensor=None and fed through `img1` instead; confirm
    # this assignment is dead code.
    input_tensor = Input(shape=(299, 299, 3))
    # Xception trunk cut at its global-average-pool layer.
    base_model1 = Xception(include_top=True, weights='imagenet',
                           input_tensor=None, input_shape=None)
    base_model1 = Model(inputs=[base_model1.input],
                        outputs=[base_model1.get_layer('avg_pool').output],
                        name='xception')
    # InceptionV3 trunk cut the same way.
    base_model2 = InceptionV3(include_top=True, weights='imagenet',
                              input_tensor=None, input_shape=None)
    base_model2 = Model(inputs=[base_model2.input],
                        outputs=[base_model2.get_layer('avg_pool').output],
                        name='inceptionv3')
    # One shared 299x299x3 input feeds both backbones.
    img1 = Input(shape=(299, 299, 3), name='img_1')
    feature1 = base_model1(img1)
    feature2 = base_model2(img1)
# NOTE(review): this chunk opens mid-call — the line below is the tail of a
# flow_from_directory(...)-style call whose opening is outside this view.
batch_size=BATCH_SIZE, class_mode='categorical')

# Validation batches, same geometry/labels as the training generator.
val_generator = test_datagen.flow_from_directory(
    val_path,
    target_size=(299,299),
    batch_size=BATCH_SIZE,
    class_mode='categorical')

print('Loading Xception Weights ...')
with tf.device('/cpu:0'):
    # Headless Xception backbone with global average pooling, loaded from a
    # local weights file instead of downloading.
    xception_path = 'xception_model/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
    inception = Xception(include_top=False, weights=xception_path,
                         input_tensor=None,
                         input_shape=(IMG_WIDTH, IMG_HEIGHT, 3),
                         pooling='avg')
    # New dropout + softmax head on top of the pooled features.
    output = inception.get_layer(index=-1).output
    output = Dropout(0.5)(output)
    output = Dense(NBR_MODELS, activation='softmax',
                   name='predictions')(output)
    # Freeze the backbone; only the new head will train.
    for layer in inception.layers:
        layer.trainable = False
    model = Model(outputs=output, inputs=inception.input)

# NOTE(review): the original file's nesting of the two `if` blocks below is
# ambiguous in this collapsed view — confirm against the original layout.
if FINE_TUNE:
    print('Loading Xception Weights in file %s' % best_model_file)
    #model = multi_gpu_model(model, gpus=2)
    model.load_weights(best_model_file)

if new_classes:
    # NOTE(review): NBR_MODELS is updated here AFTER the Dense head above was
    # already built with the old value — confirm this is intended.
    with open(new_classes) as f:
        NBR_MODELS = len(f.readlines())
"""Export a truncated VGG16/Xception feature extractor chosen on the CLI."""
from keras.applications import VGG16, Xception
from keras.models import Model
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-l", "--layer", required=True,
                    help="Name of layer until which the network should be kept")
parser.add_argument("-o", "--output", required=True,
                    help="Path to where the model file should be saved")
parser.add_argument("-t", "--network-type", default="vgg16",
                    help="The type of network to create")
parser.add_argument("-s", "--input-shape", nargs="+", type=int,
                    default=(187, 187, 3),
                    help="Input shape to first layer of network")
args = vars(parser.parse_args())

# Pick the backbone requested on the command line (vgg16 is the default).
shape = tuple(args["input_shape"])
if args["network_type"].lower() == "xception":
    model = Xception(input_shape=shape, weights="imagenet",
                     include_top=False, pooling="avg")
else:
    model = VGG16(input_shape=shape, weights="imagenet", include_top=False)

# Cut the network at the requested layer and save the resulting extractor.
cut_layer = model.get_layer(name=args["layer"])
extractor_model = Model(model.input, cut_layer.output)
extractor_model.save(args["output"])
def SSD(input_shape, num_classes):
    """SSD300 architecture.

    # Arguments
        input_shape: Shape of the input image,
            expected to be either (300, 300, 3) or (3, 300, 300)(not tested).
        num_classes: Number of classes including background.

    # References
        https://arxiv.org/abs/1512.02325
    """
    img_size = (input_shape[1], input_shape[0])
    input_shape = (input_shape[1], input_shape[0], 3)
    xception_input_shape = (299, 299, 3)
    Input0 = Input(input_shape)
    # Xception backbone truncated at 'add_11' as the shared feature extractor.
    xception = Xception(input_shape=xception_input_shape,
                        include_top=False, weights='imagenet')
    FeatureExtractor = Model(inputs=xception.input,
                             outputs=xception.get_layer('add_11').output)
    x = FeatureExtractor(Input0)
    # Extra feature maps at decreasing resolutions; each pwconvN feeds one
    # prediction head below.
    x, pwconv3 = Conv(x, 1024)
    x, pwconv4 = LiteConv(x, 4, 1024)
    x, pwconv5 = LiteConv(x, 5, 512)
    x, pwconv6 = LiteConv(x, 6, 512)
    x, pwconv7 = LiteConv(x, 7, 256)
    x, pwconv8 = LiteConv(x, 8, 256)
    # Per-scale heads: localization offsets, class confidences, prior boxes.
    pwconv3_mbox_loc_flat, pwconv3_mbox_conf_flat, pwconv3_mbox_priorbox = prediction(
        pwconv3, 3, 3, 60.0, None, [2], num_classes, img_size)
    pwconv4_mbox_loc_flat, pwconv4_mbox_conf_flat, pwconv4_mbox_priorbox = prediction(
        pwconv4, 4, 6, 105.0, 150.0, [2, 3], num_classes, img_size)
    pwconv5_mbox_loc_flat, pwconv5_mbox_conf_flat, pwconv5_mbox_priorbox = prediction(
        pwconv5, 5, 6, 150.0, 195.0, [2, 3], num_classes, img_size)
    pwconv6_mbox_loc_flat, pwconv6_mbox_conf_flat, pwconv6_mbox_priorbox = prediction(
        pwconv6, 6, 6, 195.0, 240.0, [2, 3], num_classes, img_size)
    pwconv7_mbox_loc_flat, pwconv7_mbox_conf_flat, pwconv7_mbox_priorbox = prediction(
        pwconv7, 7, 6, 240.0, 285.0, [2, 3], num_classes, img_size)
    pwconv8_mbox_loc_flat, pwconv8_mbox_conf_flat, pwconv8_mbox_priorbox = prediction(
        pwconv8, 8, 6, 285.0, 300.0, [2, 3], num_classes, img_size)
    # Gather all predictions
    mbox_loc = concatenate([
        pwconv3_mbox_loc_flat, pwconv4_mbox_loc_flat, pwconv5_mbox_loc_flat,
        pwconv6_mbox_loc_flat, pwconv7_mbox_loc_flat, pwconv8_mbox_loc_flat
    ], axis=1, name='mbox_loc')
    mbox_conf = concatenate([
        pwconv3_mbox_conf_flat, pwconv4_mbox_conf_flat, pwconv5_mbox_conf_flat,
        pwconv6_mbox_conf_flat, pwconv7_mbox_conf_flat, pwconv8_mbox_conf_flat
    ], axis=1, name='mbox_conf')
    mbox_priorbox = concatenate([
        pwconv3_mbox_priorbox, pwconv4_mbox_priorbox, pwconv5_mbox_priorbox,
        pwconv6_mbox_priorbox, pwconv7_mbox_priorbox, pwconv8_mbox_priorbox
    ], axis=1, name='mbox_priorbox')
    # Fix: tensors never expose an `int_shape` attribute, so the original
    # `elif hasattr(mbox_loc, 'int_shape')` was always False and num_boxes
    # was left undefined on backends without `_keras_shape` (NameError).
    # Fall back to K.int_shape unconditionally instead.
    if hasattr(mbox_loc, '_keras_shape'):
        num_boxes = mbox_loc._keras_shape[-1] // 4
    else:
        num_boxes = K.int_shape(mbox_loc)[-1] // 4
    mbox_loc = Reshape((num_boxes, 4), name='mbox_loc_final')(mbox_loc)
    mbox_conf = Reshape((num_boxes, num_classes),
                        name='mbox_conf_logits')(mbox_conf)
    mbox_conf = Activation('softmax', name='mbox_conf_final')(mbox_conf)
    predictions = concatenate([mbox_loc, mbox_conf, mbox_priorbox],
                              axis=2, name='predictions')
    model = Model(inputs=Input0, outputs=predictions)
    return model
# Vectorize up to 20k Twitter images through an Xception 'avg_pool' cut,
# saving vectors (npy) and the processed paths (txt) incrementally.
imgs_dir = "/mnt/data/datasets/insight-twitter-images/images/"
vecs_path = 'twitter_vectors.npy'
img_paths = glob('%s/*.jpg' % imgs_dir)[:20000]
fp_paths = open('twitter_paths.txt', 'w')
dim = 224
batch_size = 500
# Reusable batch buffer (batch, H, W, RGB).
imgs_batch = np.zeros((batch_size, dim, dim, 3))
# Instantiate model and chop off some layers.
vector_layer = "avg_pool"
m1 = Xception()
m2 = Model(inputs=m1.input, outputs=m1.get_layer(vector_layer).output)
# One row per image, width = feature dimension of the cut layer.
vecs = np.zeros((len(img_paths), m2.output_shape[-1]))
for i in range(0, len(img_paths), batch_size):
    # NOTE(review): if len(img_paths) is not a multiple of batch_size, the
    # inner loop indexes past the end of img_paths (IndexError) — confirm
    # the dataset size is always an exact multiple.
    for j in range(batch_size):
        imgs_batch[j] = get_img(img_paths[i + j], dim)
    # NOTE(review): this rebinds imgs_batch to the preprocessed array; the
    # next iteration then writes raw images into that rebound buffer —
    # verify preprocess_input is not applied twice to stale data.
    imgs_batch = preprocess_input(imgs_batch, mode='tf')
    prds_batch = m2.predict(imgs_batch)
    vecs[i:i + batch_size] = prds_batch
    # Persist progress after every batch so a crash loses at most one batch.
    fp_paths.write('\n'.join(img_paths[i:i + batch_size]) + '\n')
    np.save(vecs_path, vecs[:i + batch_size])
    # NOTE(review): statement truncated in this chunk — its arguments are
    # outside the visible source.
    print('%d-%d %.3lf %.3lf %.3lf' %
def _cnn_features(model, preprocess, target_size, input_folder, verbose=False):
    """Run every *.jpg in input_folder through `model` after `preprocess`.

    target_size: (H, W) the images are resized to before inference.
    verbose: print each filename as it is processed.
    Returns a list of per-image prediction arrays.
    """
    features = []
    for filename in glob.glob(os.path.join(input_folder, '*.jpg')):
        if verbose:
            print(filename)
        img = image.load_img(filename, target_size=target_size)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess(x)
        features.append(model.predict(x))
    return features


def extract(input_folder, feature_name, feature_folder):
    """Extract `feature_name` features for every *.jpg in input_folder.

    feature_name: one of vgg19/vgg16/inception/xception/resnet50/lbp/hog.
    The squeezed feature matrix is saved to
    `<feature_folder>/<name>_features.npy` and also returned ([] when the
    feature name is unknown or no images were processed).
    """
    X = []
    # The five CNN branches shared identical load/preprocess/predict loops;
    # they now delegate to _cnn_features (behavior unchanged).
    if feature_name == "vgg19":
        base_model = VGG19(weights='imagenet')
        save_file_name = 'vgg19_features'
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc2').output)
        X = _cnn_features(model, vgg19_pi, (224, 224), input_folder)
    elif feature_name == "vgg16":
        base_model = VGG16(weights='imagenet')
        save_file_name = 'vgg16_features'
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('fc2').output)
        X = _cnn_features(model, vgg16_pi, (224, 224), input_folder)
    elif feature_name == 'inception':
        base_model = InceptionV3(weights='imagenet')
        save_file_name = 'inceptionV3_features'
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        # verbose=True preserves the original per-file progress print that
        # only the inception branch had.
        X = _cnn_features(model, iv3_pi, (299, 299), input_folder,
                          verbose=True)
    elif feature_name == 'xception':
        base_model = Xception(weights='imagenet')
        save_file_name = 'xception_features'
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        X = _cnn_features(model, xc_pi, (299, 299), input_folder)
    elif feature_name == 'resnet50':
        base_model = ResNet50(weights='imagenet')
        save_file_name = 'resnet50_features'
        model = Model(inputs=base_model.input,
                      outputs=base_model.get_layer('avg_pool').output)
        X = _cnn_features(model, preprocess_input, (224, 224), input_folder)
    elif feature_name == 'lbp':
        # Uniform local binary pattern histogram per image.
        save_file_name = 'lbp_features'
        for filename in glob.glob(os.path.join(input_folder, '*.jpg')):
            gray = imread(filename, as_grey=True)
            lbt_image = local_binary_pattern(gray, P=24, R=3,
                                             method='uniform')
            (lbt_hist, _) = np.histogram(lbt_image.ravel(),
                                         bins=int(lbt_image.max() + 1),
                                         range=(0, 24 + 2))
            X.append(lbt_hist)
    elif feature_name == 'hog':
        # Histogram-of-oriented-gradients descriptor per image.
        save_file_name = 'hog_features'
        for filename in glob.glob(os.path.join(input_folder, '*.jpg')):
            gray = imread(filename, as_grey=True)
            X.append(hog(gray, block_norm='L2'))
    else:
        print('Not support model ' + feature_name)

    if X:
        X = np.squeeze(np.array(X))
        save_file = os.path.join(feature_folder, save_file_name + '.npy')
        if not os.path.exists(feature_folder):
            os.makedirs(feature_folder)
        elif os.path.isfile(save_file):
            # np.save would overwrite anyway; removal kept from the original.
            os.remove(save_file)
        np.save(save_file, X)
    else:
        print('No input received')
    return X