Example #1
def build_model_inception_v2_resnet(lock_base_model: bool = True):
    img_input = Input(shape=INPUT_SHAPE)
    base_model = InceptionResNetV2(input_tensor=img_input, include_top=False, pooling='avg')
    if lock_base_model:
        for layer in base_model.layers:
            layer.trainable = False
    # base_model.summary()
    res = Dense(NB_CLASSES, activation='sigmoid', name='classes', kernel_initializer='zero',
                kernel_regularizer=l1(1e-5))(base_model.layers[-1].output)
    model = Model(inputs=img_input, outputs=res)
    # model.summary()
    return model
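
A minimal usage sketch for this builder, assuming INPUT_SHAPE and NB_CLASSES are defined elsewhere in the module; since the head ends in a sigmoid, binary cross-entropy is a reasonable multi-label loss here (the compile settings below are an assumption, not part of the original snippet).

# Hypothetical usage of build_model_inception_v2_resnet; INPUT_SHAPE and
# NB_CLASSES are assumed to be defined elsewhere in the original module.
model = build_model_inception_v2_resnet(lock_base_model=True)
model.compile(optimizer='adam',
              loss='binary_crossentropy',   # matches the sigmoid multi-label head
              metrics=['accuracy'])
# model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=5)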
    def __init__(self, recent_log=None):
        # Init constants
        # Init classification buffer. Designed to smooth the classification
        self.name_to_load = [Config.DATA_DIR_NAMES[0]] * 5
        self.name = 'InceptionResNet'

        tf.keras.backend.clear_session()
        self.STANDARD_IMAGE_SIZE = (224, 224, 3)
        model = InceptionResNetV2(include_top=False,
                                  weights=None,
                                  input_tensor=None,
                                  input_shape=self.STANDARD_IMAGE_SIZE,
                                  pooling=None)
        x = GlobalAveragePooling2D()(model.output)
        # let's add a fully-connected layer
        x = Dense(1024, activation='relu')(x)
        # and a softmax output layer over the dataset's classes
        x = Dense(len(Config.DATA_DIR_NAMES),
                  activation='softmax',
                  name='predictions')(x)

        # create graph of your new model
        self.model = Model(inputs=model.inputs,
                           outputs=x,
                           name='InceptionResNetV2')
        # print(model.summary())

        weights_path = os.path.join(get_absolute_data_path()[:-5],
                                    'josiah_testing', 'run_logs')
        if recent_log is None:
            recent_log_dir = [
                d for d in os.listdir(weights_path)
                if self.name.lower() in d.lower()
            ]
            recent_log_dir.sort(reverse=True)
        else:
            recent_log_dir = [recent_log]

        weights_path = os.path.join(weights_path, recent_log_dir[0],
                                    'model.h5')
        print(f'Loading final weight path: {weights_path}')

        # If we want to use weights, then try to load them
        self.model.load_weights(weights_path)
        global graph
        graph = tf.get_default_graph()
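
A hedged sketch of how this class might expose inference: the graph captured at the end of __init__ is re-entered before predict(), the usual pattern when a TF1-era Keras model is called from another thread. The helper below, its name, and its preprocessing assumptions are not part of the original class.

    def predict(self, img):
        # Hypothetical helper: img is assumed to already be a (224, 224, 3)
        # array scaled the same way as the training data; inference runs
        # inside the graph captured at the end of __init__.
        batch = np.expand_dims(np.asarray(img, dtype=np.float32), axis=0)
        with graph.as_default():
            probs = self.model.predict(batch)[0]
        return Config.DATA_DIR_NAMES[int(np.argmax(probs))]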
def main():
    logging.info("Start preprocessing Images...")
    model = InceptionResNetV2(include_top=False, weights='imagenet')
    logging.info("Finish loading InceptionResNetV2 Model...")

    raw = json.load(open('data/coco_raw.json', 'r'))
    # img_features_map:  {image_id : image_features (height=8, width=8, channels=1536)}
    # used hdf5 to store the map
    img_features_map = h5py.File('data/img_features.hdf5', 'w')
    # other image ids are stored in a list
    other_img = []
    for i, img_raw in enumerate(raw):
        img = Image.open('coco/images/' + img_raw['file_path'])
        # resize to 299x299 with a high-quality downsampling filter
        img_resized = img.resize((299, 299), PIL.Image.ANTIALIAS)
        img_resized = np.asarray(img_resized, dtype=np.uint8)
        img_id = img_raw['id']
        # transform image for preprocessing
        cnn_input = img_resized.copy()
        cnn_input = cnn_input.astype(np.float32)
        cnn_input = np.expand_dims(cnn_input, axis=0)
        # preprocess image, only use data with RGB 3 layers
        if cnn_input.shape == (1, 299, 299, 3):
            cnn_input = preprocess_input(cnn_input)
            # extract features and stores the result
            pred = model.predict(cnn_input)
            img_features = np.squeeze(pred, axis=0)
            # reshape 8*8*1536 to 64*1536
            assert img_features.shape == (8, 8, 1536)
            img_features = np.reshape(img_features, (64, 1536))
            # store hdf5 file: image_features can be retrieved by using img_features_map[str(image_id)].value
            img_features_map.create_dataset(str(img_id), data=img_features)
        # store img_id for images with only one layer
        else:
            other_img.append(img_raw)

        if i % 1000 == 0:
            logging.info("Finished extracting {} images".format(i))

    img_features_map.close()
    cPickle.dump(other_img, open("data/other_img.p", "wb"))
    logging.info('Finished extracting all image features!')
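
Once the HDF5 file has been written, the per-image features can be read back by image id. A minimal sketch with h5py (the .value accessor mentioned in the comment above is deprecated in newer h5py; indexing with [()] returns the full array):

# Sketch: reading stored features back for a given image id.
import h5py

with h5py.File('data/img_features.hdf5', 'r') as f:
    image_id = 9  # hypothetical id; any id written above works
    features = f[str(image_id)][()]  # ndarray of shape (64, 1536)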
Example #4
def inception_resnet_v2(weights="imagenet",
                        input_shape=(224, 224, 3),
                        classes=100,
                        learning_rate=0.00005,
                        frozen=False):
    model = InceptionResNetV2(include_top=False, weights=weights, input_shape=input_shape)

    if frozen:
        model.trainable = False

    x = model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(classes, name='output_1', activation='softmax')(x)
    model = Model(inputs=model.input, outputs=x)

    model.compile(optimizer=tf.compat.v1.keras.optimizers.Adam(learning_rate=learning_rate),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    return model
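
A hedged usage sketch for the factory above. Because it compiles with sparse_categorical_crossentropy, the labels are assumed to be integer class indices rather than one-hot vectors; x_train and y_train are placeholders.

# Hypothetical usage of inception_resnet_v2; x_train/y_train are assumed to
# exist, with y_train holding integer class indices.
model = inception_resnet_v2(weights='imagenet',
                            input_shape=(224, 224, 3),
                            classes=100,
                            frozen=True)
# model.fit(x_train, y_train, batch_size=32, epochs=10, validation_split=0.1)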
def train(hyper_params, reset_dataset=False):
    # Define upper level settings InceptionResNetV2
    global DATASET_LOADED, X, Y
    model_dir = f'./run_logs/{time()}_{"_".join("{!s}.{!r}".format(key,val) for (key,val) in hyper_params.items())}'

    # Get the directory path
    data_dir = get_absolute_data_path()
    target_dict = {name: i for i, name in enumerate(Config.DATA_DIR_NAMES)}

    if reset_dataset or not DATASET_LOADED:
        X, Y = load_images(hyper_params['n_classes'], data_dir, target_dict)
        DATASET_LOADED = True

    if hyper_params['sanity_test']:
        X_train, X_test, y_train, y_test = (X, X, Y, Y)
    else:
        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            Y,
                                                            test_size=0.10)

    print("Done building data set")
    model = None
    x = None
    if hyper_params['name'] == 'VGG19':
        model = VGG19(include_top=False,
                      weights=None,
                      input_tensor=None,
                      input_shape=STANDARD_IMAGE_SIZE,
                      pooling=None)
        # If we want to use weights, then try to load them
        if hyper_params['use_weights']:
            model.load_weights(
                './weights/vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5')

    elif hyper_params['name'] == 'InceptionResNetV2':
        model = InceptionResNetV2(include_top=False,
                                  weights=None,
                                  input_tensor=None,
                                  input_shape=STANDARD_IMAGE_SIZE,
                                  pooling=None)
        # If we want to use weights, then try to load them
        if hyper_params['use_weights']:
            model.load_weights(
                './weights/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
            )
    elif hyper_params['name'] == 'NASNetMobile':
        model = NASNetMobile(include_top=False,
                             weights=None,
                             input_tensor=None,
                             input_shape=STANDARD_IMAGE_SIZE,
                             pooling=None)
        # If we want to use weights, then try to load them
        if hyper_params['use_weights']:
            model.load_weights('./weights/NASNet-mobile-no-top.h5')
    elif hyper_params['name'] == 'MobileNet':
        model = MobileNet(include_top=False,
                          weights=None,
                          input_tensor=None,
                          input_shape=STANDARD_IMAGE_SIZE,
                          pooling=None)
        # If we want to use weights, then try to load them
        if hyper_params['use_weights']:
            model.load_weights('./weights/mobilenet_1_0_224_tf_no_top.h5')
    elif hyper_params['name'] == 'MobileNetBayesian':
        model = MobileNet(include_top=False,
                          weights=None,
                          input_tensor=None,
                          input_shape=STANDARD_IMAGE_SIZE,
                          pooling=None)
        # If we want to use weights, then try to load them
        if hyper_params['use_weights']:
            model.load_weights('./weights/mobilenet_1_0_224_tf_no_top.h5')
    """ Freeze the previous layers """
    for layer in model.layers:
        layer.trainable = False
    """ By Setting top to False, we need to add our own classification layers """
    # The model documentation notes that this is the size of the classification block
    x = GlobalAveragePooling2D()(model.output)
    # let's add a fully-connected layer
    x = Dense(16, activation='relu')(x)
    # and a softmax output layer over the requested number of classes
    x = Dense(hyper_params['n_classes'],
              activation='softmax',
              name='predictions')(x)

    # create graph of your new model
    model = Model(inputs=model.inputs, outputs=x, name=hyper_params['name'])

    if hyper_params['opt'] == 'sgd':
        opt = SGD(lr=0.01)
    else:
        opt = 'adam'

    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy', 'mean_squared_error'])

    tensorboard = TrainValTensorBoard(log_dir=model_dir,
                                      histogram_freq=0,
                                      X_train=X_train,
                                      X_test=X_test,
                                      y_train=y_train,
                                      y_test=y_test,
                                      write_graph=True,
                                      write_images=False)
    """ Classes are going to be very imbalanced. Weight them """
    y_labels = [y.argmax() for y in y_train]
    class_weights = class_weight.compute_class_weight('balanced',
                                                      np.unique(y_labels),
                                                      y_labels)
    # Keras expects class_weight as a dict mapping class index to weight
    class_weights = dict(enumerate(class_weights))
    """ Add image augmentation """
    if hyper_params['use_aug']:
        datagen = tf.keras.preprocessing.image.ImageDataGenerator(
            featurewise_center=True,
            featurewise_std_normalization=True,
            channel_shift_range=0.5,
            rotation_range=180,
            width_shift_range=0.1,
            height_shift_range=0.1,
            brightness_range=[0.5, 1.0],
            horizontal_flip=True,
            vertical_flip=True,
            zoom_range=0.1)
        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(X_train)

        # fits the model on batches with real-time data augmentation:
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=32),
                            steps_per_epoch=len(X_train) // 32,
                            epochs=hyper_params['epochs'],
                            validation_data=(X_test, y_test),
                            callbacks=[tensorboard],
                            class_weight=class_weights)
    else:
        model.fit(X_train,
                  y_train,
                  epochs=hyper_params['epochs'],
                  validation_data=(X_test, y_test),
                  callbacks=[tensorboard],
                  class_weight=class_weights)

    print(f'\nEvaluation: {model.evaluate(X_test, y_test)}'
          )  # So this is currently: loss & accuracy
    prediction_y = model.predict(X_test)
    print(f'\nPrediction: {prediction_y}')
    print(f'\nFor Y targets {y_test}')

    # Save entire model to a HDF5 file
    model.save(model_dir + '/model.h5')

    cnf_matrix = confusion_matrix(np.argmax(y_test, axis=1),
                                  np.argmax(prediction_y, axis=1))
    np.set_printoptions(precision=2)
    print(cnf_matrix)
    # plt.figure()
    # plot_confusion_matrix(cnf_matrix, classes=[c for c in target_dict],
    #                       title='Confusion matrix using ' + hyper_params['name'])

    # print(f'Saving confusion matrix to {model_dir + os.sep + "confusion_matrix.jpg"}')
    # plt.savefig(model_dir + os.sep + 'confusion_matrix.jpg')
    #
    # from sklearn.metrics import precision_recall_fscore_support
    # metrics = precision_recall_fscore_support(np.argmax(y_test, axis=1), np.argmax(prediction_y, axis=1),
    #                                           average='weighted')
    # plot_precision_recall_f1(metrics, ['Precision', 'Recall', 'f Score'],
    #                          title='Metrics for ' + hyper_params['name'])
    # print(f'Saving confusion matrix to {model_dir + os.sep + "prec_recall_fscore.jpg"}')
    # plt.savefig(model_dir + os.sep + 'prec_recall_fscore.jpg')
    tf.keras.backend.clear_session()
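
train() reads its configuration from the hyper_params dictionary by key. A minimal sketch of a call, using only the keys the function actually accesses; the concrete values are placeholders, not taken from the original.

# Sketch of a call to train(); values are illustrative placeholders.
hyper_params = {
    'name': 'InceptionResNetV2',  # or 'VGG19', 'NASNetMobile', 'MobileNet', ...
    'n_classes': 10,              # placeholder value
    'use_weights': True,
    'opt': 'sgd',
    'epochs': 20,
    'use_aug': True,
    'sanity_test': False,
}
train(hyper_params, reset_dataset=True)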
                  'GE_Basic_LED_60W_Soft_Light',
                  'GE_Basic_LED_90W_Daylight',
                  'GE_Classic_LED_65W_Soft_White',
                  'GE_Vintage_LED_60W_Warm_Light',
                  'OSI_60W_13W_CFL_SOFT_WHITE_6_CT']

app = Flask(__name__)

name_to_load = ['FEIT_40W_T8_TUBE_MCRWV_BULB_120V'] * 5

# loaded_image_locations = pd.read_csv(os.path.join(get_absolute_data_path()[:-5], 'josiah_testing', 'demo',
#                                                   './Lowes Display Sheet - Sheet1.csv'))

tf.keras.backend.clear_session()
STANDARD_IMAGE_SIZE = (224, 224, 3)
model = InceptionResNetV2(include_top=False, weights=None,
                          input_tensor=None, input_shape=STANDARD_IMAGE_SIZE, pooling=None)
x = Flatten(name='flatten')(model.output)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
x = Dense(len(DATA_DIR_NAMES), activation='softmax', name='predictions')(x)

# create graph of your new model
model = Model(inputs=model.inputs, outputs=x, name='InceptionResNetV2')
# print(model.summary())

weights_path = os.path.join(get_absolute_data_path()[:-5], 'josiah_testing', 'demo', 'model.h5')
# If we want to use weights, then try to load them
model.load_weights(weights_path)
global graph
graph = tf.get_default_graph()
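
Serving this model from the Flask app typically means re-entering the captured default graph inside the request handler, the usual TF1-era pattern. The route below is a hypothetical sketch (its endpoint, payload format, and response shape are assumptions); only app, model, graph, and DATA_DIR_NAMES come from the snippet above.

from flask import request, jsonify

@app.route('/classify', methods=['POST'])
def classify():
    # Hypothetical route: the request body is assumed to carry a (224, 224, 3)
    # image as a nested list; inference re-enters the captured default graph.
    img = np.asarray(request.json['image'], dtype=np.float32)
    with graph.as_default():
        probs = model.predict(np.expand_dims(img, axis=0))[0]
    return jsonify({'label': DATA_DIR_NAMES[int(np.argmax(probs))]})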
"""
we set up a sequential model that we can add layers to
"""
my_new_model = Sequential()
"""
first we add all of pre-trained  model
we've written include_top=False, this is how specify that we want to exlude
the layer that makes prediction into the thousands of categories used in the ImageNet competition
we set the weights to be 'ImageNet' to specify that we use the pre-traind model on ImageNet
pooling equals average says that if we had extra channels in our tensor at the end of this step
we want to collapse them to 1d tensor by taking an average across channels
now we have a pre-trained model that creates the layer before the last layer
that we saw in the slides
"""
my_new_model.add(InceptionResNetV2(weights='imagenet', include_top=False, pooling='avg'))
"""
we add a dense layer to make predictions,
we specify the number of nodes in this layer which in this case is
the number of classes,
then we want to apply the softmax function to turn it into probabilities 
"""
my_new_model.add(Dense(num_classes, activation='softmax'))

"""
we tell tensor flow not to train the first layer which is the  pre-trained model
because that's the model that was already pre-trained with the ImageNet data
"""
my_new_model.layers[0].trainable = False
"""
the compile command tells tensorflow how to update the relationships in the dense connections 
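
The snippet stops before the compile call the docstring describes; a minimal sketch of that step, with an optimizer and metrics chosen as plausible defaults rather than taken from the original:

# Sketch of the compile step described above (optimizer and metrics are
# reasonable defaults, not from the original snippet).
my_new_model.compile(optimizer='sgd',
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])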
Example #8
    #                          loss=tf.nn.sigmoid_cross_entropy_with_logits, metrics=['accuracy'])
    # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3)
    # reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=lr)
    # file_name = 'weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5'
    # save_model = tf.keras.callbacks.ModelCheckpoint('{}'.format(file_name), monitor='val_loss')
    # log_dir = "logs/fit/" + dt.datetime.now().strftime("%Y%m%d-%H%M%S")
    # tensorboard = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
    # model_resnet.fit(ds_img_train, epochs=epochs,
    #                  callbacks=[reduce_lr, early_stopping, save_model, tensorboard],
    #                  validation_data=ds_img_valid)

    print('InceptionResNetV2')  # runs with batch size of 32
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        base_model = InceptionResNetV2(weights='imagenet',
                                       include_top=False,
                                       input_shape=input_shape)
        x2 = Flatten()(base_model.get_output_at(-1))
        x2 = Dense(32, activation='relu')(x2)
        output2 = Dense(lab_dim, activation='sigmoid')(x2)
        model_resnet = Model(base_model.input, output2)
        model_resnet.compile(optimizer=tf.optimizers.Adam(learning_rate=lr),
                             loss=tf.nn.sigmoid_cross_entropy_with_logits,
                             metrics=['accuracy'])
        early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                          patience=5,
                                                          verbose=True)
        reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                         factor=0.2,
                                                         patience=3,
                                                         min_lr=lr,