from importlib import import_module

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras.optimizers import Adam


def layer_activation_model(model, layer_name="conv1_relu"):
    """Build a model that outputs the activations of the named layer."""
    layers = [l for l in model.layers if l.name == layer_name]

    assert len(layers) == 1, "Found {} layers with name '{}' instead of 1!".format(
        len(layers), layer_name)

    model = Model(inputs=model.input, outputs=layers[0].output)
    model.compile(Adam(), loss="mae", metrics=["mse"])
    # Work around a Keras threading issue; see
    # https://github.com/keras-team/keras/issues/6462.
    model._make_predict_function()

    return model
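
# A quick usage sketch; build_model and batch_of_images are hypothetical
# placeholders, not part of the original snippet:
full_model = build_model()
act_model = layer_activation_model(full_model, layer_name="conv1_relu")
activations = act_model.predict(batch_of_images)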

# FeatureExtractor and MODELS are defined elsewhere in the original project.
class KerasFeatureExtractor(FeatureExtractor):
    """A class for interfacing with Keras pretrained encoders.

    The first time a network type is used, its weights are downloaded.
    This may take some time.

    Supported networks are:
        Xception
        VGG16
        VGG19
        ResNet50
        ResNet101
        ResNet152
        ResNet50V2
        ResNet101V2
        ResNet152V2
        ResNeXt50
        ResNeXt101
        InceptionV3
    """
    def __init__(self, net_id, layer_spec="", ckpt_path="", name=""):
        """Initialize a KerasFeatureExtractor instance.
        
        Args:
            net_id: A string identifier of the network.
            layer_spec: The name of the layer to use for feature extraction.
            ckpt_path: A path to the stored model checkpoint.
            name: A string name of the network.
        Raises:
            ValueError: Unsupported network.
        """

        super(KerasFeatureExtractor, self).__init__(name)

        if net_id not in MODELS:
            raise ValueError("Unsupported network %s." % net_id)

        #K.clear_session()

        self.graph = tf.Graph()
        self.session = tf.Session(graph=self.graph)
        K.set_session(self.session)

        with self.graph.as_default():

            weights = 'imagenet' if not ckpt_path else ckpt_path
            enc_spec = MODELS[net_id]
            module = import_module('keras.applications.' + enc_spec.module)
            model_constr = getattr(module, enc_spec.net)
            self._model = model_constr(weights=weights,
                                       include_top=False,
                                       pooling=None)

            if layer_spec:
                layer = self._model.get_layer(layer_spec)
                self._model = Model(inputs=self._model.input,
                                    outputs=layer.output)

            # A Keras bug requires calling this function.
            # See https://github.com/keras-team/keras/issues/6462.
            self._model._make_predict_function()
            self._preprocess_input = getattr(module, 'preprocess_input')
            self._input_size = enc_spec.input_size

    def extract_features(self, images):
        """Extracts features from the images.

        Args:
            images: A Numpy Array of images from the source dataset.
        Returns:
            A Numpy Array of extracted features.
        """

        K.set_session(self.session)

        xs = np.asarray([self._preprocess_input(x) for x in images])
        # Run the prediction inside this extractor's dedicated graph.
        with self.graph.as_default():
            ys = self._model.predict(xs)

        return ys
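
# A usage sketch, assuming the MODELS registry contains a "ResNet50" entry;
# the layer name and dummy batch below are illustrative, not from the original.
extractor = KerasFeatureExtractor("ResNet50", layer_spec="activation_40")
dummy_batch = np.random.rand(4, 224, 224, 3) * 255.0  # stand-in for real images
features = extractor.extract_features(dummy_batch)
print(features.shape)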
Example #3
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.preprocessing.image import ImageDataGenerator

# mdl is a Keras model built earlier in the original source.
mdl.load_weights("my_model.h5")

train_datagen = ImageDataGenerator(
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    data_format="channels_first",
    horizontal_flip=False,
    fill_mode='nearest')

# channels_first input shape, matching the data generator's data_format.
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(3, 224, 224))
base_model.summary()

VGG_convolution_only = Model(inputs=base_model.input,
                             outputs=base_model.get_layer('block5_pool').output)
VGG_convolution_only._make_predict_function()
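
# A sketch of wiring the two pieces above together; X_train and y_train are
# hypothetical arrays in channels_first layout, e.g. X_train: (n, 3, 224, 224).
for x_batch, _ in train_datagen.flow(X_train, y_train, batch_size=8):
    feats = VGG_convolution_only.predict(x_batch)  # -> (8, 512, 7, 7)
    break  # flow() yields batches forever; one batch is enough for this sketch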



while True:
    input()  # wait for the user to press Enter before grabbing a frame
    # Read twice, presumably to flush a stale frame from the capture buffer.
    ret, img = webcam.read()
    ret, img = webcam.read()
    try:
        img = cnn.img_reprocess(img, crop_size=224, img_size=224, to_greyscale=False)
    except Exception:
        print("Can't find face! next...")
        continue
    # cv2.namedWindow('foobar')
    # cv2.imshow('foobar', img)
    # cv2.waitKey(0)
Example #4
import os

import numpy as np
from keras.layers import Dense, Input, LSTM
from keras.models import Model

# encoder_inputs and encoder_states are defined in the (omitted) encoder half.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy')

data = (max_seq_len, num_samples, epochs, batch_size, latent_dim, vocab_size)
model_location = os.path.join(here,
                              "model/bot-%d %dsamples (%d-%d-%d-%d).h5" % data)
model.load_weights(model_location)
model._make_predict_function()
model.summary()

encoder_model = Model(encoder_inputs, encoder_states)
encoder_model._make_predict_function()

decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs] + decoder_states)
decoder_model._make_predict_function()
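
# With the inference models built, sequences can be decoded with the usual
# greedy loop. target_token_index, reverse_target_token_index, and the
# '\t'/'\n' start/stop tokens are assumptions borrowed from the standard
# Keras seq2seq example; they are not shown in the original snippet.
def decode_sequence(input_seq):
    # Encode the input into the decoder's initial [h, c] state.
    states_value = encoder_model.predict(input_seq)

    # Seed the decoder with the (assumed) start-of-sequence token '\t'.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    target_seq[0, 0, target_token_index['\t']] = 1.0

    decoded = ""
    while True:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
        token_index = np.argmax(output_tokens[0, -1, :])
        char = reverse_target_token_index[token_index]
        if char == '\n' or len(decoded) >= max_seq_len:
            return decoded
        decoded += char
        # Feed the sampled token and updated states back into the decoder.
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, token_index] = 1.0
        states_value = [h, c]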
Example #5
File: cnn.py Project: tomgond/pyEye
def transfer_learning(images_dir=None,
                      index_path=None,
                      resample_k=10,
                      images_per_batch=4,
                      batch_size=40):
    # Note: the batch_size parameter above is overridden here.
    batch_size = resample_k * images_per_batch
    base_model = VGG16(weights='imagenet',
                       include_top=False,
                       input_shape=(3, 224, 224))
    base_model.summary()

    VGG_convolution_only = Model(
        inputs=base_model.input,
        outputs=base_model.get_layer('block5_pool').output)
    VGG_convolution_only._make_predict_function()
    train_set, val_set = random_train_val_split(index_path,
                                                images_dir,
                                                test_ratio=0.8)
    print("[?] Train set size = {0}".format(len(train_set)))
    print("[?] eval set size = {0}".format(len(val_set)))

    train_gen = DataGenerator(index_path,
                              images_dir,
                              predict_model=VGG_convolution_only,
                              subset=train_set,
                              resample_k=resample_k,
                              images_per_batch=images_per_batch)
    val_gen = DataGenerator(index_path,
                            images_dir,
                            predict_model=VGG_convolution_only,
                            subset=val_set,
                            resample_k=resample_k,
                            images_per_batch=images_per_batch)

    my_model = models.transfer_v2()
    # Several optimizers were tried; only adagrad is passed to compile() below.
    adam = keras.optimizers.Adam(lr=0.001,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=None,
                                 decay=0.0,
                                 amsgrad=False)
    adagrad = keras.optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
    rmsprop = keras.optimizers.RMSprop(lr=0.1, decay=0.01)
    # sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    my_model.compile(optimizer=adagrad,
                     loss=euc_dist_keras,
                     metrics=['accuracy'])

    epochs = 15
    print("Training model : \n"
          "Images per batch: {0}\n"
          "Train set images (including aug): {1}\n"
          "Val set images (including aug): {2}\n"
          "Train set steps: {3}\n"
          "Val set steps: {4}\n".format(images_per_batch,
                                        train_gen.total_images_with_aug,
                                        val_gen.total_images_with_aug,
                                        train_gen.steps_per_epoch,
                                        val_gen.steps_per_epoch))

    my_model.fit_generator(
        generator=train_gen.generate(),
        steps_per_epoch=train_gen.steps_per_epoch,
        validation_data=val_gen.generate(),
        validation_steps=val_gen.steps_per_epoch,
        # callbacks=[LearningRateScheduler(lr_schedule),
        #      ModelCheckpoint('model.h5', save_best_only=True)],
        epochs=epochs)
    my_model.save_weights("model.h5")

    os.system(
        "gsutil -m cp model.h5 gs://pyeye_bucket/models/selu_bigger_model_more_samples_train.h5"
    )
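
# euc_dist_keras is referenced above but not defined in this snippet; a
# minimal sketch of such a Euclidean-distance loss (an assumption, not the
# project's verbatim code):
from keras import backend as K

def euc_dist_keras(y_true, y_pred):
    # Row-wise Euclidean distance between predicted and true vectors.
    return K.sqrt(K.sum(K.square(y_true - y_pred), axis=-1, keepdims=True))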