Code example #1
 def create_MobileNet_with_drop(self, inp_shape, inp_tensor, output_len,
                                weight_path):
     initializer = tf.keras.initializers.glorot_uniform()
     mobilenet_model = mobilenet_v2.MobileNetV2(input_shape=inp_shape,
                                                alpha=1.0,
                                                include_top=True,
                                                weights=None,
                                                input_tensor=inp_tensor,
                                                pooling=None)
     mobilenet_model.layers.pop()
     x = mobilenet_model.get_layer(
         'global_average_pooling2d').output  # 1280
     x = Dense(output_len, name='O_L')(x)
     inp = mobilenet_model.input
     model = Model(inp, x)
     model.load_weights(weight_path)
     model.summary()
     '''revise model and add dropout'''
     model.layers.pop()
     x = model.get_layer('global_average_pooling2d').output  # 1280
     x = Dropout(0.5)(x)
     out_landmarks = Dense(output_len,
                           activation=keras.activations.linear,
                           use_bias=False,
                           kernel_initializer=initializer,
                           name='O_L')(x)
     inp = mobilenet_model.input
     revised_model = Model(inp, out_landmarks)
     revised_model.summary()
     revised_model.save_weights('W_ds_wflw_mn_base_with_drop.h5')
     revised_model.save('M_ds_wflw_mn_base_with_drop.h5')
     model_json = revised_model.to_json()
     with open("mobileNet_v2_main.json", "w") as json_file:
         json_file.write(model_json)
     return revised_model
Code example #2
def bpr_predict(model: Model,
                user_id: int,
                item_ids: list,
                user_layer='user_embedding',
                item_layer='item_embedding'):
    """
  Predict by multiplication user vector by item matrix

  :return: list of the scores
  """
    user_vector = model.get_layer(user_layer).get_weights()[0][user_id]
    item_matrix = model.get_layer(item_layer).get_weights()[0][item_ids]

    scores = np.dot(user_vector, item_matrix.T)

    return scores
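
A minimal usage sketch for bpr_predict; the hypothetical bpr_model stands in for a trained BPR-style Keras model whose embedding layers are named 'user_embedding' and 'item_embedding':

import numpy as np

# score a candidate set for one user and take the top ten items
candidate_items = list(range(1000))
scores = bpr_predict(bpr_model, user_id=42, item_ids=candidate_items)
top_items = np.argsort(scores)[::-1][:10]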
Code example #3
File: faster_rcnn.py Project: xiej23/tf-faster-rcnn
def get_model(feature_extractor, rpn_model, anchors, hyper_params, mode="training"):
    """Generating rpn model for given backbone base model and hyper params.
    inputs:
        feature_extractor = feature extractor layer from the base model
        rpn_model = tf.keras.model generated rpn model
        anchors = (total_anchors, [y1, x1, y2, x2])
            these values are in normalized format, between [0, 1]
        hyper_params = dictionary
        mode = "training" or "inference"

    outputs:
        frcnn_model = tf.keras.model
    """
    input_img = rpn_model.input
    rpn_reg_predictions, rpn_cls_predictions = rpn_model.output
    #
    roi_bboxes = RoIBBox(anchors, mode, hyper_params, name="roi_bboxes")([rpn_reg_predictions, rpn_cls_predictions])
    #
    roi_pooled = RoIPooling(hyper_params, name="roi_pooling")([feature_extractor.output, roi_bboxes])
    #
    output = TimeDistributed(Flatten(), name="frcnn_flatten")(roi_pooled)
    output = TimeDistributed(Dense(4096, activation="relu"), name="frcnn_fc1")(output)
    output = TimeDistributed(Dropout(0.5), name="frcnn_dropout1")(output)
    output = TimeDistributed(Dense(4096, activation="relu"), name="frcnn_fc2")(output)
    output = TimeDistributed(Dropout(0.5), name="frcnn_dropout2")(output)
    frcnn_cls_predictions = TimeDistributed(Dense(hyper_params["total_labels"], activation="softmax"), name="frcnn_cls")(output)
    frcnn_reg_predictions = TimeDistributed(Dense(hyper_params["total_labels"] * 4, activation="linear"), name="frcnn_reg")(output)
    #
    if mode == "training":
        input_gt_boxes = Input(shape=(None, 4), name="input_gt_boxes", dtype=tf.float32)
        input_gt_labels = Input(shape=(None, ), name="input_gt_labels", dtype=tf.int32)
        rpn_cls_actuals = Input(shape=(None, None, hyper_params["anchor_count"]), name="input_rpn_cls_actuals", dtype=tf.float32)
        rpn_reg_actuals = Input(shape=(None, 4), name="input_rpn_reg_actuals", dtype=tf.float32)
        frcnn_reg_actuals, frcnn_cls_actuals = RoIDelta(hyper_params, name="roi_deltas")(
                                                        [roi_bboxes, input_gt_boxes, input_gt_labels])
        #
        loss_names = ["rpn_reg_loss", "rpn_cls_loss", "frcnn_reg_loss", "frcnn_cls_loss"]
        rpn_reg_loss_layer = Lambda(train_utils.reg_loss, name=loss_names[0])([rpn_reg_actuals, rpn_reg_predictions])
        rpn_cls_loss_layer = Lambda(train_utils.rpn_cls_loss, name=loss_names[1])([rpn_cls_actuals, rpn_cls_predictions])
        frcnn_reg_loss_layer = Lambda(train_utils.reg_loss, name=loss_names[2])([frcnn_reg_actuals, frcnn_reg_predictions])
        frcnn_cls_loss_layer = Lambda(train_utils.frcnn_cls_loss, name=loss_names[3])([frcnn_cls_actuals, frcnn_cls_predictions])
        #
        frcnn_model = Model(inputs=[input_img, input_gt_boxes, input_gt_labels,
                                    rpn_reg_actuals, rpn_cls_actuals],
                            outputs=[roi_bboxes, rpn_reg_predictions, rpn_cls_predictions,
                                     frcnn_reg_predictions, frcnn_cls_predictions,
                                     rpn_reg_loss_layer, rpn_cls_loss_layer,
                                     frcnn_reg_loss_layer, frcnn_cls_loss_layer])
        #
        for layer_name in loss_names:
            layer = frcnn_model.get_layer(layer_name)
            frcnn_model.add_loss(layer.output)
            frcnn_model.add_metric(layer.output, name=layer_name, aggregation="mean")
        #
    else:
        bboxes, labels, scores = Decoder(hyper_params["variances"], hyper_params["total_labels"], name="faster_rcnn_decoder")(
                                         [roi_bboxes, frcnn_reg_predictions, frcnn_cls_predictions])
        frcnn_model = Model(inputs=input_img, outputs=[bboxes, labels, scores])
        #
    return frcnn_model
Code example #4
def test(name):
    # load saved model
    test_model = load_model(str(pathlib.Path(__file__).parent.absolute()) + "/saved_model_final", compile=False)

    # create the model which will extract last convolution layer, global average pooling and dense connections
    extractor = Model(inputs=test_model.inputs,
                      outputs=[test_model.get_layer('global_average_pooling2d').output,
                               test_model.get_layer('conv2d_4').output,
                               test_model.get_layer('dense').output])

    # load image
    im = load_image(str(pathlib.Path(__file__).parent.absolute()) + '/test_data/' + name, resize=True)

    # extract features from the image
    features = extractor(im)

    # print dense weights
    weights = np.squeeze(extractor.get_layer('dense').get_weights()[0])

    # multiply conv layers by weights
    mult = np.sum(features[1] * weights, axis=-1)

    # save activation
    plt.matshow(np.squeeze(mult), cmap='viridis')
    plt.colorbar()
    plt.savefig('activation.png')

    # get result
    print()
    if test_model(im) > 0.1:
        print("There are some tomatoes in the food")
    else:
        print("No tomatoes in the food")
Code example #5
def test_delete_layer_reuse():
    # Create all model layers
    input_1 = Input(shape=[3])
    dense_1 = Dense(3)
    dense_2 = Dense(3)
    dense_3 = Dense(3)
    dense_4 = Dense(3)
    # Create the model
    x = dense_1(input_1)
    x = dense_2(x)
    x = dense_3(x)
    x = dense_2(x)
    output_1 = dense_4(x)
    # TODO: use clean_copy once keras issue 4160 has been fixed
    # model_1 = utils.clean_copy(Model(input_1, output_1))
    model_1 = Model(input_1, output_1)
    # Create the expected modified model
    x = dense_1(input_1)
    x = dense_3(x)
    output_2 = dense_4(x)
    # model_2_exp = utils.clean_copy(Model(input_1, output_2))
    model_2_exp = Model(input_1, output_2)
    # Delete layer dense_2
    model_2 = operations.delete_layer(model_1,
                                      model_1.get_layer(dense_2.name),
                                      copy=False)
    # Compare the modified model with the expected modified model
    assert compare_models(model_2, model_2_exp)
Code example #6
    def build(self):
        const_initializer = tf.keras.initializers.Constant(1.)
        # input layer
        scene = Input(name='input', shape=(Quantifier.scene_len, len(symbols)))
        # conv
        conv = Conv1D(filters=self._num_kernels,
                      kernel_size=1,
                      kernel_initializer=const_initializer,
                      trainable=False,
                      use_bias=False,
                      name='conv')(scene)
        # split the conv output into one tensor per kernel
        splitters = tf.split(conv, self._num_kernels, axis=2, name='split')
        # flats
        flats = [
            Flatten(name='flat_{i}'.format(i=i))(splitters[i])
            for i in range(self._num_kernels)
        ]
        # dropouts after convolutions
        dropouts = [
            Dropout(rate=0.15, name='dropout_{i}'.format(i=i))(flats[i])
            for i in range(self._num_kernels)
        ]

        # single neuron summarizers
        denses = [
            Dense(
                1,
                kernel_initializer=const_initializer,
                use_bias=False,
                trainable=False,
                # activation='relu',
                name='dense_{i}'.format(i=i))(dropouts[i])
            for i in range(self._num_kernels)
        ]
        # merge feature extractors
        merge = tf.concat(denses, axis=1, name='concatenate')
        # softmax layer
        softmax = Dense(len(self._quantifier_names),
                        kernel_initializer=const_initializer,
                        use_bias=False,
                        trainable=True,
                        activation='softmax',
                        name="softmax")(merge)
        # inputs outputs
        model = Model(inputs=scene, outputs=softmax)
        # set weights
        conv = model.get_layer('conv')
        conv.set_weights([
            np.array([self._kernels
                      ]).transpose().reshape(1, 4, self._num_kernels)
        ])
        print(conv.get_weights())
        # compile model
        model.compile(
            loss='categorical_crossentropy',
            optimizer='adam',
            metrics=[tf.keras.metrics.Precision(),
                     tf.keras.metrics.Recall()])
        return model
Code example #7
    def get_model_dimensions(model: Model,
                             embedding_layer_name: str = "embedding") -> tuple:
        dims: tuple = (-1, -1, -1)
        try:
            # Get layers
            input_layer: InputLayer = model.get_layer(index=0)
            embedding_layer: Dense = model.get_layer(name=embedding_layer_name)
            output_layer: Activation = model.get_layer(index=-1)

            # Get dims
            dims: tuple = (input_layer.output_shape,
                           embedding_layer.output_shape,
                           output_layer.output_shape)
        except Exception as e:
            logger.error(e)
        return dims
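
A quick shape check for get_model_dimensions against a toy functional model. The layer name 'embedding' and calling the helper as a standalone/static method are assumptions, and recent Keras versions may report the InputLayer shape as a one-element list:

import tensorflow as tf
from tensorflow.keras.layers import Activation, Dense, Input
from tensorflow.keras.models import Model

inp = Input(shape=(16,))
emb = Dense(8, name='embedding')(inp)
out = Activation('softmax')(Dense(4)(emb))
toy_model = Model(inp, out)

# roughly ((None, 16), (None, 8), (None, 4)): input, embedding, output
print(get_model_dimensions(toy_model))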
Code example #8
def layer_test_helper_merge_2d(layer, channel_index, data_format):
    # This should test that the output is the correct shape so it should pass
    # into a Dense layer rather than a Conv layer.
    # The weighted layer is the previous layer.
    # Create model
    input_shape = list(random.randint(10, 20, size=3))
    input_1 = Input(shape=input_shape)
    input_2 = Input(shape=input_shape)
    x = Conv2D(3, [3, 3], data_format=data_format, name='conv_1')(input_1)
    y = Conv2D(3, [3, 3], data_format=data_format, name='conv_2')(input_2)
    x = layer([x, y])
    x = Flatten()(x)
    main_output = Dense(5, name='dense_1')(x)
    model = Model(inputs=[input_1, input_2], outputs=main_output)

    # Delete channels
    del_layer = model.get_layer('conv_1')
    del_layer_2 = model.get_layer('conv_2')
    surgeon = Surgeon(model)
    surgeon.add_job('delete_channels', del_layer, channels=channel_index)
    surgeon.add_job('delete_channels', del_layer_2, channels=channel_index)
    new_model = surgeon.operate()
    new_w = new_model.get_layer('dense_1').get_weights()

    # Calculate next layer's correct weights
    flat_sz = np.prod(layer.get_output_shape_at(0)[1:])
    channel_count = getattr(del_layer, utils.get_channels_attr(del_layer))
    channel_index = [i % channel_count for i in channel_index]
    if data_format == 'channels_first':
        delete_indices = [
            x * flat_sz // channel_count + i for x in channel_index
            for i in range(
                0,
                flat_sz // channel_count,
            )
        ]
    elif data_format == 'channels_last':
        delete_indices = [
            x + i for i in range(0, flat_sz, channel_count)
            for x in channel_index
        ]
    else:
        raise ValueError('data_format must be "channels_first" or "channels_last"')
    correct_w = model.get_layer('dense_1').get_weights()
    correct_w[0] = np.delete(correct_w[0], delete_indices, axis=0)

    assert weights_equal(correct_w, new_w)
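
This helper belongs to keras-surgeon's test suite; a hypothetical invocation, pruning two channels from each conv branch feeding an Add merge:

from keras.layers import Add

# delete channels 0 and 2 of each conv layer, then verify the Dense weights
layer_test_helper_merge_2d(Add(), channel_index=[0, 2],
                           data_format='channels_last')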
Code example #9
File: lstm.py Project: ZhiliangWu/etips
def build_bandit_lstm_classifier(timesteps=32,
                                 feature_size=784,
                                 output_shape=3,
                                 repr_size=64,
                                 activation='tanh',
                                 inp_drop=0.0,
                                 re_drop=0.0,
                                 l2_coef=1e-3,
                                 lr=3e-4,
                                 translation=0.0):
    seq_inputs = layers.Input(shape=(timesteps, feature_size),
                              name='Sequential_Input')
    x = layers.Masking(mask_value=0, name='Masking')(seq_inputs)
    x = layers.LSTM(repr_size,
                    activation=activation,
                    use_bias=True,
                    dropout=inp_drop,
                    recurrent_dropout=re_drop,
                    return_sequences=False,
                    name='Sequential_Representation')(x)
    class_pred = layers.Dense(output_shape,
                              activation='softmax',
                              use_bias=True,
                              kernel_regularizer=l2(l2_coef),
                              name='Class_Prediction')(x)
    action = layers.Input(shape=(output_shape, ),
                          name='Action_Input',
                          dtype=tf.int32)
    propen = layers.Input(shape=(), name='Propensity_Input', dtype=tf.float32)
    delta = layers.Input(shape=(), name='Delta_Input', dtype=tf.float32)
    ips_loss = IpsLossLayer(translation=translation, name='ipsloss')(
        [class_pred, action, propen, delta])
    m = Model(inputs=[seq_inputs, action, propen, delta],
              outputs=ips_loss,
              name='training')
    m.add_loss(ips_loss)
    m.compile(optimizer=Adam(lr=lr))
    test_m = Model(inputs=m.get_layer('Sequential_Input').input,
                   outputs=m.get_layer('Class_Prediction').output,
                   name='testing')
    test_m.compile(optimizer=Adam(lr=lr),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])

    print('model is built and compiled')

    return m, test_m
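
A sketch of training with the paired models from build_bandit_lstm_classifier; all inputs below are synthetic and only illustrate the expected shapes. Since the IPS loss is attached via add_loss, fit needs no target:

import numpy as np

m, test_m = build_bandit_lstm_classifier(timesteps=32, feature_size=784,
                                         output_shape=3)
X = np.random.rand(64, 32, 784).astype('float32')
actions = np.eye(3, dtype='int32')[np.random.randint(0, 3, 64)]  # one-hot actions
propensities = np.random.uniform(0.1, 1.0, 64).astype('float32')
deltas = np.random.uniform(0.0, 1.0, 64).astype('float32')
m.fit([X, actions, propensities, deltas], epochs=1)
probs = test_m.predict(X)  # class probabilities for evaluation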
Code example #10
def load_model():
    global model

    model = tf.keras.models.load_model('/static/models/' + MODEL)

    layer_name = 'embedding_4'
    model = Model(inputs=model.input,
                  outputs=model.get_layer(layer_name).output)
Code example #11
def get_vgg_layer(model: Model,
                  layer_name: str,
                  model_name: str = None) -> keras.models.Model:
    layer = model.get_layer(layer_name)
    try:
        output = layer.get_output_at(1)
    except Exception:
        output = layer.get_output_at(0)
    return keras.models.Model(model.layers[0].input, output, name=model_name)
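
A usage sketch for get_vgg_layer, assuming a standard keras.applications VGG16 (the layer name 'block3_conv3' exists in that architecture; weights are omitted to keep the sketch offline):

from keras.applications.vgg16 import VGG16

vgg = VGG16(weights=None, include_top=False)
features = get_vgg_layer(vgg, 'block3_conv3', model_name='vgg_block3')
features.summary()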
Code example #12
 def __generate_model():
     model = EfficientNetB7(include_top=False, input_shape=config.SEGMENTER_IM_SIZE)
     model = Model(inputs=model.input, outputs=model.output)
     layer = GlobalAveragePooling2D(name="top_g_a_pool")(model.output)
     layer = Dropout(rate=0.7, name="dropout")(layer)
     layer = Dense(config.CLASS_NUMBER, activation="softmax", name="output")(layer)
     model = Model(inputs=model.input, outputs=layer)
     model.load_weights(config.FE_WEIGHTS_PATH)
     model = Model(inputs=model.input, outputs=model.get_layer("top_g_a_pool").output)
     return model
Code example #13
def BNInception(end_layer=None):
    config = configparser.ConfigParser()
    config.read(expanded_join('config.ini'))

    model_path = config['PROJECT_FOLDERS']['DATA_PATH']
    model = load_model(expanded_join(model_path, 'BN-Inception_notop.h5'))

    if end_layer is not None:
        model = Model(inputs=model.input, outputs=model.get_layer(name=end_layer).output)

    return model
Code example #14
def get_model(use_model):
    if use_model == 'inception':
        imgsize = (299, 299)
        model = InceptionV3(weights='imagenet', include_top=False)
    elif use_model == 'vgg16':
        imgsize = (224, 224)
        model = VGG16(weights='imagenet', include_top=True)
        # take the fc2 output: the features right before the softmax layer
        model = Model(inputs=model.input, outputs=model.get_layer('fc2').output)
    else:
        raise NotImplementedError('model ' + use_model + ' is not there')
    return model, imgsize
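
A short call sketch; 'fc2' is the 4096-unit penultimate fully connected layer of the stock VGG16, so the returned model emits 4096-dim features:

model, imgsize = get_model('vgg16')
# model now maps a (224, 224, 3) image batch to 4096-dim fc2 features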
Code example #15
def GoogleNet(end_layer=None):
    config = configparser.ConfigParser()
    config.read(expanded_join('config.ini'))

    model_path = config['PROJECT_FOLDERS']['DATA_PATH']
    model = load_model(expanded_join(model_path, 'GoogleNet_notop.h5'), custom_objects={'LRN': LRN})

    if end_layer is not None:
        model = Model(inputs=model.input, outputs=model.get_layer(name=end_layer).output)

    return model
Code example #16
def get_cam_model(model_class,
                  num_classes,
                  input_size=224,
                  last_conv_layer='activation_49',
                  pred_layer='fc1000'):
    model = model_class(input_shape=(input_size, input_size, 3))
    model.summary()

    final_params = model.get_layer(pred_layer).get_weights()
    final_params = (final_params[0].reshape(1, 1, -1,
                                            num_classes), final_params[1])

    last_conv_output = model.get_layer(last_conv_layer).output
    x = UpSampling2D(size=(32, 32), interpolation='bilinear')(last_conv_output)
    x = Conv2D(filters=num_classes, kernel_size=(1, 1),
               name='predictions_2')(x)

    cam_model = Model(inputs=model.input, outputs=[model.output, x])
    cam_model.get_layer('predictions_2').set_weights(final_params)
    return cam_model
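
A sketch of how get_cam_model might be invoked, assuming an older Keras build in which ResNet50's final activation and prediction layers carry the default names 'activation_49' and 'fc1000':

from keras.applications.resnet50 import ResNet50

# the class itself is passed in; get_cam_model instantiates it internally
cam_model = get_cam_model(ResNet50, num_classes=1000)
# cam_model(images) yields [class predictions, upsampled class activation maps]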
Code example #17
class PC_Space_A(BaseModel):
    
    def __init__(self, latent_dim, beta, 
                 encoder_layer_dim, decoder_layer_dims):
        self.LATENT_DIM = latent_dim
        self.BETA = beta
        self.ENCODER_LAYER_DIM = encoder_layer_dim
        self.DECODER_LAYER_DIMS = decoder_layer_dims
        self.INPUT_DIM = PREMISE_TOKEN_DIMENSION
        self.OUTPUT_DIM = np.load(TRAINING() + 'PC_train_conjecture_token_bag.npy').shape[1]
        
        # recursive encoder
        token = Input(shape=(PREMISE_TOKEN_DIMENSION,), name='input_token')
        left = Input(shape=(PREMISE_TOKEN_DIMENSION,), name='input_left')
        right = Input(shape=(PREMISE_TOKEN_DIMENSION,), name='input_right')
        
        token_h = Dense(PREMISE_TOKEN_DIMENSION, activation='relu')(token)
        left_h = Dense(PREMISE_TOKEN_DIMENSION, activation='relu')(left)
        right_h = Dense(PREMISE_TOKEN_DIMENSION, activation='relu')(right)
        
        tree = Add()([token_h, left_h, right_h])
        tree = Activation('relu', name='tree_encoding')(tree)

        # build latent distribution
        h = Dense(self.ENCODER_LAYER_DIM, activation='relu')(tree)
        
        z_mean = Dense(self.LATENT_DIM)(h)
        z_log_std = Dense(self.LATENT_DIM)(h)
        z_mean, z_log_std = KLDivergenceLayer(beta=self.BETA)([z_mean, z_log_std])
        z_std = Lambda(lambda t: K.exp(.5*t))(z_log_std)
        
        # build dist encoder
        self.distencoder = Model(inputs=[token, left, right], outputs=z_mean, name='distencoder')
        
        # build distribution reparameterization (move noise out of gradient)
        noise = Input(tensor=K.random_normal(stddev=1.0, shape=(K.shape(token)[0], self.LATENT_DIM)))
        z_noise = Multiply()([z_std, noise])
        z = Add(name='latent_space')([z_mean, z_noise])
        
        # build decoder
        decoder = Sequential()
        decoder.add(Dense(self.DECODER_LAYER_DIMS[0], input_dim=self.LATENT_DIM, activation='relu'))
        for dim in self.DECODER_LAYER_DIMS[1:]:
            decoder.add(Dense(dim, activation='relu'))
        decoder.add(Dense(self.OUTPUT_DIM, activation='softmax', name='classifier'))
        self.decoder = decoder
        
        # combine model
        self.model = Model(inputs=[token, left, right, noise], outputs=decoder(z), name='pc_space_a')
        self.model.compile(optimizer='rmsprop', loss=nll)
        
        # build encoder
        self.encoder = self.model.get_layer('tree_encoding').output
Code example #18
def get_embeddings_model(embedding_matrix):
    hypo_input = Input(shape=(1, ))
    hyper_input = Input(shape=(1, ))

    word_embedding = Embedding(embedding_matrix.shape[0],
                               embedding_matrix.shape[1],
                               name='TermEmbedding',
                               embeddings_constraint=UnitNorm(axis=1))

    hypo_embedding = word_embedding(hypo_input)
    hyper_embedding = word_embedding(hyper_input)

    embedding_model = Model(inputs=[hypo_input, hyper_input],
                            outputs=[hypo_embedding, hyper_embedding])

    # inject pre-trained embeddings into this mini, reusable model/layer
    embedding_model.get_layer(name='TermEmbedding').set_weights(
        [embedding_matrix])
    embedding_model.get_layer(name='TermEmbedding').trainable = False

    return embedding_model
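
Calling get_embeddings_model with a synthetic matrix, purely as a shape check (the vocabulary size and dimension below are arbitrary):

import numpy as np

embedding_matrix = np.random.rand(5000, 100)  # 5000 terms, 100 dims (synthetic)
embedding_model = get_embeddings_model(embedding_matrix)
hypo_vec, hyper_vec = embedding_model.predict([np.array([1]), np.array([2])])
print(hypo_vec.shape, hyper_vec.shape)  # (1, 1, 100) each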
Code example #19
def get_embeddings_model(embeddings_matrix, synonym_sample_n, trainable=False):
    hypo_input = Input(shape=(1, ), name='Hyponym')
    neg_input = Input(shape=(synonym_sample_n, ), name='Negative')
    hyper_input = Input(shape=(1, ), name='Hypernym')

    embeddings_layer_1 = Embedding(embeddings_matrix.shape[0],
                                   embeddings_matrix.shape[1],
                                   input_length=1,
                                   name='TermEmbedding',
                                   embeddings_constraint=UnitNorm(axis=1))

    embeddings_layer_2 = Embedding(embeddings_matrix.shape[0],
                                   embeddings_matrix.shape[1],
                                   input_length=synonym_sample_n,
                                   name='NegEmbedding',
                                   embeddings_constraint=UnitNorm(axis=1))

    hypo_embedding = embeddings_layer_1(hypo_input)
    neg_embedding = embeddings_layer_2(neg_input)
    hyper_embedding = embeddings_layer_1(hyper_input)

    embedding_model = Model(
        inputs=[hypo_input, neg_input, hyper_input],
        outputs=[hypo_embedding, neg_embedding, hyper_embedding])

    # inject pre-trained embeddings into this mini, reusable model/layer
    embedding_model.get_layer(name='TermEmbedding').set_weights(
        [embeddings_matrix])
    embedding_model.get_layer(name='TermEmbedding').trainable = trainable

    embedding_model.get_layer(name='NegEmbedding').set_weights(
        [embeddings_matrix])
    embedding_model.get_layer(name='NegEmbedding').trainable = False

    return embedding_model
Code example #20
    def load_model_segment(self):
        mbl = applications.mobilenet.MobileNet(weights=None,
                                               include_top=False,
                                               input_shape=(160, 320, 3))
        x = mbl.output
        model_tmp = Model(inputs=mbl.input, outputs=x)
        layer5 = model_tmp.get_layer('conv_pw_5_relu').output
        layer8 = model_tmp.get_layer('conv_pw_8_relu').output
        layer13 = model_tmp.get_layer('conv_pw_13_relu').output

        fcn14 = Conv2D(filters=2, kernel_size=1, name='fcn14')(layer13)
        fcn15 = Conv2DTranspose(filters=layer8.get_shape().as_list()[-1],
                                kernel_size=4,
                                strides=2,
                                padding='same',
                                name='fcn15')(fcn14)
        fcn15_skip_connected = Add(name="fcn15_plus_vgg_layer8")(
            [fcn15, layer8])
        fcn16 = Conv2DTranspose(filters=layer5.get_shape().as_list()[-1],
                                kernel_size=4,
                                strides=2,
                                padding='same',
                                name="fcn16_conv2d")(fcn15_skip_connected)
        # Add skip connection
        fcn16_skip_connected = Add(name="fcn16_plus_vgg_layer5")(
            [fcn16, layer5])
        # Upsample again
        fcn17 = Conv2DTranspose(filters=2,
                                kernel_size=16,
                                strides=(8, 8),
                                padding='same',
                                name="fcn17",
                                activation="softmax")(fcn16_skip_connected)
        m = Model(inputs=mbl.input, outputs=fcn17)
        m.load_weights(self.path +
                       'model-mobilenet-iter2-pretrain-data-bdd.h5')
        m.predict(np.zeros((1, 160, 320, 3), dtype=np.float32))
        print("Model loaded")
        return m
Code example #21
def get_keras_functor(model_path="my/path/to/model.h5"):
    """
    Create CNN-model structure for Heatmap
    """
    custom_objects = {"GlorotUniform": tf.keras.initializers.glorot_uniform}
    model = load_model(model_path, custom_objects)

    img_in = Input(shape=(120, 160, 3), name='img_in')
    x = img_in
    x = Convolution2D(24, (5, 5),
                      strides=(2, 2),
                      activation='relu',
                      name='conv2d_1')(x)
    x = Convolution2D(32, (5, 5),
                      strides=(2, 2),
                      activation='relu',
                      name='conv2d_2')(x)
    x = Convolution2D(64, (5, 5),
                      strides=(2, 2),
                      activation='relu',
                      name='conv2d_3')(x)
    x = Convolution2D(64, (3, 3),
                      strides=(2, 2),
                      activation='relu',
                      name='conv2d_4')(x)
    conv_5 = Convolution2D(64, (3, 3),
                           strides=(1, 1),
                           activation='relu',
                           name='conv2d_5')(x)
    convolution_part = Model(inputs=[img_in], outputs=[conv_5])

    for layer_num in ('1', '2', '3', '4', '5'):
        convolution_part.get_layer('conv2d_' + layer_num).set_weights(
            model.get_layer('conv2d_' + layer_num).get_weights())
    inp = convolution_part.input  # input placeholder
    outputs = [layer.output
               for layer in convolution_part.layers][1:]  # all layer outputs
    functor = K.function([inp], outputs)
    return functor
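
A usage sketch for get_keras_functor; the model path stays the placeholder from the function signature, and the input shape matches the (120, 160, 3) img_in layer:

import numpy as np

functor = get_keras_functor("my/path/to/model.h5")
img = np.random.rand(1, 120, 160, 3).astype('float32')
conv_activations = functor([img])  # outputs of conv2d_1 through conv2d_5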
Code example #22
File: autoencoder.py Project: safanna/pet_projects
class Autoencoder:
    def __init__(self, input_dim, hid_dim, epoch=500, batch_size=10):
        self.epoch = epoch
        self.batch_size = batch_size
        self.hid_dim = hid_dim
        x = Input((input_dim,))
        dense = Dense(self.hid_dim)(x)
        encoded = Activation('relu')(dense)
        decoded = Dense(input_dim, name='decoded')(encoded)
        self.model = Model(inputs=x, outputs=[decoded, encoded])
        
    def train(self, data, learning_rate=0.001):
        train_op = RMSprop(learning_rate)
        self.model.compile(loss={"decoded": self.root_mean_squared_error}, optimizer=train_op)

        self.model.fit(data, data, epochs=self.epoch, batch_size=self.batch_size)
        self.model.save("./autoencoder.h5")
            
    @staticmethod
    def root_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)) 
        
    def test(self, data):
        pred = self.model.predict(data)
        print('input', data)
        print('compressed', pred[1])
        print('reconstructed', pred[0])
        return pred

    def classify(self, data, labels, ind=7):
        reconstructed, hidden = self.model.predict(data)
        print('data', np.shape(data))
        print('reconstructed', np.shape(reconstructed))
        loss = np.sqrt(np.mean(np.square(data - reconstructed), axis=1))
        print('loss', np.shape(loss))
        subj_indices = np.where(labels == ind)[0]
        not_subj_indices = np.where(labels != ind)[0]
        subj_loss = np.mean(loss[subj_indices])
        not_subj_loss = np.mean(loss[not_subj_indices])
        print('subj', subj_loss)
        print('not subj', not_subj_loss)
        return hidden

    
    def decode(self, encoding):
        inputs = Input((self.hid_dim,))
        outputs = self.model.get_layer('decoded')(inputs)
        model_dec = Model(inputs, outputs)
        reconstructed = model_dec.predict(encoding)
        img = np.reshape(reconstructed, (32, 32))
        return img
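
An end-to-end sketch of the Autoencoder class on synthetic data; input_dim=1024 matches the 32x32 reshape inside decode:

import numpy as np

data = np.random.rand(100, 1024).astype('float32')  # 100 flattened 32x32 samples
ae = Autoencoder(input_dim=1024, hid_dim=64, epoch=5, batch_size=10)
ae.train(data)
reconstructed, compressed = ae.model.predict(data)
img = ae.decode(compressed[:1])  # one 64-dim encoding back to a 32x32 image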
Code example #23
    def __init__(self,
                 model: KM.Model,
                 val_generator: DataGenerator,
                 layers: int = 1):
        super(EvalCallback, self).__init__()

        self.val_generator = val_generator
        # Extract model object relevant to evaluate classification performance
        self.evaluator = KM.Model(
            inputs=model.input,
            outputs=model.get_layer("logits").output,
        )
        # Compile the model object with losses and performance metrics
        self.evaluator.compile(loss=None,
                               metrics=MultiLayerAccuracy(layers=layers))
Code example #24
File: factory.py Project: christian-rncl/mtcnn
    def build_P_R_O_nets_from_file(self, weights_file, include_top=True):
        weights = np.load(weights_file, allow_pickle=True).tolist()

        p_net = self.build_pnet()
        r_net = self.build_rnet()
        o_net = self.build_onet()

        p_net.set_weights(weights['pnet'])
        r_net.set_weights(weights['rnet'])
        o_net.set_weights(weights['onet'])

        if not include_top:
            o_net = Model(o_net.input, o_net.get_layer(name='conv2d_11').output)

        return p_net, r_net, o_net
Code example #25
class AAE(Model):
    def __init__(self, checkpoint):
        super(AAE, self).__init__()
        self.latent_dim = config.NZ
        self.inference_net = invG().build()
        self.generator_net = checkpoint.generator
        ori_discriminator = checkpoint.discriminator
        self.discriminator = Model(
            inputs=ori_discriminator.inputs,
            outputs=[ori_discriminator.get_layer('conv2d_3').output])
        self.optimizer = tf.keras.optimizers.Adam(config.LR * 10)

    @tf.function
    def decode(self, latent):
        return self.generator_net(latent)

    @tf.function
    def encode(self, x):
        return self.inference_net(x)

    @tf.function
    def get_disc_last_conv(self, x):
        # self.discriminator was rebuilt above to end at 'conv2d_3', so
        # calling it on x returns that layer's activations (the original
        # body returned the symbolic tensor and ignored x)
        return self.discriminator(x)

    @tf.function
    def compute_loss(self, x):
        latent = self.encode(x)
        _x = self.decode(latent)
        dis_ori = self.discriminator(x)
        dis_gen = self.discriminator(_x)
        ld = tf.reduce_mean(tf.square(dis_ori - dis_gen))
        lr = tf.reduce_mean(tf.square(x - _x))
        return 100 * config.LAMBDA * lr + (1 - config.LAMBDA) * ld

    @tf.function
    def compute_gradients(self, x):
        with tf.GradientTape() as tape:
            loss = self.compute_loss(x)
        return tape.gradient(loss, self.trainable_variables)

    @tf.function
    def train(self, train_x):
        gradients = self.compute_gradients(train_x)
        self.optimizer.apply_gradients(zip(gradients,
                                           self.trainable_variables))

    def represent(self, x):
        return self.decode(self.encode(x))
Code example #26
    def get_head_with_pretrained_weights(self,
                                         encoder_input,
                                         pretrained=True,
                                         pretrained_layer_1=True,
                                         **kwargs):
        head = self._build_fusion(encoder_input,
                                  self._build_encoder(encoder_input))
        head_model = Model(encoder_input, head, name=self.name + "_head")

        if not pretrained:
            # for layer in head_model.layers:
            #     print(layer.name, layer.trainable)
            return head_model

        # load weights if pretrained is True
        head_model.load_weights(self.checkpoint_path,
                                by_name=True,
                                skip_mismatch=True)
        for layer in head_model.layers:
            if layer.name in encoder_input.name or layer.name == "conv2d_0":
                layer.trainable = True
            else:
                layer.trainable = False
            # print(layer.name, layer.trainable)

        if not pretrained_layer_1:
            return head_model

        model_layer_1 = self.model.get_layer(index=1)
        head_layer_1 = head_model.get_layer(index=1)

        [pretrained_weights, pretrained_bias] = model_layer_1.get_weights()
        new_weights = [
            concatenate(
                [pretrained_weights, pretrained_weights, pretrained_weights],
                axis=2) / 3, pretrained_bias
        ]
        assert tuple([layer.shape
                      for layer in new_weights]) == tuple([layer.shape
                                                           for layer in head_layer_1.get_weights()]), \
            "Shape for layer 1 mismatch, try using pretrained_layer_1 = False"
        head_layer_1.set_weights(new_weights)

        head_layer_1.trainable = kwargs.get("layer_1_trainable_param", True)

        return head_model
Code example #27
def create_multi_inceptionv3(inceptionv3_model, inp_size, rz_size, class_num):
    inputs = Input(shape=(inp_size, inp_size, 3))
    resize = Lambda(Resize, (rz_size, rz_size, 3))(inputs)

    inception_v3 = inceptionv3_model.get_layer('inception_v3')
    conv = inception_v3(resize)

    # resize back to the original picture size so it can be concatenated for imshow
    resized_conv = Lambda(Resize, (rz_size, rz_size, 3))(conv)

    GAV = GlobalAveragePooling2D()(conv)

    dense = inceptionv3_model.get_layer('dense')
    outputs = dense(GAV)
    middle_model = Model(inputs, [resized_conv, outputs])

    # the last dense layer's weights, shape 2048 x class_num
    w = middle_model.get_layer('dense').weights[0].numpy()
    return middle_model, w
Code example #28
File: utils.py Project: torresxavier/cgm-ml
class GradCAM:
    def __init__(self, model, layerName):
        self.model = model
        self.layerName = layerName

        self.gradModel = Model(inputs=[self.model.inputs],
                               outputs=[
                                   self.model.get_layer(self.layerName).output,
                                   model.output
                               ])

    def compute_heatmap(self, image, classIdx, eps=1e-8):

        with tf.GradientTape() as tape:
            tape.watch(self.gradModel.get_layer(self.layerName).output)
            inputs = tf.cast(image, tf.float32)
            (convOutputs, predictions) = self.gradModel(inputs)
            if len(predictions) == 1:
                loss = predictions[0]
            else:
                loss = predictions[:, classIdx]

        grads = tape.gradient(loss, convOutputs)

        castConvOutputs = tf.cast(convOutputs > 0, "float32")
        castGrads = tf.cast(grads > 0, "float32")
        guidedGrads = castConvOutputs * castGrads * grads

        convOutputs = convOutputs[0]
        guidedGrads = guidedGrads[0]

        weights = tf.reduce_mean(guidedGrads, axis=(0, 1))
        cam = tf.reduce_sum(tf.multiply(weights, convOutputs), axis=-1)

        (w, h) = (image.shape[2], image.shape[1])
        heatmap = cv2.resize(cam.numpy(), (w, h))

        numer = heatmap - np.min(heatmap)
        denom = (heatmap.max() - heatmap.min()) + eps
        heatmap = numer / denom
        heatmap = (heatmap * 255).astype("float32")
        return heatmap
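
A usage sketch for the GradCAM helper; the classifier and image batch below are hypothetical, and the layer name must match a conv layer in the given model:

import numpy as np

gradcam = GradCAM(classifier, layerName='conv2d_4')
class_idx = int(np.argmax(classifier.predict(image_batch)))
heatmap = gradcam.compute_heatmap(image_batch, classIdx=class_idx)
# heatmap matches the input's spatial size, scaled to [0, 255]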
Code example #29
def extract_all(in_dir, out_dir, model_filepath):
    extractor = load_model(model_filepath)
    extractor = Model(extractor.input,
                      extractor.get_layer(name="encod_dense").output)
    generator = get_generator(in_dir,
                              batch_size=256,
                              image_size=(32, 32),
                              preprocessing_function=None,
                              rescale=1.0 / 255.0)

    if os.path.exists(out_dir):
        print("Removing output directory. Continue? [y/n]")
        if input().lower() == 'y':
            print("Deleting...")
            shutil.rmtree(out_dir)
        else:
            print("Aborting...")
            return
    os.makedirs(out_dir)

    folds_files = [
        fold_file.split(os.sep)[-1]
        for fold_file in glob(os.path.join(in_dir, "fold_*.txt"))
    ]
    for fold_file in folds_files:
        shutil.copyfile(os.path.join(in_dir, fold_file),
                        os.path.join(out_dir, fold_file))

    classes_map = {val: key for key, val in generator.class_indices.items()}
    for _ in range(len(generator)):
        x, y = generator.next()
        out = extractor.predict(x)
        for cur_sample, cur_class in zip(out, y):
            cur_out_file = os.path.join(out_dir,
                                        classes_map[cur_class] + ".npy")
            if os.path.isfile(cur_out_file):
                cur_arr = np.load(cur_out_file)
                cur_arr = np.concatenate(
                    (cur_arr, np.expand_dims(cur_sample, axis=0)), axis=0)
            else:
                cur_arr = np.expand_dims(cur_sample, axis=0)
            np.save(cur_out_file, cur_arr)
Code example #30
def visualize_coattention_with_features(path_to_model, level_name,
                                        question_features, image_features,
                                        images, titles,
                                        image_rows, image_cols,
                                        target_shape):
    """ Notice: It's rather unlikely to have the features already. This is more a testing showcase. """
    
    image_features_shape = np.shape(image_features)
    image_feature_size = image_features_shape[1]
    
    question_features_shape = np.shape(question_features)
    question_feature_size = question_features_shape[1]
     
    model = coattention_affinity_model(level_name, question_feature_size, image_feature_size, __NO_DROPOUT, 512)
    model.load_weights(path_to_model, by_name=True)
    model = Model(inputs=model.input, outputs=model.get_layer(level_name + "_image_attention").output)
    
    attention = model.predict({"image_features": image_features, "question_features": question_features})
    attention = upscale_attention(attention, target_shape)
    
    show_many_with_alpha(images, attention, image_rows, image_cols, figsize=(4, 3), titles=titles)