class State:
    """Agent state: a VGG16 feature of the current bounding-box crop
    concatenated with a one-hot encoding of the recent action history.
    """

    # Width of one history slot's one-hot encoding (number of actions).
    # Fixed: was hard-coded as 9 in two places and as the bare constant 90
    # (= history_length * 9) in get_history_feature; now derived once so the
    # vector stays consistent if history_length changes.
    ACTIONS_PER_STEP = 9

    # NOTE(review): loading the model at class-definition time is a
    # module-import side effect; "vgg16.h5" must exist in the CWD.
    cnn_model = load_model(os.path.join("", "vgg16.h5"))
    # Backend function: input image -> activations of layer 20.
    feature_extractor = K.function([cnn_model.layers[0].input],
                                   [cnn_model.layers[20].output])

    def __init__(self, history, bb, image):
        self.history = history  # past action indices; -1 marks an empty slot
        self.bb = bb            # current bounding box
        self.feature = State.compute_feature(history, bb, image)

    @staticmethod
    def compute_feature(history, bb, image):
        """Return a (1, D) row vector: flattened CNN feature ++ history one-hot."""
        history_feature = State.get_history_feature(history)
        image_feature = State.get_image_feature(image, bb)
        feature = np.concatenate((image_feature, history_feature))
        return np.array([feature])

    @staticmethod
    def get_image_feature(image, bb):
        """Crop `image` to `bb`, run the crop through VGG16, flatten the output."""
        cropped = crop_image(bb, image)
        # assumes crop_image returns a 224x224x3 array -- TODO confirm
        feature = State.feature_extractor([cropped.reshape(1, 224, 224, 3)])[0]
        return np.ndarray.flatten(feature)

    @staticmethod
    def get_history_feature(history):
        """One-hot encode the action history into a flat vector.

        Slot i occupies indices [i*ACTIONS_PER_STEP, (i+1)*ACTIONS_PER_STEP);
        an action of -1 leaves its slot all-zero.
        """
        assert len(history) == history_length
        feature = np.zeros((history_length * State.ACTIONS_PER_STEP,))
        for i in range(history_length):
            action = history[i]
            if action != -1:
                feature[i * State.ACTIONS_PER_STEP + action] = 1
        return feature
def get_layer_output_grad(model, inputs, outputs, layer=-1):
    """Return the gradient of one layer's output w.r.t. the model loss,
    evaluated on (inputs, outputs).

    `layer` indexes into model.layers; the default -1 selects the last layer.
    Relies on Keras-internal feed attributes, so it is tied to older Keras.
    """
    target = model.layers[layer].output
    grad_tensors = model.optimizer.get_gradients(model.total_loss, target)
    feed_tensors = (model._feed_inputs
                    + model._feed_targets
                    + model._feed_sample_weights)
    grad_fn = K.function(feed_tensors, grad_tensors)
    x, y, sample_weight = model._standardize_user_data(inputs, outputs)
    return grad_fn(x + y + sample_weight)
def get_weight_grad(model, inputs, outputs):
    """Return gradients of the model loss w.r.t. every trainable weight,
    evaluated on (inputs, outputs).

    Same Keras-internal plumbing as get_layer_output_grad, but targeting
    model.trainable_weights instead of a single layer output.
    """
    grad_tensors = model.optimizer.get_gradients(model.total_loss,
                                                 model.trainable_weights)
    feed_tensors = (model._feed_inputs
                    + model._feed_targets
                    + model._feed_sample_weights)
    grad_fn = K.function(feed_tensors, grad_tensors)
    x, y, sample_weight = model._standardize_user_data(inputs, outputs)
    return grad_fn(x + y + sample_weight)
def extract_feature(image, history, vgg16):
    """Build a (1, D) state feature for the agent: flattened activations of
    VGG16 layer 20 on the 224x224-resized image, concatenated with a one-hot
    encoding of the action history (-1 marks an unused slot).
    """
    history_feature = np.zeros(action_option * history_size)
    for slot, action in enumerate(history[:history_size]):
        if action != -1:
            history_feature[slot * action_option + action] = 1
    # NOTE(review): a fresh K.function is compiled on every call -- costly if
    # this runs inside a loop; consider hoisting/caching it.
    run_layer20 = K.function([vgg16.layers[0].input],
                             [vgg16.layers[20].output])
    resized = cv2.resize(image, (224, 224)).reshape(1, 224, 224, 3)
    image_feature = run_layer20([resized])[0].flatten()
    return np.array([np.concatenate((image_feature, history_feature))])
def get_activations(model, inputs, print_shape_only=False, layer_name=None):
    """Return the activations of every layer (or only `layer_name`) of
    `model` for the given `inputs`, printing each activation (or just its
    shape when `print_shape_only` is True) along the way.

    Fixes: the local previously named `input` shadowed the builtin, and an
    append-loop built a redundant copy of the already-complete output list.
    """
    print('----- activations -----')
    model_input = model.input
    if layer_name is None:
        outputs = [layer.output for layer in model.layers]
    else:
        outputs = [layer.output for layer in model.layers
                   if layer.name == layer_name]
    funcs = [K.function([model_input] + [K.learning_phase()], [output])
             for output in outputs]
    # learning_phase flag 1. selects "training" behavior (dropout active etc.).
    activations = [func([inputs, 1.])[0] for func in funcs]
    for layer_activations in activations:
        if print_shape_only:
            print(layer_activations.shape)
        else:
            print(layer_activations)
    return activations
def grad_cam(input_model, image, category_index, layer_name):
    """Compute a Grad-CAM visualization for `category_index`.

    Wraps `input_model` with a target-category loss head, takes the gradient
    of that loss w.r.t. the activations of `layer_name`, weights the channels
    by the pooled gradients, and overlays the resulting heatmap on the image.

    Returns (cam_uint8, heatmap): the JET-colored overlay and the raw
    [0, 1] heatmap.
    """
    model = Sequential()
    model.add(input_model)
    nb_classes = 1000  # ImageNet class count
    target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
    model.add(Lambda(target_layer,
                     output_shape=target_category_loss_output_shape))
    loss = K.sum(model.layers[-1].output)
    # BUG FIX: layer names are strings -- compare with ==, not `is`.
    # Identity comparison only worked by CPython string-interning accident
    # and silently matched nothing otherwise (IndexError on [0]).
    conv_output = [l for l in model.layers[0].layers
                   if l.name == layer_name][0].output
    grads = normalize(K.gradients(loss, conv_output)[0])
    gradient_function = K.function([model.layers[0].input],
                                   [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # Channel weights = gradients global-average-pooled over the spatial axes.
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.ones(output.shape[0:2], dtype=np.float32)
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    # NOTE(review): cv2.resize's dsize is (width, height) while image.shape
    # yields (rows, cols, ...) -- axes are swapped for non-square inputs.
    # Harmless for square (e.g. 224x224) images; confirm intent otherwise.
    w, h, _ = image.shape
    cam = cv2.resize(cam, (w, h))
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    cam = np.float32(cam) + np.float32(image)
    cam = 255 * cam / np.max(cam)
    return np.uint8(cam), heatmap
# -- Multi-channel text-CNN assembly and training setup (Python 2 / Keras 1).
# `multi_channel`, `model_word2vec_input`, `model_onehot_input`, `cnn_model`,
# `label_to_index`, `config` and `verbose` come from earlier in the file.
multi_channel.append(model_word2vec_input)
# Concatenate the channel embeddings along axis 1 (Keras 1 merge API).
multi_channel_embedding = merge(multi_channel, mode="concat", concat_axis=1)
# The same concatenated embedding feeds all three inputs of the shared CNN.
conv1_output = cnn_model([multi_channel_embedding, multi_channel_embedding, multi_channel_embedding])
full_connected_layers = Dense(output_dim=len(label_to_index), init="glorot_uniform", activation="relu")(
    conv1_output
)
dropout_layers = Dropout(p=0.5)(full_connected_layers)
softmax_output = Activation("softmax")(dropout_layers)
model = Model(input=[model_onehot_input, model_word2vec_input], output=[softmax_output])
# Backend function to read the softmax output directly (learning-phase aware).
model_output = K.function([model_onehot_input, model_word2vec_input, K.learning_phase()], [softmax_output])
if verbose > 1:
    print model.summary()
# sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
logging.debug("开始训练...")  # "starting training..."
print "开始训练..."
model.compile(loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"])
logging.debug("开始训练,迭代次数:%s" % (config["cnn_nb_epoch"]))  # logs epoch count
logging.debug("开始训练,EarlyStopping的patience为:%d次" % (config["earlyStoping_patience"]))  # logs patience
early_stop = EarlyStopping(patience=config["earlyStoping_patience"], verbose=1)
# print train_data_features.shape[2]
# NOTE: the fit(...) argument list continues beyond this chunk of the file.
model.fit(
# print cnn_model.summary() model_input = Input(shape=(1,input_length,word_embedding_length)) conv1_output = cnn_model([model_input,model_input,model_input]) full_connected_layers = Dense(output_dim=len(label_to_index), init="glorot_uniform",activation='relu')(conv1_output) dropout_layers = Dropout(p=0.5)(full_connected_layers) softmax_output = Activation("softmax")(dropout_layers) model = Model(input=[model_input], output=[softmax_output]) model_output = K.function([model_input, K.learning_phase()], [softmax_output]) if verbose > 1: print model.summary() # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True) logging.debug('开始训练...') print '开始训练...' model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy']) logging.debug('开始训练,迭代次数:%s' % (config['cnn_nb_epoch'])) logging.debug('开始训练,EarlyStopping的patience为:%d次' % (config['earlyStoping_patience'])) early_stop = EarlyStopping(patience=config['earlyStoping_patience'], verbose=1) # print train_data_features.shape[2]