def test_gradient(self):
    val = np.random.random((4, 2))
    xth = KTH.variable(val)
    xtf = KTF.variable(val)

    expth = xth * KTH.exp(xth)
    exptf = xtf * KTF.exp(xtf)
    lossth = KTH.sum(expth)
    losstf = KTF.sum(exptf)
    zero_lossth = KTH.stop_gradient(lossth)
    zero_losstf = KTF.stop_gradient(losstf)

    gradth = KTH.gradients(lossth, [expth])
    gradtf = KTF.gradients(losstf, [exptf])
    zero_gradth = KTH.gradients(lossth + zero_lossth, [expth])
    zero_gradtf = KTF.gradients(losstf + zero_losstf, [exptf])

    zth = KTH.eval(gradth[0])
    ztf = KTF.eval(gradtf[0])
    zero_zth = KTH.eval(zero_gradth[0])
    zero_ztf = KTF.eval(zero_gradtf[0])
    assert zth.shape == ztf.shape
    assert zero_zth.shape == zero_ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
    assert_allclose(zero_zth, zero_ztf, atol=1e-05)
    assert_allclose(zero_zth, zth, atol=1e-05)
    assert_allclose(zero_ztf, ztf, atol=1e-05)
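# Minimal single-backend sketch of the property the test above verifies
# (assumes the old Keras backend API, as in the test): adding a
# stop_gradient() copy of the loss contributes to its value but must leave
# the gradients untouched.
import numpy as np
from keras import backend as K

x = K.variable(np.random.random((4, 2)))
y = x * K.exp(x)
loss = K.sum(y)
frozen = K.stop_gradient(loss)  # same value, but no gradient flows through it

g = K.eval(K.gradients(loss, [y])[0])
g2 = K.eval(K.gradients(loss + frozen, [y])[0])
np.testing.assert_allclose(g, g2, atol=1e-5)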
def gradient_penalty_loss(y_true, y_pred, averaged_samples,
                          gradient_penalty_weight):
    """Calculates the gradient penalty loss for a batch of "averaged" samples.

    In Improved WGANs, the 1-Lipschitz constraint is enforced by adding a term
    to the loss function that penalizes the network if the gradient norm moves
    away from 1. However, it is impossible to evaluate this function at all
    points in the input space. The compromise used in the paper is to choose
    random points on the lines between real and generated samples, and check
    the gradients at these points. Note that it is the gradient w.r.t. the
    input averaged samples, not the weights of the discriminator, that we're
    penalizing!

    In order to evaluate the gradients, we must first run samples through the
    generator and evaluate the loss. Then we get the gradients of the
    discriminator w.r.t. the input averaged samples. The L2 norm and penalty
    can then be calculated for this gradient.

    Note that this loss function requires the original averaged samples as
    input, but Keras only supports passing y_true and y_pred to loss
    functions. To get around this, we make a partial() of the function with
    the averaged_samples argument, and use that for model training.
    """
    # first get the gradients:
    #   assuming: - that y_pred has dimensions (batch_size, 1)
    #             - averaged_samples has dimensions (batch_size, nbr_features)
    # gradients afterwards has dimension (batch_size, nbr_features), basically
    # a list of nbr_features-dimensional gradient vectors
    gradients = K.gradients(y_pred, averaged_samples)[0]
    # compute the euclidean norm by squaring ...
    gradients_sqr = K.square(gradients)
    # ... summing over the rows ...
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    # ... and taking the square root
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # compute lambda * (1 - ||grad||)^2, still for each individual sample
    gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)
    # return the mean as loss over all the batch samples
    return K.mean(gradient_penalty)
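# Sketch of the partial() trick described in the docstring. The names
# `averaged_samples` (a tensor of interpolated real/fake batches) and
# `discriminator_model` are assumptions about the surrounding training
# script, not code from it.
from functools import partial

partial_gp_loss = partial(gradient_penalty_loss,
                          averaged_samples=averaged_samples,
                          gradient_penalty_weight=10)
# Keras derives loss/metric names from __name__, which partial objects lack:
partial_gp_loss.__name__ = 'gradient_penalty'

discriminator_model.compile(optimizer='adam', loss=partial_gp_loss)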
def grad_cam(model, inputdata):
    # try using the book's approach (gradient-weighted class activation map)
    inputdata = np.reshape(inputdata, [1, 300, 1])
    model_output = model.output[:, 0]
    last_conv_layer = model.get_layer('conv1d_1')

    # gradient of the output score w.r.t. the last conv layer's feature maps,
    # pooled over batch and time to give one weight per filter
    grads = K.gradients(model_output, last_conv_layer.output)[0]
    pooled_grads = K.mean(grads, axis=(0, 1))
    iterate = K.function([model.input],
                         [pooled_grads, last_conv_layer.output])
    pooled_grads_value, conv_layer_output_value = iterate([inputdata])

    # weight each filter's activation by its pooled gradient
    for i in range(len(pooled_grads_value)):
        conv_layer_output_value[:, :, i] *= pooled_grads_value[i]

    cam_map = np.average(conv_layer_output_value, 0)
    cam_data = []
    for i in range(len(cam_map)):
        cam_data.append(np.average(cam_map[i, :]))
    cam_data = np.reshape(cam_data, [1, len(cam_data)])

    from scipy.signal import savgol_filter  # only needed for the smoothing below
    # test_cam2 = savgol_filter(cam_data, 3, 0)
    # crude upsample to the input length (np.resize tiles the data)
    test_cam2 = np.resize(cam_data, [1, 300])

    # optional visualisation (disabled):
    # fig = plt.figure(figsize=(20, 10))
    # ax0 = plt.subplot2grid((1, 1), (0, 0), colspan=1)
    # plt.yticks(fontsize=15)
    # ax0.plot(inputdata.flatten(), c='blue')
    # ax0_2 = ax0.twinx()
    # ax0_2.imshow(test_cam2, cmap='gist_heat', aspect='auto', alpha=0.4)
    # plt.show()

    return test_cam2
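# Hypothetical usage sketch for grad_cam() above. Assumes `model` is a trained
# Keras model containing a Conv1D layer named 'conv1d_1' and expecting inputs
# of length 300; `signal` stands in for a real 1-D measurement.
import numpy as np

signal = np.random.random(300)
cam = grad_cam(model, signal)  # -> array of shape (1, 300)
print(cam.shape)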
def grad_cam(self, input_model, image, category_index, layer_name):
    nb_classes = 5  # 18
    target_layer = lambda x: self.target_category_loss(x, category_index,
                                                       nb_classes)

    data_ = []
    for i in input_model.layers:
        if i.name == 'add_marge':
            print('add_marge', i)
            data_.append(i)

    # specify the layer
    # running inference through this model zeroes out every value except
    # the predicted class
    x = input_model.layers[9].output
    print('input', input_model.layers[9].name)
    x = Lambda(target_layer,
               output_shape=self.target_category_loss_output_shape)(x)
    model = keras.models.Model(input_model.layers[0].input, x)

    data = []
    for i in model.layers:
        if i.name == 'prediction_branch':
            print('prediction', i)
            data.append(i)

    conv_output = model.layers[3].output  # 3
    print('layer4', conv_output)
    # conv_output = [l for l in input_model.layers if l.name == layer_name][0].output

    # every value except the predicted class is now 0, so summing extracts
    # just the predicted class's value (the prediction loss)
    loss = KTF.sum(model.layers[9].output)
    print('final layer name', model.layers[9].name)

    # define a function that computes the gradient of the predicted class
    # value w.r.t. the last conv layer
    grads = self.normalize(KTF.gradients(loss, conv_output)[0])
    gradient_function = KTF.function([model.layers[0].input],
                                     [conv_output, grads])

    # evaluate with the gradient function defined above
    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # average the gradients per channel of the last conv layer and use the
    # result as each channel's importance (global average pooling)
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.ones(output.shape[0:2], dtype=np.float32)

    for i, w in enumerate(weights):
        cam += w * output[:, :, i]  # 255 * output[:, :, i]

    cam = cv2.resize(cam, (300, 300))
    # clip negative values to 0 (equivalent to ReLU)
    cam = np.maximum(cam, 0)
    # normalize values to 0-1
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam1 = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    # overlay the heatmap on the input image
    cam = np.float32(cam1) + np.float32(image)
    cam = 255 * cam / np.max(cam)

    # heatmap only
    cam1 = cv2.resize(cam1, (300, 300))

    cv2.imwrite('/Users/matsunagamasaaki/Desktop/gimage.png', image)
    cv2.imwrite('/Users/matsunagamasaaki/Desktop/gcam.png', cam1)
    return np.uint8(cam), heatmap, cam1
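# The grad_cam method above relies on helpers the excerpt does not show
# (self.target_category_loss, self.target_category_loss_output_shape,
# self.normalize). A plausible set, following the common keras-grad-cam
# pattern, might look like the sketch below; these are assumptions, not the
# original code.
import tensorflow as tf
from keras import backend as K

def target_category_loss(x, category_index, nb_classes):
    # zero every entry except the target class
    return tf.multiply(x, K.one_hot([category_index], nb_classes))

def target_category_loss_output_shape(input_shape):
    return input_shape

def normalize(x):
    # utility to normalize a tensor by its L2 norm
    return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)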
for j in range(nb_style_images):
    sl2.append(style_loss(style_reference_features[j],
                          combination_features,
                          style_masks[j],
                          shape))

for j in range(nb_style_images):
    sl = sl1[j] - sl2[j]

    # Improvement 4
    # Geometric weighted scaling of style loss
    loss += (style_weights[j] / (2 ** (nb_layers - (i + 1)))) * sl

loss += total_variation_weight * total_variation_loss(combination_image)

# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)

outputs = [loss]
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)

f_outputs = K.function([combination_image], outputs)


def eval_loss_and_grads(x):
    if K.image_dim_ordering() == 'th':
        x = x.reshape((1, 3, img_width, img_height))
    else:
        x = x.reshape((1, img_width, img_height, 3))
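    # (sketch, not in the original excerpt) eval_loss_and_grads() is cut off
    # above; scripts of this style typically finish it by evaluating f_outputs
    # and flattening the gradients for SciPy's L-BFGS optimizer:
    outs = f_outputs([x])
    loss_value = outs[0]
    if len(outs[1:]) == 1:
        grad_values = outs[1].flatten().astype('float64')
    else:
        grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values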
def grad_cam(self, input_model, image, category_index, layer_name, boxcoords):
    nb_classes = 5  # 6  # 18

    # bounding box coords
    xmin = boxcoords[0]
    ymin = boxcoords[1]
    xmax = boxcoords[2]
    ymax = boxcoords[3]

    target_layer = lambda x: self.target_category_loss(
        x, category_index, nb_classes)

    # specify the layer
    x = input_model.layers[-3].output
    x = Lambda(target_layer,
               output_shape=self.target_category_loss_output_shape)(x)
    model = keras.models.Model(input_model.layers[0].input, x)

    conv_output = model.layers[30].output  # model.layers[5].output
    # conv_output = [l for l in input_model.layers if l.name == layer_name][0].output
    loss = KTF.sum(model.layers[-3].output)
    grads = self.normalize(KTF.gradients(loss, conv_output)[0])
    gradient_function = KTF.function([model.layers[0].input],
                                     [conv_output, grads])

    output, grads_val = gradient_function([image])
    output, grads_val = output[0, :], grads_val[0, :, :, :]

    # per-channel importance weights (probably global average pooling)
    weights = np.mean(grads_val, axis=(0, 1))
    cam = np.ones(output.shape[0:2], dtype=np.float32)

    for i, w in enumerate(weights):
        cam += w * (255 * output[:, :, i])

    cam = cv2.resize(cam, (300, 300))
    cam = np.maximum(cam, 0)
    heatmap = cam / np.max(cam)

    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image -= np.min(image)
    image = np.minimum(image, 255)

    cam1 = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)

    # zero out the heatmap outside the bounding box
    cam1 = cv2.resize(cam1, (720, 405))
    cam1[:, 0:xmin, :] = 0  # left
    cam1[0:ymin, :, :] = 0  # top
    cam1[ymax:-1, :, :] = 0  # bottom
    cam1[:, xmax:-1, :] = 0  # right
    print('cam1 shape', np.shape(cam1))
    cam1 = cv2.resize(cam1, (300, 300))

    heatmap = cv2.resize(heatmap, (720, 405))
    heatmap[:, 0:xmin] = 0  # left
    heatmap[0:ymin, :] = 0  # top
    heatmap[ymax:-1, :] = 0  # bottom
    heatmap[:, xmax:-1] = 0  # right
    heatmap = cv2.resize(heatmap, (300, 300))

    # overlay the input image to improve visibility
    cam = np.float32(cam1) + np.float32(image)
    cam = 255 * cam / np.max(cam)

    return np.uint8(cam), heatmap
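# Hypothetical call for the bounding-box variant above; `visualizer` is the
# (assumed) object exposing grad_cam, `detector` a trained model, and `frame`
# a preprocessed (1, H, W, 3) batch -- none of these names come from the
# original script.
import cv2

boxcoords = (100, 50, 400, 300)  # xmin, ymin, xmax, ymax in source pixels
cam, heatmap = visualizer.grad_cam(detector, frame, category_index=0,
                                   layer_name='conv2d_30',
                                   boxcoords=boxcoords)
cv2.imwrite('gradcam_overlay.png', cam)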
# In[17]:


for layer_name in style_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(style_layers)) * sl


# In[18]:


loss += total_variation_weight * total_variation_loss(combination_image)


# In[19]:


grads = KTF.gradients(loss, combination_image)[0]
fetch_loss_and_grads = KTF.function([combination_image], [loss, grads])


# In[20]:


class Evaluator(object):

    def __init__(self):
        self.loss_value = None
        self.grads_values = None

    def loss(self, x):
        assert self.loss_value is None
        x = x.reshape((1, img_height, img_width, 3))
        outs = fetch_loss_and_grads([x])
        loss_value = outs[0]
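        # (sketch, not in the original excerpt) the cell is truncated above;
        # the standard version of this notebook caches loss and gradients from
        # one forward pass so SciPy's L-BFGS can query them separately:
        grad_values = outs[1].flatten().astype('float64')
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values


# Usage sketch for the Evaluator above (assumptions: numpy is imported and the
# iteration count is arbitrary; the real notebook starts x from a preprocessed
# image, a random start is used here only to keep the example self-contained):
from scipy.optimize import fmin_l_bfgs_b

evaluator = Evaluator()
x = np.random.uniform(0, 255, (1, img_height, img_width, 3)).flatten()
for i in range(10):
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x,
                                     fprime=evaluator.grads, maxfun=20)
    print('Iteration %d, loss: %f' % (i, min_val))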