Convolution Functions in Python: Python Convolution (Part 3)


from vis.utils import utils
from vis.visualization import visualize_activation
from keras import activations
from matplotlib import pyplot as plt
%matplotlib inline
plt.rcParams['figure.figsize'] = (18, 6)
# Utility to search for layer index by name.
# Alternatively we can specify this as -1 since it corresponds to the last layer.
layer_idx = utils.find_layer_idx(model, 'preds')
# Swap softmax with linear
model.layers[layer_idx].activation = activations.linear
model = utils.apply_modifications(model)
# This is the output node we want to maximize.
filter_idx = 0
img = visualize_activation(model, layer_idx, filter_indices=filter_idx)
plt.imshow(img[..., 0])
In the same way, we can apply this idea to all of the classes and check what each class's pattern looks like.
import numpy as np

for output_idx in np.arange(10):
    # Lets turn off verbose output this time to avoid clutter and just see the output.
    img = visualize_activation(model, layer_idx, filter_indices=output_idx, input_range=(0., 1.))
    plt.figure()
    plt.title('Networks perception of {}'.format(output_idx))
    plt.imshow(img[..., 0])
In image classification problems we may encounter occluded target objects, where sometimes only a small part of the object is visible. The occlusion-based method systematically covers different parts of the input image with a gray square and monitors the classifier's output. Such experiments clearly show that the model is localizing the object in the scene: when the object is occluded, the probability of the correct class drops significantly.
To understand this idea, we can randomly sample an image from the dataset and try to plot a heatmap for it. This gives us an intuitive view of which parts of the image matter most to the model when it distinguishes the true class from the others.
def iter_occlusion(image, size=8):
    # taken from
    occlusion = np.full((size * 5, size * 5, 1), [0.5], np.float32)
    occlusion_center = np.full((size, size, 1), [0.5], np.float32)
    occlusion_padding = size * 2
    # print('padding...')
    image_padded = np.pad(image,
                          ((occlusion_padding, occlusion_padding),
                           (occlusion_padding, occlusion_padding),
                           (0, 0)),
                          'constant', constant_values=0.0)
    for y in range(occlusion_padding, image.shape[0] + occlusion_padding, size):
        for x in range(occlusion_padding, image.shape[1] + occlusion_padding, size):
            tmp = image_padded.copy()
            tmp[y - occlusion_padding:y + occlusion_center.shape[0] + occlusion_padding,
                x - occlusion_padding:x + occlusion_center.shape[1] + occlusion_padding] = occlusion
            tmp[y:y + occlusion_center.shape[0], x:x + occlusion_center.shape[1]] = occlusion_center
            yield x - occlusion_padding, y - occlusion_padding, \
                tmp[occlusion_padding:tmp.shape[0] - occlusion_padding,
                    occlusion_padding:tmp.shape[1] - occlusion_padding]

i = 23  # for example
data = val_x[i]
correct_class = np.argmax(val_y[i])
# input tensor for model.predict
inp = data.reshape(1, 28, 28, 1)
# image data for matplotlib's imshow
img = data.reshape(28, 28)
# occlusion
img_size = img.shape[0]
occlusion_size = 4
print('occluding...')
heatmap = np.zeros((img_size, img_size), np.float32)
class_pixels = np.zeros((img_size, img_size), np.int16)
from collections import defaultdict
counters = defaultdict(int)
for n, (x, y, img_float) in enumerate(iter_occlusion(data, size=occlusion_size)):
    X = img_float.reshape(1, 28, 28, 1)
    out = model.predict(X)
    # print('#{}: {} @ {} (correct class: {})'.format(n, np.argmax(out), np.amax(out), out[0][correct_class]))
    # print('x {} - {} | y {} - {}'.format(x, x + occlusion_size, y, y + occlusion_size))
    heatmap[y:y + occlusion_size, x:x + occlusion_size] = out[0][correct_class]
    class_pixels[y:y + occlusion_size, x:x + occlusion_size] = np.argmax(out)
    counters[np.argmax(out)] += 1
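The loop above only fills the heatmap and class_pixels arrays; below is a minimal sketch (not part of the original article) of how these results could be displayed with matplotlib, assuming the img, heatmap, class_pixels, and counters objects computed above are still in the session and using arbitrary colormap choices.
# Sketch: visualize the occlusion results computed above.
print('Occlusion done. Predicted-class counts:', dict(counters))
plt.figure(figsize=(12, 4))
plt.subplot(1, 3, 1)
plt.title('Input image')
plt.imshow(img, cmap='gray')
plt.subplot(1, 3, 2)
plt.title('P(correct class) under occlusion')
plt.imshow(heatmap, cmap='jet')
plt.colorbar()
plt.subplot(1, 3, 3)
plt.title('Predicted class per occluded region')
plt.imshow(class_pixels, cmap='tab10')
plt.colorbar()
plt.show()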
As in the tank example mentioned earlier, how can we tell which parts of the input the model relies on for its prediction? Saliency maps can be used to answer this question; they were first introduced in this paper.
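As a hedged sketch of what this could look like with the same keras-vis library used above, the visualize_saliency function in vis.visualization computes pixel-wise gradients of a class score with respect to the input; the sample index and layer below simply reuse the occlusion example, and are assumptions rather than the article's exact code.
from vis.visualization import visualize_saliency
# Sketch: saliency map for the correct class of the sample used in the occlusion example.
grads = visualize_saliency(model, layer_idx, filter_indices=correct_class,
                           seed_input=data)
plt.figure()
plt.title('Saliency map for class {}'.format(correct_class))
plt.imshow(grads, cmap='jet')
plt.show()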
