I have a CNN model, and I want to use Grad-CAM to display a heatmap over an image using the model's last convolutional layer. Every time I try, it tells me the Sequential model has never been called and therefore has no defined output.
How do I fix this?
import os
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout, BatchNormalization, GlobalAveragePooling2D
from tensorflow.keras.layers import Rescaling, RandomFlip, RandomRotation, RandomZoom, RandomContrast
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
# Define paths to the dataset
base_dir = 'Organised_data - Copy'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Parameters
batch_size = 64
img_height = 50
img_width = 50
histories = []
# Define data augmentation using Keras preprocessing layers
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.RandomFlip('horizontal'),
    tf.keras.layers.RandomRotation(0.2),
    tf.keras.layers.RandomZoom(0.2),
    tf.keras.layers.RandomContrast(0.2),
])
# Load and preprocess datasets
train_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    train_dir,
    image_size=(img_height, img_width),
    batch_size=batch_size,
    label_mode='binary'
)
validation_dataset = tf.keras.preprocessing.image_dataset_from_directory(
    validation_dir,
    image_size=(img_height, img_width),
    batch_size=batch_size,
    label_mode='binary'
)
# Pixel values are scaled to [0, 1] by the model's own Rescaling layer (see below),
# so the datasets are not rescaled again here; rescaling in both places would
# divide the pixel values by 255 twice.
# Apply data augmentation to the training dataset
train_dataset = train_dataset.map(lambda x, y: (data_augmentation(x, training=True), y))
# Define a simple CNN model
model = Sequential([
    Rescaling(1./255, input_shape=(img_height, img_width, 3)),  # Normalization layer
    # Data augmentation layers
    RandomFlip('horizontal'),
    RandomRotation(0.2),
    RandomZoom(0.2),
    RandomContrast(0.2),
    # Convolutional layers
    Conv2D(32, (3, 3), activation='relu', padding='same'),
    MaxPooling2D((2, 2)),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D((2, 2)),
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    MaxPooling2D((2, 2)),
    # Global Average Pooling to reduce the feature maps
    GlobalAveragePooling2D(),
    # Dense layers
    Dense(64, activation='relu'),
    Dense(1, activation='sigmoid')  # Sigmoid for binary classification
])
# Compile the model
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Model summary
model.summary()
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
# Define callbacks
early_stopping = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
model_checkpoint = ModelCheckpoint('best_model.keras', save_best_only=True, monitor='val_loss')
# Train the model
history = model.fit(
    train_dataset,
    validation_data=validation_dataset,
    epochs=20,  # You can adjust the number of epochs
    callbacks=[early_stopping, model_checkpoint]
)
# Save the final model
model.save('final_model.keras')
That is everything up to training the model. The model summary is:
Model: "sequential_1"
┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━┓
┃ Layer (type) ┃ Output Shape ┃ Param # ┃
┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━┩
│ rescaling_1 (Rescaling) │ (None, 50, 50, 3) │ 0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ random_flip_1 (RandomFlip) │ (None, 50, 50, 3) │ 0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ random_rotation_1 │ (None, 50, 50, 3) │ 0 │
│ (RandomRotation) │ │ │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ random_zoom_1 (RandomZoom) │ (None, 50, 50, 3) │ 0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ random_contrast_1 │ (None, 50, 50, 3) │ 0 │
│ (RandomContrast) │ │ │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d (Conv2D) │ (None, 50, 50, 32) │ 896 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ max_pooling2d (MaxPooling2D) │ (None, 25, 25, 32) │ 0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d_1 (Conv2D) │ (None, 25, 25, 64) │ 18,496 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ max_pooling2d_1 (MaxPooling2D) │ (None, 12, 12, 64) │ 0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ conv2d_2 (Conv2D) │ (None, 12, 12, 128) │ 73,856 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ max_pooling2d_2 (MaxPooling2D) │ (None, 6, 6, 128) │ 0 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ global_average_pooling2d │ (None, 128) │ 0 │
│ (GlobalAveragePooling2D) │ │ │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense (Dense) │ (None, 64) │ 8,256 │
├─────────────────────────────────┼────────────────────────┼───────────────┤
│ dense_1 (Dense) │ (None, 1) │ 65 │
└─────────────────────────────────┴────────────────────────┴───────────────┘
Total params: 101,569 (396.75 KB)
Trainable params: 101,569 (396.75 KB)
Non-trainable params: 0 (0.00 B)
The Grad-CAM part is:
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
# Load the best model
model = tf.keras.models.load_model('best_model.keras')
# Function to compute Grad-CAM
def get_gradcam_heatmap(model, img_array, last_conv_layer_name, pred_index=None):
    # Ensure we get the output from the correct convolutional layer
    grad_model = Model(inputs=model.inputs,
                       outputs=[model.get_layer(last_conv_layer_name).output, model.output])
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(predictions[0])
        class_channel = predictions[:, pred_index]
    grads = tape.gradient(class_channel, conv_outputs)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    conv_outputs = conv_outputs[0]
    heatmap = conv_outputs @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    return heatmap.numpy()
# Function to overlay heatmap on image
def display_gradcam(img_path, heatmap, alpha=0.4):
    img = image.load_img(img_path)
    img = image.img_to_array(img)
    heatmap = np.uint8(255 * heatmap)
    jet = plt.cm.get_cmap("jet")
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]
    jet_heatmap = tf.image.resize(jet_heatmap, (img.shape[1], img.shape[0]))
    jet_heatmap = tf.keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = tf.keras.preprocessing.image.img_to_array(jet_heatmap)
    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = tf.keras.preprocessing.image.array_to_img(superimposed_img)
    plt.imshow(superimposed_img)
    plt.axis('off')
    plt.show()
# Load and preprocess an image of your choice
img_height, img_width = 50, 50  # same input size the model was trained on
img_path = '8863_idx5_x1301_y951_class1.png'
img = image.load_img(img_path, target_size=(img_height, img_width))
img_array = image.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
# Ensure the last convolutional layer's name matches your model's architecture
last_conv_layer_name = 'conv2d_2' # replace with the actual name of the last convolutional layer in your model
heatmap = get_gradcam_heatmap(model, img_array, last_conv_layer_name)
# Display the heatmap on the original image
display_gradcam(img_path, heatmap)
That is the Grad-CAM part. The error I get is:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[11], line 59
57 # Ensure the last convolutional layer's name matches your model's architecture
58 last_conv_layer_name = 'conv2d_2' # replace with the actual name of the last convolutional layer in your model
---> 59 heatmap = get_gradcam_heatmap(model, img_array, last_conv_layer_name)
61 # Display the heatmap on the original image
62 display_gradcam(img_path, heatmap)
Cell In[11], line 14
11 def get_gradcam_heatmap(model, img_array, last_conv_layer_name, pred_index=None):
12 # Ensure we get the output from the correct convolutional layer
13 grad_model = Model(inputs=model.inputs,
---> 14 outputs=[model.get_layer(last_conv_layer_name).output, model.output])
16 with tf.GradientTape() as tape:
17 conv_outputs, predictions = grad_model(img_array)
File c:\Users\kaust\Desktop\Kaustav\pyhton\IDC detection\.venv\Lib\site-packages\keras\src\ops\operation.py:266, in Operation.output(self)
256 @property
257 def output(self):
258 """Retrieves the output tensor(s) of a layer.
259
260 Only returns the tensor(s) corresponding to the *first time*
(...)
264 Output tensor or list of output tensors.
...
292 f"{node_index}, but the operation has only "
293 f"{len(self._inbound_nodes)} inbound nodes."
294 )
ValueError: The layer sequential_1 has never been called and thus has no defined output.
I have tried checking over and over whether the model is not being called, and why, etc. No progress.
The error you are seeing occurs because the Sequential model has not yet been called at the point where you try to take the output of the last convolutional layer.
In other words, the model has not processed any input data, so its layers have no defined output tensors.
To fix this, make sure the model has been called on some input data before you try to extract outputs from any of its layers.
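A minimal sketch of that fix is below. It assumes a single forward pass is enough to build the loaded model's graph in your Keras version, and it reuses the get_gradcam_heatmap and display_gradcam functions, img_array, img_path, and the 'conv2d_2' layer name from your code:
import tensorflow as tf
# Load the trained model as before
model = tf.keras.models.load_model('best_model.keras')
# Call the model once on the prepared image (a dummy batch such as
# tf.zeros((1, 50, 50, 3)) would also work). This builds the Sequential model,
# so model.inputs, model.output and each layer's .output are defined afterwards.
_ = model(img_array)
# Building the Grad-CAM sub-model inside get_gradcam_heatmap should now succeed
# instead of raising "has never been called and thus has no defined output".
heatmap = get_gradcam_heatmap(model, img_array, 'conv2d_2')
display_gradcam(img_path, heatmap)
The only change relative to your script is the extra forward pass before get_gradcam_heatmap is called; the Grad-CAM computation itself is unchanged.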