import tensorflow as tf
from tensorflow.keras import layers, models
# Set hyperparameters
latent_dim = 100
image_shape = (28, 28, 1)
batch_size = 64
epochs = 50
# Load and preprocess the data
(train_images, _), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.astype('float32') / 255.
train_images = tf.expand_dims(train_images, axis=-1)
# Define the encoder
encoder_inputs = layers.Input(shape=image_shape)
x = layers.Conv2D(32, 3, strides=2, padding='same', activation='relu')(encoder_inputs)
x = layers.Conv2D(64, 3, strides=2, padding='same', activation='relu')(x)
x = layers.Flatten()(x)
latent = layers.Dense(latent_dim)(x)
encoder = models.Model(encoder_inputs, latent, name='encoder')
# Define the decoder
decoder_inputs = layers.Input(shape=(latent_dim,))
x = layers.Dense(7 * 7 * 64, activation='relu')(decoder_inputs)
x = layers.Reshape((7, 7, 64))(x)
x = layers.Conv2DTranspose(32, 3, strides=2, padding='same', activation='relu')(x)
x = layers.Conv2DTranspose(1, 3, strides=2, padding='same', activation='sigmoid')(x)
decoder = models.Model(decoder_inputs, x, name='decoder')
# Define the discriminator
discriminator_inputs = layers.Input(shape=image_shape)
x = layers.Conv2D(32, 3, strides=2, padding='same', activation='relu')(discriminator_inputs)
x = layers.Conv2D(64, 3, strides=2, padding='same', activation='relu')(x)
x = layers.Flatten()(x)
x = layers.Dense(1, activation='sigmoid')(x)
discriminator = models.Model(discriminator_inputs, x, name='discriminator')
# Define the AAE
aae_inputs = layers.Input(shape=image_shape)
latent = encoder(aae_inputs)
decoded = decoder(latent)
aae = models.Model(aae_inputs, decoded, name='aae')
# Define the loss functions
reconstruction_loss_fn = tf.keras.losses.BinaryCrossentropy()
def reconstruction_loss(real_images, reconstructed_images):
loss = reconstruction_loss_fn(tf.ones_like(real_images), reconstructed_images)
return loss
adversarial_loss_fn = tf.keras.losses.BinaryCrossentropy()
def adversarial_loss(real_labels, predicted_labels):
loss = adversarial_loss_fn(real_labels, predicted_labels)
return loss
# Define the optimizer
optimizer = tf.keras.optimizers.Adam(1e-4)
def train_step(train_images): # 训练判别器 使用 tf.GradientTape() 作为 disc_tape: # 生成假图像 latent = tf.random.normal((batch_size, latent_dim)) generated_images = 解码器(潜在)
# Compute the discriminator loss
real_labels = tf.ones((batch_size, 1))
fake_labels = tf.zeros((batch_size, 1))
real_loss = adversarial_loss(real_labels, discriminator(train_images))
fake_loss = adversarial_loss(fake_labels, discriminator(generated_images))
disc_loss = real_loss + fake_loss
# Update the discriminator weights
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
# Train the generator
with tf.GradientTape() as gen_tape:
# Generate fake images
latent = tf.random.normal((batch_size, latent_dim))
generated_images = decoder(latent)
# Compute the generator loss
g_loss = reconstruction_loss(images, generated_images)
g_loss += adversarial_loss(tf.ones((batch_size, 1)), discriminator(generated_images))
# Update the generator weights
gradients_of_generator = gen_tape.gradient(g_loss, decoder.trainable_variables)
optimizer.apply_gradients(zip(gradients_of_generator, decoder.trainable_variables))
return disc_loss, g_loss
and i have error in this cell :
将 numpy 导入为 np 对于范围内的纪元(纪元): print(f"Epoch {epoch+1}/{epochs}")
# Shuffle the training data
np.random.shuffle(train_images)
# Iterate over the training data in batches
for i in range(0, len(train_images), batch_size):
# Get the next batch of images
images = train_images[i:i+batch_size]
# Train the AAE on this batch of images
disc_loss, g_loss = train_step(images)
# Print the loss every 10 batches
if i % 10 == 0:
print(f"Batch {i}/{len(train_images)}: disc_loss={disc_loss:.4f}, g_loss={g_loss:.4f}")
错误是:TypeError: 'tensorflow.python.framework.ops.EagerTensor' object does not support item assignment 我不知道我该怎么做才能解决它
Im searching about any help