def generator(latent_dim, n_classes):
    """Build the conditional generator: (latent vector, class label) -> RGB image.

    Two stride-2 transposed convolutions upsample a 64x64 feature map to
    256x256, matching the discriminator's default ``in_shape=(256, 256, 3)``.

    Args:
        latent_dim: Length of the input noise vector.
        n_classes: Number of distinct class labels for the Embedding layer.

    Returns:
        A compiled Keras ``Model`` taking ``[noise, label]`` and producing a
        tanh-activated 256x256x3 image (pixel values in [-1, 1]).
    """
    initializer = tf.random_normal_initializer(0., 0.021)

    # Label branch: embed the integer class id and project it onto a
    # 64x64 single-channel map so it can be concatenated with the noise path.
    in_label = Input(shape=(1,))
    li = Embedding(n_classes, 25)(in_label)
    n_nodes = 64 * 64
    li = Dense(n_nodes)(li)
    li = Reshape((64, 64, 1))(li)

    # Noise branch: project the latent vector to a 64x64x128 feature map.
    in_lat = Input(shape=(latent_dim,))
    n_nodes = 128 * 64 * 64
    gen = Dense(n_nodes)(in_lat)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = BatchNormalization(axis=-1)(gen)
    gen = Reshape((64, 64, 128))(gen)

    # Fuse the two branches, then upsample 64 -> 128 -> 256.
    merge = Concatenate()([gen, li])
    gen = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same',
                          kernel_initializer=initializer, use_bias=False)(merge)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = BatchNormalization(axis=-1)(gen)
    gen = Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same',
                          kernel_initializer=initializer, use_bias=False)(gen)
    gen = LeakyReLU(alpha=0.2)(gen)
    gen = BatchNormalization(axis=-1)(gen)
    out_layer = Conv2D(3, (7, 7), activation='tanh', padding='same',
                       kernel_initializer=initializer, use_bias=False)(gen)

    model = Model([in_lat, in_label], out_layer, name="generator")
    # NOTE(review): the generator is trained only through the combined GAN
    # model (see define_gan), so this compile step is effectively unused;
    # it is kept for backward compatibility with existing callers.
    # Fix: `lr` is deprecated (and removed in recent Keras) — use
    # `learning_rate`; metrics is a list, consistent with the discriminator.
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
def define_discriminator(n_classes, in_shape=(256, 256, 3)):
    """Build the conditional discriminator: (image, class label) -> real/fake score.

    Args:
        n_classes: Number of distinct class labels for the Embedding layer.
        in_shape: Shape of the input image; defaults to 256x256 RGB, which
            matches the generator's output size.

    Returns:
        A compiled Keras ``Model`` taking ``[image, label]`` and producing a
        sigmoid probability that the image is real.
    """
    initializer = tf.random_normal_initializer(0., 0.021)

    # Label branch: embed the integer class id and project it onto a
    # full-resolution single-channel map, stacked as an extra image channel.
    in_label = Input(shape=(1,))
    li = Embedding(n_classes, 25)(in_label)
    n_nodes = in_shape[0] * in_shape[1]
    li = Dense(n_nodes)(li)
    li = Reshape((in_shape[0], in_shape[1], 1))(li)

    # Image branch fused with the label map, then two stride-2 downsamples.
    in_image = Input(shape=in_shape)
    merge = Concatenate()([in_image, li])
    fe = Conv2D(128, (3, 3), strides=(2, 2), padding='same',
                kernel_initializer=initializer, use_bias=False)(merge)
    fe = LeakyReLU(alpha=0.2)(fe)
    fe = BatchNormalization(axis=-1)(fe)
    fe = Conv2D(128, (3, 3), strides=(2, 2), padding='same',
                kernel_initializer=initializer, use_bias=False)(fe)
    fe = LeakyReLU(alpha=0.2)(fe)
    fe = BatchNormalization(axis=-1)(fe)

    # Classifier head with dropout to slow discriminator overfitting.
    fe = Flatten()(fe)
    fe = Dropout(0.4)(fe)
    out_layer = Dense(1, activation='sigmoid')(fe)

    model = Model([in_image, in_label], out_layer, name="discriminator")
    # Fix: `lr` is deprecated (and removed in recent Keras) — use `learning_rate`.
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
def define_gan(g_model, d_model):
    """Stack generator and discriminator into the combined GAN training model.

    The discriminator is frozen (``trainable = False``) *before* this model is
    compiled, so gradient updates through the combined model adjust only the
    generator's weights; the discriminator keeps training normally through its
    own previously compiled model.

    Args:
        g_model: The generator model from ``generator``.
        d_model: The (already compiled) discriminator from ``define_discriminator``.

    Returns:
        A compiled Keras ``Model`` mapping ``[noise, label]`` to the frozen
        discriminator's real/fake score on the generated image.
    """
    g_model.trainable = True
    d_model.trainable = False

    # Feed the generator's output image (plus the same label) into the
    # frozen discriminator.
    gen_noise, gen_label = g_model.input
    gen_output = g_model.output
    gan_output = d_model([gen_output, gen_label])

    model = Model([gen_noise, gen_label], gan_output, name="gan")
    # Fix: `lr` is deprecated (and removed in recent Keras) — use
    # `learning_rate`; metrics is a list, consistent with the discriminator.
    opt = Adam(learning_rate=0.0002, beta_1=0.5)
    model.compile(optimizer=opt, loss='binary_crossentropy', metrics=['accuracy'])
    return model
下面是不同迭代次数的结果图像。16 个方格对应 16 个不同的潜在向量种子。
没有看到代码很难做出推测,但您可以查看这个仓库作为参考。
请注意以下事项:
另外,出于我们尚不清楚的原因,如果仅在 CPU 上训练,本笔记本中的判别器几乎总是无法学习。由于这一失败,GAN 很少能学会生成草图——也就是说,它只会输出纯随机噪声的图像。我们已经确定了两种解决这种情况的方法: