... All of these achievements rest on today's topic: the generative adversarial network (GAN), an important model among deep learning algorithms.


A generator network G takes a random input z with density pz and returns an output xg = G(z), which should (after training) follow the target probability distribution.
A discriminator network D takes an input x that is either "real" (xt, with density denoted pt) or "generated" (xg, whose density pg is the one induced from pz by passing it through G), and returns D(x), the probability that x is "real" data.
(Figure: the generator G and the discriminator D, annotated with the quantities defined above.)
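For completeness, the two networks are trained against each other through the standard GAN minimax objective, written here in the notation of the definitions above (pt for the real-data density, pz for the noise density):

    min_G max_D V(D, G) = E_{x ~ pt}[ log D(x) ] + E_{z ~ pz}[ log(1 - D(G(z))) ]

D is pushed to output 1 on real samples xt and 0 on generated samples G(z), while G is pushed to make D(G(z)) large. The code below uses the common non-saturating variant of this game: instead of minimizing log(1 - D(G(z))), the generator is trained with binary cross-entropy against labels of 1 for its own samples.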


4. Python Implementation Code
import os
import numpy as np
from matplotlib import pyplot as plt
from keras.models import Sequential, Model  # Keras model modules
from keras.layers import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Input
from keras.layers import LeakyReLU, BatchNormalization
from keras.datasets import mnist
from keras import optimizers

# Load the MNIST dataset for training
(X_train, y_train), (X_test, y_test) = mnist.load_data()


class Adam:
    """Standalone NumPy implementation of the Adam update rule, shown for reference.
    The Keras models below use keras.optimizers.Adam."""
    def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.iter = 0
        self.m = None
        self.v = None

    def update(self, params, grads):
        if self.m is None:
            self.m, self.v = {}, {}
            for key, val in params.items():
                self.m[key] = np.zeros_like(val)
                self.v[key] = np.zeros_like(val)
        self.iter += 1
        lr_t = self.lr * np.sqrt(1.0 - self.beta2**self.iter) / (1.0 - self.beta1**self.iter)
        for key in params.keys():
            self.m[key] += (1 - self.beta1) * (grads[key] - self.m[key])
            self.v[key] += (1 - self.beta2) * (grads[key]**2 - self.v[key])
            params[key] -= lr_t * self.m[key] / (np.sqrt(self.v[key]) + 1e-7)


# Define the GAN framework
class GAN():
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Adam optimizer (SGD or another optimizer would also work)
        optimizer = optimizers.Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])

        # Build and compile the generator
        self.generator = self.build_generator()
        self.generator.compile(loss='binary_crossentropy', optimizer=optimizer)

        # The generator takes noise z and produces an image
        z = Input(shape=(100,))
        img = self.generator(z)

        # In the combined model, only the generator is trained
        self.discriminator.trainable = False

        # Feed the generated image to the discriminator, which judges its validity
        valid = self.discriminator(img)

        # Combined pipeline: noise input => generated image => validity score
        self.combined = Model(z, valid)
        self.combined.compile(loss='binary_crossentropy', optimizer=optimizer)

    def build_generator(self):
        noise_shape = (100,)
        model = Sequential()
        model.add(Dense(256, input_shape=noise_shape))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(LeakyReLU(alpha=0.2))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(np.prod(self.img_shape), activation='tanh'))
        model.add(Reshape(self.img_shape))
        model.summary()
        noise = Input(shape=noise_shape)
        img = model(noise)
        return Model(noise, img)

    def build_discriminator(self):
        img_shape = (self.img_rows, self.img_cols, self.channels)
        model = Sequential()
        model.add(Flatten(input_shape=img_shape))
        model.add(Dense(512))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(256))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dense(1, activation='sigmoid'))
        model.summary()
        img = Input(shape=img_shape)
        validity = model(img)
        return Model(img, validity)

    def train(self, epochs, batch_size=128, save_interval=50):
        # Load the dataset
        (X_train, _), (_, _) = mnist.load_data()
        # Rescale images to [-1, 1] to match the generator's tanh output
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)
        half_batch = int(batch_size / 2)

        for epoch in range(epochs):
            # Train the discriminator
            # Randomly select half a batch of real images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]
            noise = np.random.normal(0, 1, (half_batch, 100))
            gen_imgs = self.generator.predict(noise)
            d_loss_real = self.discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))
            d_loss_fake = self.discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # Train the generator
            noise = np.random.normal(0, 1, (batch_size, 100))
            # The generator tries to "fool" the discriminator: label its samples as real
            valid_y = np.array([1] * batch_size)
            g_loss = self.combined.train_on_batch(noise, valid_y)

            # Print training progress
            print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100 * d_loss[1], g_loss))

            # Save generated samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)

    def save_imgs(self, epoch):
        r, c = 5, 5
        noise = np.random.normal(0, 1, (r * c, 100))
        gen_imgs = self.generator.predict(noise)
        # Rescale from [-1, 1] back to [0, 1] for display
        gen_imgs = 0.5 * gen_imgs + 0.5
        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        # Make sure the output directory exists before saving
        os.makedirs("gan/images", exist_ok=True)
        fig.savefig("gan/images/mnist_%d.png" % epoch)
        plt.close()


if __name__ == '__main__':
    gan = GAN()
    gan.train(epochs=30000, batch_size=32, save_interval=200)
The output of a run looks like the following (just ignore the None entries, haha):


