Paddle之GAN学习心得

tech2026-04-03  1

GAN学习心得

PS:请积极指出不足,本文为学习心得,并不是文献综述

GAN的基本理念 GAN类似于一种零和博弈的思想,用通俗的语言来讲就是自己去欺骗自己,从而让自己仿佛达到超人般的水准,但是GAN的实现是建立在硬件与神经网络基础上,因此学习力要强于人类,所以在每一次欺骗自己的过程里都有参数的学习与更新。 GAN有很多应用,比如可以将一张图片通过GAN逐渐变成另外一张图片。基于类似的想法,我们今天使用随机产生的噪声,通过生成网络生成同真实照片有着相同size的假图片。通过判别网络结合生成网络,根据反向传播,将这些噪声逐渐地变成真实的照片。可以简单地将这一过程比作人类的绘画过程。

GAN的基本框架 GAN有一个生成网络,一个判别网络。 生成网络可以将随机的噪声通过全连接以及卷积等操作变成一个同实际图片相同size的矩阵。 判别网络可以理解为正常的图像分类网络。根据卷积神经网络将图片的信息进行提取,flatten展成一维向量,经全连接处理后变为 Batch_size×1 的打分矩阵(每张图一个真假判别分数),选择合适的损失函数就可以得到相应的损失。

GAN的实现

导入所需要的一些库

import numpy as np import paddle import paddle.fluid as fluid import cv2 from paddle.fluid.dygraph import Linear,Conv2D,BatchNorm,Pool2D from paddle.fluid.layers import mean,accuracy import matplotlib.pyplot as plt %matplotlib inline

生成网络的实现

class Gen(fluid.dygraph.Layer):
    """Generator: maps a 100-dim noise vector to a 1x28x28 image.

    Pipeline: fc(100->1024) -> BN(tanh) -> fc(1024->128*7*7) -> BN(tanh)
    -> reshape to (N,128,7,7) -> 2x upsample -> conv(5x5,64) -> BN(tanh)
    -> 2x upsample -> conv(5x5,1,tanh), giving a (N,1,28,28) output.
    """

    def __init__(self):
        super(Gen, self).__init__()
        self.fc1 = Linear(input_dim=100, output_dim=1024)
        # BUG FIX: original used `fluid.BatchNorm` (not a valid attribute of
        # the fluid namespace) and `fluid.dygraph.BatchNorm` inconsistently;
        # use the BatchNorm imported from paddle.fluid.dygraph throughout,
        # and name all normalization layers bn* consistently.
        self.bn1 = BatchNorm(num_channels=1024, act="tanh")
        self.fc2 = Linear(input_dim=1024, output_dim=128 * 7 * 7)
        self.bn2 = BatchNorm(num_channels=128 * 7 * 7, act="tanh")
        self.conv1 = Conv2D(num_channels=128, num_filters=64, filter_size=5, padding=2)
        self.bn3 = BatchNorm(num_channels=64, act="tanh")
        self.conv2 = Conv2D(num_channels=64, num_filters=1, filter_size=5, padding=2, act="tanh")

    def forward(self, z):
        """Transform noise `z` of shape (N, 100) into images of shape (N, 1, 28, 28)."""
        z = self.bn1(self.fc1(z))
        z = self.bn2(self.fc2(z))
        # Reshape the flat features into a 7x7 feature map with 128 channels.
        z = fluid.layers.reshape(z, shape=[-1, 128, 7, 7])
        # Two 2x nearest-neighbour upsamples take 7x7 -> 14x14 -> 28x28.
        z = fluid.layers.image_resize(input=z, scale=2)
        z = self.bn3(self.conv1(z))
        z = fluid.layers.image_resize(input=z, scale=2)
        z = self.conv2(z)
        return z

判别网络的实现

class Dis(fluid.dygraph.Layer):
    """Discriminator: scores a (N, 1, 28, 28) image batch with one real/fake logit each.

    Two conv/BN/pool stages shrink the image to 128 channels of 5x5 features,
    which are flattened and passed through two fully-connected layers to a
    single un-activated logit per sample (shape (N, 1)).
    """

    def __init__(self):
        super(Dis, self).__init__()
        # Stage 1: 1 -> 64 channels, then 2x2 max-style pooling.
        self.conv1 = Conv2D(num_channels=1, num_filters=64, filter_size=3)
        self.bn1 = BatchNorm(num_channels=64, act='relu')
        self.pool = Pool2D(pool_size=2, pool_stride=2)
        # Stage 2: 64 -> 128 channels, pooled again down to 5x5.
        self.conv2 = Conv2D(num_channels=64, num_filters=128, filter_size=3)
        self.bn2 = BatchNorm(num_channels=128, act="relu")
        self.pool2 = Pool2D(pool_size=2, pool_stride=2)
        # Classifier head: flatten -> 1024 -> 1 logit.
        self.fc1 = Linear(input_dim=128 * 5 * 5, output_dim=1024)
        self.bnf1 = BatchNorm(num_channels=1024, act="relu")
        self.fc2 = Linear(input_dim=1024, output_dim=1)

    def forward(self, img):
        """Return a (N, 1) logit batch for input images `img`."""
        feat = self.pool(self.bn1(self.conv1(img)))
        feat = self.pool2(self.bn2(self.conv2(feat)))
        flat = fluid.layers.reshape(feat, shape=[-1, 128 * 5 * 5])
        return self.fc2(self.bnf1(self.fc1(flat)))

参数设置

# Training hyper-parameters and data source.
epoches_num=5000  # number of training iterations
generator=paddle.dataset.mnist.train()  # MNIST training-set reader; yields (image, label) records

数据读取

# Build one fixed batch of generator noise and one fixed batch of real images.
image_batch = np.random.normal(0, 1, (100, 100)).astype("float32")  # 100 noise vectors, dim 100
real_image = fluid.io.batch(generator, batch_size=100)

# BUG FIX: `iter_num` was incremented below before ever being initialized
# (it was only assigned later, in the training section) -> NameError.
iter_num = 0
img_collection = []
for index, value in enumerate(real_image()):
    if len(value) != 100:
        # Skip any incomplete (final) batch so shapes stay fixed at 100.
        continue
    iter_num += 1
    for data_cur in value:
        # Each record is (flattened pixels, label); keep the image as 1x28x28.
        img_collection.append(data_cur[0].reshape(1, 28, 28))
    break  # only the first full batch is used as the "real" sample set
img_collection = np.array(img_collection)

训练

# ---------------------------------------------------------------------------
# Training loop.  Each iteration: (1) train D to score the real batch as 1
# and a generated batch as 0; (2) train G so that D scores its output as 1.
# ---------------------------------------------------------------------------
iter_num = 0
with fluid.dygraph.guard():
    G = Gen()
    D = Dis()
    loss_record = []
    # BUG FIX: the discriminator's optimizer must own D's parameters; the
    # original passed G.parameters() to every optimizer, so D never learned.
    # The unused `fake_optimizer` has been removed.
    real_optimizer = fluid.optimizer.AdamOptimizer(learning_rate=2e-4,
                                                   parameter_list=D.parameters())
    g_optimizer = fluid.optimizer.AdamOptimizer(learning_rate=2e-4,
                                                parameter_list=G.parameters())

    # Fixed real batch and labels, hoisted out of the loop.  BUG FIX: the
    # original cleared img_collection inside the loop, so the "real" batch
    # was empty from the second iteration onwards.  Labels are sized by the
    # actual batch (100) instead of the leaked loop variable `value`, and
    # astype is applied to the numpy array before to_variable.
    real_x = fluid.dygraph.to_variable(np.array(img_collection).astype("float32"))
    ones = fluid.dygraph.to_variable(np.ones([100, 1], dtype="float32"))
    zeros = fluid.dygraph.to_variable(np.zeros([100, 1], dtype="float32"))

    for epoch in range(epoches_num):
        iter_num += 1  # BUG FIX: never incremented before, so every `% 500` check fired

        # --- discriminator step: real -> label 1, generated -> label 0 ---
        noisy_x = fluid.dygraph.to_variable(image_batch)
        fake_imgs = G(noisy_x)
        pred_real = D(real_x)
        # BUG FIX: D was never shown fakes with label 0, so nothing opposed
        # its "real" logit.  detach() keeps this step out of G's graph.
        pred_fake = D(fake_imgs.detach())
        d_loss = mean(fluid.layers.sigmoid_cross_entropy_with_logits(pred_real, ones)) \
            + mean(fluid.layers.sigmoid_cross_entropy_with_logits(pred_fake, zeros))
        # Record a plain numpy value, not the live Variable (avoids keeping
        # every iteration's graph alive).
        loss_record.append(d_loss.numpy())
        d_loss.backward()
        real_optimizer.minimize(d_loss)
        D.clear_gradients()

        # --- generator step: make D score the fakes as real (label 1) ---
        g_loss = mean(fluid.layers.sigmoid_cross_entropy_with_logits(D(fake_imgs), ones))
        g_loss.backward()
        # BUG FIX: the original called g_optimizer.minimize(avg_loss) — the
        # *discriminator's* loss — instead of the generator's own loss.
        g_optimizer.minimize(g_loss)
        G.clear_gradients()

        if iter_num % 500 == 0:
            print("{}iters' loss of real_image".format(iter_num), d_loss.numpy())
            print("{}iters' loss of fake images".format(iter_num), g_loss.numpy())
            fluid.save_dygraph(D.state_dict(), './checkpoint/mnist_epoch{}'.format(iter_num))
        if iter_num % 1000 == 0:
            # NOTE(review): `visalization` is not defined anywhere in this
            # file — presumably a plotting helper (typo for "visualization");
            # confirm it exists in the full notebook before running.
            visalization(fake_imgs.numpy())
最新回复(0)