2020-09-03

tech2024-12-17  5

pytorch 官方教程中文版 pytorch 之迁移学习

from __future__ import print_function,division import torch import torch.nn as nn import torch.optim as optim from torch.optim import lr_scheduler import numpy as np import torchvision from torchvision import datasets,models,transforms import matplotlib.pyplot as plt import time import os import copy plt.ion()#interactive mode # 训练集数据扩充和归一化 # 在验证集上仅需要归一化 data_transforms = { 'train':transforms.Compose([ transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225]) ]), 'val': transforms.Compose([ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225]) ]), } data_dir = 'D:\pythonfile\Pytorch Learning\hymenoptera_data' image_datasets = {x:datasets.ImageFolder(os.path.join(data_dir,x), data_transforms[x]) for x in ['train','val']} dataloaders = {x:torch.utils.data.DataLoader(image_datasets[x],batch_size=4, shuffle=True,num_workers=0) for x in ['train','val']} dataset_sizes = {x:len(image_datasets[x]) for x in ['train','val']} class_names = image_datasets['train'].classes device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') #可视化部分图像数据 def imshow(inp,title=None): inp = inp.numpy().transpose((1,2,0)) mean = np.array([0.485,0.456,0.406]) std = np.array([0.229,0.224,0.225]) inp = std*inp + mean inp = np.clip(inp,0,1) plt.imshow(inp) if title is not None: plt.title(title) plt.pause(0.001)#稍作停顿,以便更新数据 inputs, classes = next(iter(dataloaders['train'])) out = torchvision.utils.make_grid(inputs) imshow(out,title=[class_names[x] for x in classes]) # train model def train_model(model,criterion,optimizer,scheduler,num_epochs=25): since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) #state_dict()返回包含整个状态的字典(参数和缓存) #deepcopy拷贝对象,深拷贝(拷贝对象及其对象) best__acc = 0.0 #定义一个准确率 for epoch in range(num_epochs): print('Epoch {}/{}'.format(epoch,num_epochs - 1)) print('-'*10) for phase in 
['train','val']: if phase =='train': scheduler.step()#打开动态优化其学习率 model.train()#将模型设置为训练模式 else: model.eval() running_loss = 0.0 running_corrects = 0#计算正确数次数 for inputs,labels in dataloaders[phase]: inputs = inputs.to(device) labels = labels.to(device) optimizer.zero_grad() with torch.set_grad_enabled(phase =='train'): outputs = model(inputs) #向前传播 _,preds = torch.max(outputs,1) loss = criterion(outputs,labels) if phase =='train': loss.backward() #只对训练集计算梯度 optimizer.step() running_loss += loss.item()*inputs.size(0) #返回的损失是批次中所有示例的平均值,所以乘以批次尺寸, 计算小批次所有损失 running_corrects +=torch.sum(preds == labels.data) epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] print('{} Loss:{:.4f} Acc:{:.4f}'.format(phase,epoch_loss,epoch_acc)) if phase == 'val' and epoch_acc > best__acc: best__acc = epoch_acc best__model_wts = copy.deepcopy(model.state_dict()) #保存准确率最高的模型的参数(深度复制) print() time_elapsed = time.time() - since #现在的时间减去开始时间 print('Training complete in {:.0f}m {:.0f}'.format( time_elapsed // 60,time_elapsed % 60 )) print('Best val Acc:{:4f}'.format(best__acc)) #打印出时间和准确率 model.load_state_dict(best_model_wts) return model # 加载刚刚保存的模型返回模型 #可视化模型的预测结果 def visualize_model(model,num_images = 6): was_training = model.training model.eval() images_so_far = 0 fig = plt.figure() with torch.no_grad(): for i,(inputs,labels) in enumerate(dataloaders['val']): inputs = inputs.to(device) labels = labels.to(device) outputs = model(inputs) _,preds = torch.max(outputs,1) for j in range(inputs.size()[0]): images_so_far +=1 ax = plt.subplot(num_images//2,2,images_so_far) ax.axis('off') ax.set_title('predicted:{}'.format(class_names[preds[j]])) imshow(inputs.cpu().data[j]) if images_so_far == num_images: model.train(mode=was_training) return model.train(mode = was_training) ##微调ConvNet model_ft = models.resnet18(pretrained=True) num_ftrs = model_ft.fc.in_features model_ft.fc = nn.Linear(num_ftrs,2) model_ft = model_ft.to(device) criterion 
= nn.CrossEntropyLoss() optimizer_ft = optim.SGD(model_ft.parameters(),lr=0.001,momentum=0.9) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,step_size=7,gamma = 0.1) model_ft = train_model(model_ft,criterion,optimizer_ft,exp_lr_scheduler,num_epochs=25) visualize_model(model_ft) #ConvNet作为固定特征提取器 model_conv = torchvision.models.resnet18(pretrained=True) for param in model_conv.parameters(): param.requires_grad = False num_ftrs = model_conv.fc.in_features model_conv.fc = nn.Linear(num_ftrs,2) model_conv = model_conv.to(device) criterion = nn.CrossEntropyLoss() optimizer_conv = optim.SGD(model_conv.fc.parameters(),lr=0.001,momentum=0.9) exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv,step_size=7,gamma=0.1) model_conv = train_model(model_conv,criterion,optimizer_conv,exp_lr_scheduler,num_epochs=25) visualize_model(model_conv) plt.ioff() plt.show()
最新回复(0)