1. The ResNet model
2. The left figure shows a residual block from the 18- and 34-layer models; the right figure shows the residual block used in the 50-, 101-, and 152-layer models.
3. The residual block for the 18- and 34-layer networks (the dashed part indicates that the input must first go through a downsampling operation).
4. The residual block for the 50-, 101-, and 152-layer networks.
5. Structure of the 34-layer model. In the figure below, the residual blocks are divided into 4 stages; the first residual block of stages 2, 3, and 4 must downsample its input:
6. Model code. The 18- and 34-layer networks share one residual-block structure, while the 50-, 101-, and 152-layer networks share another, so the two kinds are defined separately; note the downsampling in the first residual block of stages 2, 3, and 4:
import torch
import torch.nn as nn


class BasicBlock(nn.Module):  # residual block for the 18- and 34-layer networks
    expansion = 1  # the last conv has out_channel * expansion kernels (no expansion for this block)

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                               kernel_size=3, stride=stride, padding=1,
                               bias=False)  # bias is redundant right before BatchNorm
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU()
        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.downsample = downsample

    def forward(self, x):
        identity = x  # keep the block input for the shortcut connection
        if self.downsample is not None:
            identity = self.downsample(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out += identity
        out = self.relu(out)
        return out
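
# A quick sanity check (an added sketch, not part of the original post): the
# dashed-line variant halves the spatial size with stride=2, so the shortcut
# needs a 1x1 conv + BN to bring the identity branch to the same shape.
blk = BasicBlock(64, 128, stride=2,
                 downsample=nn.Sequential(
                     nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False),
                     nn.BatchNorm2d(128)))
print(blk(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 128, 28, 28])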
class Bottleneck(nn.Module):  # residual block for the 50-, 101-, and 152-layer networks
    expansion = 4  # the third conv has 4x as many kernels (256/512/1024/2048) as the first two (64/128/256/512)

    def __init__(self, in_channel, out_channel, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                               kernel_size=1, stride=1, bias=False)  # squeeze channels (reduce dimensionality)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)
        self.conv3 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel * self.expansion,
                               kernel_size=1, stride=1, bias=False)  # unsqueeze channels (restore dimensionality)
        self.bn3 = nn.BatchNorm2d(out_channel * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        out += identity
        out = self.relu(out)
        return out
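
# Bottleneck sanity check (an added sketch): out_channel is only the squeezed
# width; the block actually emits out_channel * expansion channels, so with no
# downsample the input must already carry 64 * 4 = 256 channels.
btl = Bottleneck(in_channel=256, out_channel=64)  # stride=1, no downsample
print(btl(torch.randn(1, 256, 56, 56)).shape)  # torch.Size([1, 256, 56, 56])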
class ResNet(nn.Module):  # the overall network
    # block: BasicBlock for the 18-/34-layer networks, Bottleneck for the 50-/101-/152-layer ones
    # blocks_num: number of residual blocks per stage, e.g. [3, 4, 6, 3] for the 34-layer network
    # include_top: makes it easy to build more complex networks on top of this ResNet later
    def __init__(self, block, blocks_num, num_classes=1000, include_top=True):
        super(ResNet, self).__init__()
        self.include_top = include_top
        self.in_channel = 64  # output channels of the previous layer, i.e. input channels of the current one

        # part 1: conv1 + max pooling
        self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_channel)
        # inplace=True saves (GPU) memory and avoids repeatedly allocating and freeing
        # it, at the cost of overwriting the input tensor; as long as that causes no
        # errors, use it -- the numerical result is unaffected
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # part 2: the four stages of residual blocks (conv2_x .. conv5_x)
        # blocks per stage across the 5 depths (18/34/50/101/152):
        self.layer1 = self._make_layer(block, 64, blocks_num[0])             # stage 1: 2, 3, 3, 3, 3
        self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)  # stage 2: 2, 4, 4, 4, 8
        self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)  # stage 3: 2, 6, 6, 23, 36
        self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)  # stage 4: 2, 3, 3, 3, 3

        # part 3: average pooling + fully connected layer
        if self.include_top:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # output size = (1, 1)
            self.fc = nn.Linear(512 * block.expansion, num_classes)

        # initialization of the conv layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def _make_layer(self, block, channel, block_num, stride=1):
        downsample = None
        if stride != 1 or self.in_channel != channel * block.expansion:
            # the dashed-line shortcut: a 1x1 conv + BN resizes the identity branch
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channel, channel * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(channel * block.expansion))
        layers = []
        layers.append(block(self.in_channel, channel, downsample=downsample, stride=stride))
        self.in_channel = channel * block.expansion
        for _ in range(1, block_num):
            layers.append(block(self.in_channel, channel))  # stride=1, downsample=None
        return nn.Sequential(*layers)  # unpack the list into positional (non-keyword) arguments

    def forward(self, x):
        # part 1
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        # part 2
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # part 3
        if self.include_top:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.fc(x)
        return x
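
# Shape walk-through (an added sketch, assuming the standard 224x224 ImageNet
# input): every stage after the first halves the spatial size and doubles the
# channel count.
net = ResNet(BasicBlock, [3, 4, 6, 3])  # the 34-layer configuration
feat = net.maxpool(net.relu(net.bn1(net.conv1(torch.randn(1, 3, 224, 224)))))
for stage in (net.layer1, net.layer2, net.layer3, net.layer4):
    feat = stage(feat)
    print(feat.shape)
# torch.Size([1, 64, 56, 56])
# torch.Size([1, 128, 28, 28])
# torch.Size([1, 256, 14, 14])
# torch.Size([1, 512, 7, 7])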
def resnet34(num_classes=1000, include_top=True):
    return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)


def resnet101(num_classes=1000, include_top=True):
    return ResNet(Bottleneck, [3, 4, 23, 3], num_classes=num_classes, include_top=include_top)
7. Experiment source code