ResNet50 Explained and in Practice: The ResNet Network Structure in Detail (Part 2)


Note: if the output of the residual mapping F(x) does not have the same dimensions as the skip connection x, the two cannot simply be added together; x must first be brought up to the same dimensions before the addition can be computed.
There are two ways to raise the dimension of x:
1. Pad with zeros;
2. Use a 1×1 convolution. In practice the 1×1 convolution is almost always used (see the sketch below).
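As a minimal sketch of option 2 (the channel counts, spatial sizes, and the strided 3×3 branch are hypothetical, chosen only to create a shape mismatch), the shortcut can be projected with a 1×1 convolution so that the addition becomes valid:

import torch
import torch.nn as nn

# Hypothetical example: the residual branch turns 64 channels into 128 and halves the
# spatial size, so the shortcut x must be projected with a strided 1x1 conv before the
# two tensors can be added.
x = torch.randn(8, 64, 56, 56)                                          # (batch, channels, H, W)
residual_branch = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False)
projection = nn.Conv2d(64, 128, kernel_size=1, stride=2, bias=False)    # 1x1 projection of x

f_x = residual_branch(x)      # shape: (8, 128, 28, 28)
shortcut = projection(x)      # shape: (8, 128, 28, 28) - now matches F(x)
out = f_x + shortcut          # element-wise addition is now valid
print(out.shape)              # torch.Size([8, 128, 28, 28])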
The full code is below:

# Import libraries
import torch
import torch.nn as nn
import torch.nn.functional as F


def conv3x3(in_channels, out_channels, stride=1):
    # 3x3 convolution with padding, used by BasicBlock
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=1, bias=False)


# Define the residual blocks (BasicBlock is the small block, Bottleneck is the large one)
class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_channels, channels, stride=1, downsample=None):
        # input channels, output channels, stride, downsample (shortcut projection)
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_channels, channels, stride)
        self.bn1 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(channels, channels)
        self.bn2 = nn.BatchNorm2d(channels)
        # Build the 1x1 projection automatically when the shapes differ,
        # so that _make_layer can construct the block without passing it in.
        if downsample is None and (stride != 1 or in_channels != channels * self.expansion):
            downsample = nn.Sequential(
                nn.Conv2d(in_channels, channels * self.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(channels * self.expansion))
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class Bottleneck(nn.Module):
    # 1x1 -> 3x3 -> 1x1 block; the output has expansion * planes channels
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            # 1x1 projection so the shortcut matches the shape of the residual branch
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion * planes))

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


# Define the residual network
class ResNet(nn.Module):
    def __init__(self, block, num_blocks, num_classes=9, embedding_size=256):
        super(ResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.avg_pool = nn.AdaptiveAvgPool2d([4, 1])
        # layer4 outputs 512 * block.expansion channels; the pooled map is 4x1
        self.fc = nn.Linear(512 * block.expansion * 4, embedding_size)
        self.linear = nn.Linear(embedding_size, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the remaining blocks keep stride 1
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_planes, planes, stride))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        x = x.float()
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        embedding = self.fc(out)
        out = self.linear(embedding)
        return out, embedding


# From ResNet-18 up to ResNet-152: pick the size you need; the deeper networks use the
# large residual block. The first argument selects the block type, the second is a list
# giving the number of blocks in each stage.
def ResNet18():
    return ResNet(BasicBlock, [2, 2, 2, 2])

def ResNet34():
    return ResNet(BasicBlock, [3, 4, 6, 3])

def ResNet50():
    return ResNet(Bottleneck, [3, 4, 6, 3])

def ResNet101():
    return ResNet(Bottleneck, [3, 4, 23, 3])

def ResNet152():
    return ResNet(Bottleneck, [3, 8, 36, 3])

Summary: With the ResNet structure in place, the phenomenon where ever-deeper networks show growing error on the training set disappears: a ResNet's training error keeps decreasing as layers are added, and its performance on the test set improves as well. The reason is that ResNet learns the residual function F(x) = H(x) - x; if F(x) = 0, the block reduces to the identity mapping mentioned above. In other words, the shortcut connections make the identity mapping a special case of the block: when the learned residual is 0, the shortcut introduces no extra parameters or computational complexity and does not hurt accuracy. And when the target of optimization is close to an identity mapping but the residual is not 0, it is easier for the network to learn a small perturbation around the identity than to learn an entirely new mapping from scratch.
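Finally, a quick sanity check of the constructors defined above (a minimal sketch; the batch size and the 1-channel 64×64 dummy input are assumptions chosen only to match conv1, and the output sizes reflect the defaults num_classes=9 and embedding_size=256 from the code):

# Build the 50-layer network defined above and run a dummy batch through it.
model = ResNet50()                     # Bottleneck blocks, [3, 4, 6, 3] stages
dummy = torch.randn(2, 1, 64, 64)      # (batch, channels, height, width); conv1 expects 1 input channel
logits, embedding = model(dummy)       # forward() returns (class scores, embedding)
print(logits.shape)                    # torch.Size([2, 9])   - num_classes defaults to 9
print(embedding.shape)                 # torch.Size([2, 256]) - embedding_size defaults to 256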
