【Darknet-53】YOLOv3 Backbone Darknet-53 Explained


Contents
  • 1 Model Compute and Parameter Count
  • 2 The Darknet-53 Network
  • 3 Acknowledgements

1 Model Compute and Parameter Count

There are two common ways to measure a model's compute cost (multiply-adds) and parameter count: the thop library and the torchsummaryX library. Install them as follows; a minimal usage sketch follows the list.


  • Install thop: pip install thop
  • Install torchsummaryX: pip install torchsummaryX
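
As a minimal sketch of the two APIs (the two-conv toy network below is made up purely for illustration; the real measurement on Darknet-53 comes in the next section):

import torch
import torch.nn as nn
from thop import profile
from torchsummaryX import summary

# A tiny stand-in model, just to show the two profiling calls
toy = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.ReLU(),
    nn.Conv2d(16, 32, kernel_size=3, padding=1),
)

x = torch.randn(1, 3, 64, 64)               # NCHW dummy input

macs, params = profile(toy, inputs=(x,))    # thop: returns (MACs, params)
print(macs, params)

summary(toy, x)                             # torchsummaryX: prints a per-layer table
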
2 The Darknet-53 Network

The code below can be run as-is; read it together with the comments and the printed results.

In this example, darknet53 is used as the backbone network of YOLOv3.

import math
from collections import OrderedDict
import torch.nn as nn
import torch

from torchsummaryX import summary
from thop import profile

# ---------------------------------------------------------------------#
#   Residual block
#   A 1x1 convolution reduces the channel count, then a 3x3 convolution
#   extracts features and restores the channel count; a shortcut
#   (residual) connection is added at the end
# ---------------------------------------------------------------------#

# --------------- Basic residual block ---------------
class BasicBlock(nn.Module):
    def __init__(self, inplanes, planes):       # inplanes is an int; planes is a list of two channel counts
        super(BasicBlock, self).__init__()      # 1x1 conv shrinks channels, 3x3 conv expands them back, keeping the parameter count low
        self.conv1 = nn.Conv2d(inplanes, planes[0], kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes[0])
        self.relu1 = nn.LeakyReLU(0.1)

        self.conv2 = nn.Conv2d(planes[0], planes[1], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes[1])
        self.relu2 = nn.LeakyReLU(0.1)

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu2(out)

        out += residual
        return out


class DarkNet(nn.Module):
    def __init__(self, layers):
        super(DarkNet, self).__init__()
        self.inplanes = 32
        # H,W,C:416,416,3 -> 416,416,32
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu1 = nn.LeakyReLU(0.1)

        # ---------------------------------------#
        #   Stack residual blocks with _make_layer
        # ---------------------------------------#
        # 416,416,32 -> 208,208,64
        self.layer1 = self._make_layer([32, 64], layers[0])     # self._make_layer(planes, blocks)
        # 208,208,64 -> 104,104,128
        self.layer2 = self._make_layer([64, 128], layers[1])
        # 104,104,128 -> 52,52,256
        self.layer3 = self._make_layer([128, 256], layers[2])
        # 52,52,256 -> 26,26,512
        self.layer4 = self._make_layer([256, 512], layers[3])
        # 26,26,512 -> 13,13,1024
        self.layer5 = self._make_layer([512, 1024], layers[4])

        self.layers_out_filters = [64, 128, 256, 512, 1024]  # output channel counts of the five stages

        # Weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    # ---------------------------------------------------------------------#
    #   Each layer first downsamples with a 3x3 convolution of stride 2,
    #   then stacks the residual blocks
    # ---------------------------------------------------------------------#
    def _make_layer(self, planes, blocks):  # blocks is the number of residual blocks to stack
        layers = []
        # ---------------------------------------------#
        #   Downsampling: kernel size 3, stride 2;
        #   halves the feature map height and width and
        #   expands the channel count to planes[1]
        # ---------------------------------------------#
        layers.append(("ds_conv", nn.Conv2d(self.inplanes, planes[1], kernel_size=3, stride=2, padding=1, bias=False)))
        layers.append(("ds_bn", nn.BatchNorm2d(planes[1])))
        layers.append(("ds_relu", nn.LeakyReLU(0.1)))
        # ---------------------------------------------#
        #   Append the residual blocks: a 1x1 conv reduces the
        #   channel count to planes[0], then a 3x3 conv extracts
        #   features and restores the channel count to planes[1]
        # ---------------------------------------------#
        self.inplanes = planes[1]
        for i in range(0, blocks):
            layers.append(("residual_{}".format(i), BasicBlock(self.inplanes, planes)))
        return nn.Sequential(OrderedDict(layers))

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)

        x = self.layer1(x)
        x = self.layer2(x)
        # -----------------------------------------------------#
        #   The last three stages are returned separately because
        #   YOLOv3 uses all three feature maps downstream
        # -----------------------------------------------------#
        out3 = self.layer3(x)           
        out4 = self.layer4(out3)
        out5 = self.layer5(out4)

        return out3, out4, out5


def darknet53():
    model = DarkNet([1, 2, 8, 8, 4])
    return model


if __name__ == '__main__':
    model = darknet53()
    print(model)

    # --------------------------------------#
    #   Measure compute and params with thop
    # --------------------------------------#
    input = torch.randn(1, 3, 416, 416)     # batch_size, channels, height, width (NCHW)
    flops, params = profile(model, inputs=(input,))     # returns MACs and parameter count
    print(flops, params)

    # --------------------------------------#
    #   Measure compute and params with torchsummaryX
    # --------------------------------------#
    summary(model, torch.zeros(1, 3, 416, 416))
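
As a quick sanity check of the three outputs, the short sketch below reuses the darknet53() factory defined above; the expected shapes follow from the stride/channel scheme annotated in the code:

import torch

model = darknet53()
out3, out4, out5 = model(torch.randn(1, 3, 416, 416))
print(out3.shape)   # torch.Size([1, 256, 52, 52])
print(out4.shape)   # torch.Size([1, 512, 26, 26])
print(out5.shape)   # torch.Size([1, 1024, 13, 13])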

Output from thop. Each warning means thop has no counting rule for that module, so it treats it as zero MACs and zero parameters:

[INFO] Register count_convNd() for .
[WARN] Cannot find rule for . Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for . Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for . Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for . Treat it as zero Macs and zero Params.
[WARN] Cannot find rule for . Treat it as zero Macs and zero Params.
24515805184.0 40549216.0    # MACs, parameter count (≈24.52G MACs, ≈40.55M params)
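
Two thop conveniences are worth knowing: clever_format renders the raw counts as human-readable strings, and custom_ops registers a counting rule for modules thop does not recognize (which also silences the warnings above). A sketch on a fresh darknet53() instance; the one-op-per-output-element rule for LeakyReLU is our own assumption, not a thop default:

import torch
import torch.nn as nn
from thop import clever_format, profile

# Our own counting rule for LeakyReLU: one op per output element
def count_leaky_relu(m, x, y):
    m.total_ops += torch.DoubleTensor([int(y.numel())])

net = darknet53()                       # fresh instance: profile() mutates the model
x = torch.randn(1, 3, 416, 416)
macs, params = profile(net, inputs=(x,),
                       custom_ops={nn.LeakyReLU: count_leaky_relu})
macs, params = clever_format([macs, params], "%.3f")
print(macs, params)                     # slightly above 24.5G once activations are counted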

Output from torchsummaryX. It breaks the model down layer by layer, showing each layer's parameter count and multiply-adds:

====================================================================================================
                                            Kernel Shape        Output Shape  \
Layer                                                                          
0_conv1                                    [3, 32, 3, 3]   [1, 32, 416, 416]   
1_bn1                                               [32]   [1, 32, 416, 416]   
2_relu1                                                -   [1, 32, 416, 416]   
3_layer1.Conv2d_ds_conv                   [32, 64, 3, 3]   [1, 64, 208, 208]   
4_layer1.BatchNorm2d_ds_bn                          [64]   [1, 64, 208, 208]   
...
                                          Params    Mult-Adds  
Layer                                                          
0_conv1                                    864.0  149.520384M  
1_bn1                                       64.0         32.0  
2_relu1                                        -            -  
3_layer1.Conv2d_ds_conv                  18.432k  797.442048M  
...
153_layer5.residual_3.Conv2d_conv2     4.718592M  797.442048M  
154_layer5.residual_3.BatchNorm2d_bn2     2.048k       1.024k  
155_layer5.residual_3.LeakyReLU_relu2          -            -  
----------------------------------------------------------------------------------------------------
                            Totals
Total params            40.584928M        # parameter count
Trainable params        40.584928M
Non-trainable params           0.0
Mult-Adds             24.51582304G        # MACs
====================================================================================================

The two tools agree on compute (≈24.52G multiply-adds), but their parameter totals differ by 40,584,928 − 40,549,216 = 35,712. That is exactly two parameters (weight and bias) per channel over the network's 17,856 BatchNorm channels, so thop evidently leaves the BatchNorm affine parameters out of its count here.
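
The parameter total is easy to arbitrate with plain PyTorch, which sides with torchsummaryX:

n_params = sum(p.numel() for p in darknet53().parameters())
print(n_params)    # 40584928
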
3 Acknowledgements
https://blog.csdn.net/weixin_44791964/article/details/105310627
