```python
        # '<<' is a bit shift; shifting left by 1 is a fast way to multiply by 2
        assert (self.stride != 1) or (input_c == branch_features << 1)

        if self.stride == 2:
            # when stride == 2, both branches process the input
            # branch 1
            self.branch1 = nn.Sequential(
                self.depthwise_conv(input_c, input_c, kernel_s=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(input_c),
                nn.Conv2d(input_c, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True)
            )
        else:
            # stride == 1: branch 1 is an identity (no processing)
            self.branch1 = nn.Sequential()

        # branch 2: only the stride of the DW conv differs between the two cases
        self.branch2 = nn.Sequential(
            nn.Conv2d(input_c if self.stride > 1 else branch_features, branch_features, kernel_size=1,
                      stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_s=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True)
        )

    # DW (depthwise) conv: groups equals the number of input channels
    @staticmethod
    def depthwise_conv(input_c: int,
                       output_c: int,
                       kernel_s: int,
                       stride: int = 1,
                       padding: int = 0,
                       bias: bool = False) -> nn.Conv2d:
        return nn.Conv2d(in_channels=input_c, out_channels=output_c, kernel_size=kernel_s,
                         stride=stride, padding=padding, bias=bias, groups=input_c)

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            x1, x2 = x.chunk(2, dim=1)  # split channels into two equal halves
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)

        out = channel_shuffle(out, 2)
        return out


class ShuffleNetV2(nn.Module):
    def __init__(self,
                 stages_repeats: List[int],        # number of repeats in each stage
                 stages_out_channels: List[int],   # output channels of each stage
                 num_classes: int = 1000,
                 inverted_residual: Callable[..., nn.Module] = InvertedResidual):
        super(ShuffleNetV2, self).__init__()

        # argument checks
        if len(stages_repeats) != 3:
            raise ValueError("expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError("expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        # input RGB image
        input_channels = 3
        output_channels = self._stage_out_channels[0]

        # first convolution + downsampling
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True)
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # stage modules
        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential

        stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(stage_names, stages_repeats,
                                                  self._stage_out_channels[1:]):
            # the first block of each stage uses stride=2
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        # Conv5
        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True)
        )

        self.fc = nn.Linear(output_channels, num_classes)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = x.mean([2, 3])  # global pool
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def shufflenet_v2_x0_5(num_classes=1000):
    """
    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"`.
    weight: https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth

    :param num_classes:
    :return:
    """
    model = ShuffleNetV2(stages_repeats=[4, 8, 4],
                         stages_out_channels=[24, 48, 96, 192, 1024],
                         num_classes=num_classes)
    return model


def shufflenet_v2_x1_0(num_classes=1000):
    """
    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"`.
    weight: https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth

    :param num_classes:
    :return:
    """
    model = ShuffleNetV2(stages_repeats=[4, 8, 4],
                         stages_out_channels=[24, 116, 232, 464, 1024],
                         num_classes=num_classes)
    return model


def shufflenet_v2_x1_5(num_classes=1000):
    """
    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"`.
    weight: https://download.pytorch.org/models/shufflenetv2_x1_5-3c479a10.pth

    :param num_classes:
    :return:
    """
    model = ShuffleNetV2(stages_repeats=[4, 8, 4],
                         stages_out_channels=[24, 176, 352, 704, 1024],
                         num_classes=num_classes)
    return model


def shufflenet_v2_x2_0(num_classes=1000):
    """
    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
    `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design"`.
    weight: https://download.pytorch.org/models/shufflenetv2_x2_0-8be3c8ee.pth

    :param num_classes:
    :return:
    """
    model = ShuffleNetV2(stages_repeats=[4, 8, 4],
                         stages_out_channels=[24, 244, 488, 976, 2048],
                         num_classes=num_classes)
    return model
```