import torch
import torch.nn as nn
import torchvision.ops as ops


class Net(nn.Module):
    def __init__(self, in_c, out_c, k=3):
        super().__init__()
        p = (k - 1) // 2
        # 2*k*k channels for the (x, y) offsets, k*k channels for the modulation mask
        self.split_size = (2 * k * k, k * k)
        self.conv_offset = nn.Conv2d(in_c, 3 * k * k, k, padding=p)
        self.conv_deform = ops.DeformConv2d(in_c, out_c, k, padding=p)
        # initialize: zero offsets so sampling starts on the regular kernel grid
        # (zero mask logits give a uniform sigmoid weight of 0.5)
        nn.init.constant_(self.conv_offset.weight, 0)
        nn.init.constant_(self.conv_offset.bias, 0)
        nn.init.kaiming_normal_(self.conv_deform.weight, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        offset, mask = torch.split(self.conv_offset(x), self.split_size, dim=1)
        mask = torch.sigmoid(mask)
        y = self.conv_deform(x, offset, mask)
        return y


if __name__ == '__main__':
    input = torch.rand(4, 3, 240, 320)
    net = Net(3, 7, 3)

    # deformable conv forward pass
    output = net(input)
    print(output.shape)  # torch.Size([4, 7, 240, 320])

    # optimizer: give the offset branch a 10x smaller learning rate than the rest
    lr = 0.01
    ids = list(map(id, net.conv_offset.parameters()))
    base_param = filter(lambda p: id(p) not in ids, net.parameters())
    optimizer = torch.optim.SGD(
        [
            {'params': base_param},
            {'params': net.conv_offset.parameters(), 'lr': 0.1 * lr}
        ],
        lr=lr, momentum=0.9
    )
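As a minimal usage sketch, the optimizer built above can be stepped like any other PyTorch optimizer. The random target tensor and MSE loss below are arbitrary placeholders chosen only to show one update; they are not part of the original script. These lines would continue from output and optimizer inside the __main__ block:

    # hypothetical dummy target and loss, purely to demonstrate one update step
    target = torch.rand_like(output)
    loss = nn.functional.mse_loss(output, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

Because the offset branch sits in its own parameter group, this single step already applies the reduced learning rate (0.1 * lr) to conv_offset while the deformable conv weights use the base lr.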