class AttentionBlock(nn.Layer):
    """Additive attention gate (Attention U-Net style).

    Projects the gating signal ``g`` and the skip-connection features ``x``
    into a common ``F_out``-channel space, combines them, and derives a
    single-channel spatial attention mask that re-weights ``x``.
    """

    def __init__(self, F_g, F_l, F_out):
        super().__init__()
        # 1x1 conv + BN projection for the gating signal.
        self.W_g = self._project(F_g, F_out)
        # 1x1 conv + BN projection for the skip-connection features.
        self.W_x = self._project(F_l, F_out)
        # 1x1 conv + BN + sigmoid producing the single-channel mask.
        self.psi = nn.Sequential(
            nn.Conv2D(F_out, 1, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2D(1),
            nn.Sigmoid(),
        )
        self.relu = nn.ReLU()

    @staticmethod
    def _project(in_channels, out_channels):
        """Return a 1x1 Conv2D + BatchNorm2D projection block."""
        return nn.Sequential(
            nn.Conv2D(in_channels, out_channels,
                      kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2D(out_channels),
        )

    def forward(self, g, x):
        """Gate ``x`` with an attention mask computed from ``g`` and ``x``.

        Args:
            g: Gating feature map with ``F_g`` channels.
            x: Skip-connection feature map with ``F_l`` channels.

        Returns:
            ``x`` multiplied elementwise by the attention mask.
        """
        combined = self.relu(self.W_g(g) + self.W_x(x))
        mask = self.psi(combined)
        return x * mask