Source code for openspeech.modules.add_normalization

# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import torch.nn as nn


class AddNorm(nn.Module):
    """
    Add & Normalization layer proposed in "Attention Is All You Need".
    The Transformer employs a residual connection around each of the two sub-layers
    (Multi-Head Attention & Feed-Forward), followed by layer normalization.
    """

    def __init__(self, sublayer: nn.Module, d_model: int = 512) -> None:
        super(AddNorm, self).__init__()
        self.sublayer = sublayer
        self.layer_norm = nn.LayerNorm(d_model)

    def forward(self, *args):
        residual = args[0]
        outputs = self.sublayer(*args)

        # Some sub-layers (e.g. multi-head attention) return a tuple of
        # (outputs, attention weights); apply the residual connection and
        # layer normalization to the first element only.
        if isinstance(outputs, tuple):
            return self.layer_norm(outputs[0] + residual), outputs[1]

        return self.layer_norm(outputs + residual)
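
A minimal usage sketch, not part of the original module: it wraps a hypothetical position-wise feed-forward sub-layer with AddNorm. The feed-forward definition, layer sizes, and tensor shapes below are illustrative assumptions.

# Illustrative example (hypothetical sub-layer and shapes), assuming AddNorm is importable.
import torch
import torch.nn as nn

d_model = 512
feed_forward = nn.Sequential(        # example position-wise feed-forward sub-layer
    nn.Linear(d_model, 2048),
    nn.ReLU(),
    nn.Linear(2048, d_model),
)
add_norm = AddNorm(feed_forward, d_model=d_model)

inputs = torch.randn(4, 100, d_model)    # (batch, seq_len, d_model)
outputs = add_norm(inputs)               # sub-layer output + residual, then LayerNorm
assert outputs.shape == inputs.shape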