Source code for openspeech.modules.conformer_block

# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import torch.nn as nn
from torch import Tensor

from openspeech.modules.conformer_attention_module import MultiHeadedSelfAttentionModule
from openspeech.modules.conformer_convolution_module import ConformerConvModule
from openspeech.modules.conformer_feed_forward_module import FeedForwardModule
from openspeech.modules.residual_connection_module import ResidualConnectionModule


class ConformerBlock(nn.Module):
    r"""
    Conformer block contains two Feed Forward modules sandwiching the Multi-Headed Self-Attention module
    and the Convolution module. This sandwich structure is inspired by Macaron-Net, which proposes replacing
    the original feed-forward layer in the Transformer block with two half-step feed-forward layers,
    one before the attention layer and one after.

    Args:
        encoder_dim (int, optional): Dimension of conformer encoders
        num_attention_heads (int, optional): Number of attention heads
        feed_forward_expansion_factor (int, optional): Expansion factor of feed forward module
        conv_expansion_factor (int, optional): Expansion factor of conformer convolution module
        feed_forward_dropout_p (float, optional): Probability of feed forward module dropout
        attention_dropout_p (float, optional): Probability of attention module dropout
        conv_dropout_p (float, optional): Probability of conformer convolution module dropout
        conv_kernel_size (int or tuple, optional): Size of the convolving kernel
        half_step_residual (bool): Flag indicating whether to use half-step residual or not

    Inputs: inputs
        - **inputs** (batch, time, dim): Tensor containing input vector

    Returns: outputs
        - **outputs** (batch, time, dim): Tensor produced by the conformer block.
    """

    def __init__(
        self,
        encoder_dim: int = 512,
        num_attention_heads: int = 8,
        feed_forward_expansion_factor: int = 4,
        conv_expansion_factor: int = 2,
        feed_forward_dropout_p: float = 0.1,
        attention_dropout_p: float = 0.1,
        conv_dropout_p: float = 0.1,
        conv_kernel_size: int = 31,
        half_step_residual: bool = True,
    ) -> None:
        super(ConformerBlock, self).__init__()
        if half_step_residual:
            self.feed_forward_residual_factor = 0.5
        else:
            self.feed_forward_residual_factor = 1

        self.sequential = nn.Sequential(
            ResidualConnectionModule(
                module=FeedForwardModule(
                    encoder_dim=encoder_dim,
                    expansion_factor=feed_forward_expansion_factor,
                    dropout_p=feed_forward_dropout_p,
                ),
                module_factor=self.feed_forward_residual_factor,
            ),
            ResidualConnectionModule(
                module=MultiHeadedSelfAttentionModule(
                    d_model=encoder_dim,
                    num_heads=num_attention_heads,
                    dropout_p=attention_dropout_p,
                ),
            ),
            ResidualConnectionModule(
                module=ConformerConvModule(
                    in_channels=encoder_dim,
                    kernel_size=conv_kernel_size,
                    expansion_factor=conv_expansion_factor,
                    dropout_p=conv_dropout_p,
                ),
            ),
            ResidualConnectionModule(
                module=FeedForwardModule(
                    encoder_dim=encoder_dim,
                    expansion_factor=feed_forward_expansion_factor,
                    dropout_p=feed_forward_dropout_p,
                ),
                module_factor=self.feed_forward_residual_factor,
            ),
            nn.LayerNorm(encoder_dim),
        )

    def forward(self, inputs: Tensor) -> Tensor:
        return self.sequential(inputs)
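
A minimal usage sketch (not part of the library source), assuming the openspeech package and its conformer submodules are importable and that the block receives encoder features of shape (batch, time, encoder_dim):

    import torch

    from openspeech.modules.conformer_block import ConformerBlock

    # Build a block with the default hyperparameters defined above.
    block = ConformerBlock(encoder_dim=512, num_attention_heads=8)

    # Dummy batch of encoder features: (batch, time, dim).
    inputs = torch.randn(4, 100, 512)

    # The block preserves the input shape, so outputs is (4, 100, 512).
    outputs = block(inputs)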