Source code for openspeech.modules.batchnorm_relu_rnn

# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor


class BNReluRNN(nn.Module):
    r"""
    Recurrent neural network with batch normalization layer & ReLU activation function.

    Args:
        input_size (int): size of input
        hidden_state_dim (int): the number of features in the hidden state `h`
        rnn_type (str, optional): type of RNN cell (default: gru)
        bidirectional (bool, optional): if True, becomes a bidirectional encoder (default: True)
        dropout_p (float, optional): dropout probability (default: 0.1)

    Inputs: inputs, input_lengths
        - **inputs** (batch, time, dim): Tensor containing input vectors
        - **input_lengths**: Tensor containing sequence lengths

    Returns: outputs
        - **outputs**: Tensor produced by the BNReluRNN module
    """
    supported_rnns = {
        'lstm': nn.LSTM,
        'gru': nn.GRU,
        'rnn': nn.RNN,
    }

    def __init__(
            self,
            input_size: int,
            hidden_state_dim: int = 512,
            rnn_type: str = 'gru',
            bidirectional: bool = True,
            dropout_p: float = 0.1,
    ):
        super(BNReluRNN, self).__init__()
        self.hidden_state_dim = hidden_state_dim
        self.batch_norm = nn.BatchNorm1d(input_size)
        rnn_cell = self.supported_rnns[rnn_type]
        self.rnn = rnn_cell(
            input_size=input_size,
            hidden_size=hidden_state_dim,
            num_layers=1,
            bias=True,
            batch_first=True,
            dropout=dropout_p,
            bidirectional=bidirectional,
        )

    def forward(self, inputs: Tensor, input_lengths: Tensor):
        # inputs: (batch, time, dim); with batch_first=True the time axis is dim 1
        total_length = inputs.size(1)

        # BatchNorm1d expects (batch, dim, time), so normalize on the transposed view
        inputs = F.relu(self.batch_norm(inputs.transpose(1, 2)))
        inputs = inputs.transpose(1, 2)

        # Pack/unpack with batch_first=True to match the batch-first RNN above
        outputs = nn.utils.rnn.pack_padded_sequence(inputs, input_lengths.cpu(), batch_first=True)
        outputs, hidden_states = self.rnn(outputs)
        outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True, total_length=total_length)

        return outputs
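

# --------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# input layout from the docstring above, (batch, time, dim); the batch size,
# sequence lengths, and feature dimension below are illustrative only.
# Lengths are given in descending order because pack_padded_sequence
# defaults to enforce_sorted=True.
# --------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    batch_size, max_time, feature_dim = 4, 100, 80  # hypothetical sizes
    layer = BNReluRNN(input_size=feature_dim, hidden_state_dim=512, rnn_type='gru')

    inputs = torch.randn(batch_size, max_time, feature_dim)
    input_lengths = torch.tensor([100, 80, 75, 60])  # sorted, longest first

    outputs = layer(inputs, input_lengths)
    # Expected shape: (4, 100, 1024) -- 2 * hidden_state_dim since bidirectional=True
    print(outputs.shape)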