Source code for openspeech.criterion.cross_entropy.cross_entropy

# MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import torch.nn as nn
from torch import Tensor
from omegaconf import DictConfig

from .. import register_criterion
from ..cross_entropy.configuration import CrossEntropyLossConfigs
from ...tokenizers.tokenizer import Tokenizer


@register_criterion("cross_entropy", dataclass=CrossEntropyLossConfigs)
class CrossEntropyLoss(nn.Module):
    r"""
    The negative log likelihood loss. It is useful to train a classification problem with `C` classes.

    If provided, the optional argument :attr:`weight` should be a 1D Tensor assigning weight to each of
    the classes. This is particularly useful when you have an unbalanced training set.

    The `input` given through a forward call is expected to contain log-probabilities of each class.
    `input` has to be a Tensor of size either :math:`(minibatch, C)` or
    :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1` for the `K`-dimensional case
    (described later).

    Obtaining log-probabilities in a neural network is easily achieved by adding a `LogSoftmax` layer
    as the last layer of your network. Note that this class wraps :class:`torch.nn.CrossEntropyLoss`,
    which applies `LogSoftmax` internally, so raw (unnormalized) scores may also be passed without an
    extra layer.

    The `target` that this loss expects should be a class index in the range :math:`[0, C-1]` where
    `C = number of classes`; if `ignore_index` is specified, this loss also accepts this class index
    (this index may not necessarily be in the class range).

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = - w_{y_n} x_{n,y_n}, \quad
        w_{c} = \text{weight}[c] \cdot \mathbb{1}\{c \not= \text{ignore\_index}\},

    where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
    :math:`N` is the batch size. If :attr:`reduction` is not ``'none'`` (default ``'mean'``), then

    .. math::
        \ell(x, y) = \begin{cases}
            \sum_{n=1}^N \frac{1}{\sum_{n=1}^N w_{y_n}} l_n, & \text{if reduction} = \text{`mean';}\\
            \sum_{n=1}^N l_n, & \text{if reduction} = \text{`sum'.}
        \end{cases}

    Can also be used for higher dimension inputs, such as 2D images, by providing an input of size
    :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`, where :math:`K` is the number of
    dimensions, and a target of appropriate shape (see below). In the case of images, it computes NLL
    loss per-pixel.

    Args:
        configs (DictConfig): hydra configuration set
        tokenizer (Tokenizer): tokenizer is in charge of preparing the inputs for a model.

    Inputs: logits, targets
        - logits (torch.FloatTensor): log-probability distribution output by the model.
            The `FloatTensor` of size ``(batch, seq_length, num_classes)``
        - targets (torch.LongTensor): ground-truth transcripts encoded as integers, where each value
            indexes a token in the vocabulary. The `LongTensor` of size ``(batch, target_length)``

    Returns: loss
        * loss (float): loss for training

    Examples::

        >>> B, T1, C, T2 = 3, 128, 4, 10
        >>> loss = CrossEntropyLoss(configs, tokenizer)
        >>> inputs = torch.randn(B, T1, C, requires_grad=True)
        >>> targets = torch.empty(B, T2, dtype=torch.long).random_(C)
        >>> outputs = loss(inputs, targets)
        >>> outputs.backward()
    """

    def __init__(
        self,
        configs: DictConfig,
        tokenizer: Tokenizer,
    ) -> None:
        super(CrossEntropyLoss, self).__init__()
        self.cross_entropy_loss = nn.CrossEntropyLoss(
            reduction=configs.criterion.reduction,
            ignore_index=tokenizer.pad_id,
        )

    def forward(self, logits: Tensor, targets: Tensor) -> Tensor:
        # Align sequence lengths by truncating whichever tensor is longer.
        max_target_length = targets.size(1)
        max_logits_length = logits.size(1)

        if max_logits_length > max_target_length:
            logits = logits[:, :max_target_length, :]
        elif max_target_length > max_logits_length:
            targets = targets[:, :max_logits_length]

        # Flatten to (batch * length, num_classes) and (batch * length,) for nn.CrossEntropyLoss.
        logits = logits.contiguous().view(-1, logits.size(-1))

        return self.cross_entropy_loss(
            logits,
            targets.contiguous().view(-1),
        )
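As a sketch of how this criterion can be exercised on its own, outside a full Hydra/openspeech training run, the snippet below builds a minimal config and a dummy tokenizer, then checks the ``'mean'`` reduction against the formula in the docstring. ``DummyTokenizer`` and the literal config values are illustrative assumptions; the criterion itself only reads ``configs.criterion.reduction`` and ``tokenizer.pad_id``.

# Minimal usage sketch (assumption: DummyTokenizer and the config values below are
# illustrative stand-ins, not part of openspeech).
import torch
import torch.nn.functional as F
from omegaconf import OmegaConf

from openspeech.criterion.cross_entropy.cross_entropy import CrossEntropyLoss


class DummyTokenizer:
    """Stand-in exposing only the attribute the criterion reads."""
    pad_id = 0


torch.manual_seed(0)

configs = OmegaConf.create({"criterion": {"reduction": "mean"}})
criterion = CrossEntropyLoss(configs=configs, tokenizer=DummyTokenizer())

batch, seq_length, num_classes, target_length = 3, 12, 4, 10
logits = torch.randn(batch, seq_length, num_classes, requires_grad=True)
targets = torch.randint(0, num_classes, (batch, target_length))

# forward() truncates logits to the target length (12 -> 10), flattens both tensors,
# and delegates to nn.CrossEntropyLoss with pad_id (= 0 here) ignored.
loss = criterion(logits, targets)

# Check the 'mean' reduction from the docstring: the sum of per-token losses over
# non-ignored targets, divided by the number of non-ignored targets.
per_token = F.cross_entropy(
    logits[:, :target_length, :].reshape(-1, num_classes),
    targets.reshape(-1),
    reduction="none",
)
mask = targets.reshape(-1) != DummyTokenizer.pad_id
assert torch.isclose(loss, per_token[mask].sum() / mask.sum())

loss.backward()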