From 09d81b44a788b5336672c352023cbcd7c5130639 Mon Sep 17 00:00:00 2001
From: Yuekai Zhang
Date: Mon, 21 Apr 2025 17:10:25 +0800
Subject: [PATCH] change padding side name

---
 egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py | 4 ++--
 egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py
index 22f627ecc..fb3921ba3 100644
--- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py
+++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/model.py
@@ -59,7 +59,7 @@ class SPEECH_LLM(nn.Module):
         llm: nn.Module,
         encoder_projector: nn.Module,
         codec_lm: nn.Module = None,
-        use_flash_attention: bool = False,
+        codec_lm_padding_side: str = "left",
     ):
         super().__init__()
         self.encoder = encoder
@@ -74,7 +74,7 @@ class SPEECH_LLM(nn.Module):
             self.codec_lm.config.hidden_size, self.codec_lm.config.vocab_size
         )
         self.loss_fct = torch.nn.CrossEntropyLoss()
-        self.codec_lm_padding_side = "left" if use_flash_attention else "right"
+        self.codec_lm_padding_side = codec_lm_padding_side
 
     def _merge_input_ids_with_speech_features(
         self, speech_features, inputs_embeds, input_ids, attention_mask, labels=None
diff --git a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py
index 143c10c68..ef7e7a464 100755
--- a/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py
+++ b/egs/speech_llm/SPEECH2SPEECH/slam_omni/train.py
@@ -793,7 +793,7 @@ def run(rank, world_size, args):
         llm,
         encoder_projector,
         codec_lm,
-        params.use_flash_attn,
+        codec_lm_padding_side="left" if params.use_flash_attn else "right",
     )
 
     if params.pretrained_model_path: