From 40e1799adcf75d76451c9bcd1b6146bfe8a1f5b7 Mon Sep 17 00:00:00 2001
From: mrq
Date: Mon, 19 Aug 2024 01:03:35 -0500
Subject: [PATCH] fixed xformers and flash_attn to actually work now

---
 vall_e/models/arch/llama.py | 141 +++++++++++++++++-------------------
 vall_e/models/base.py       |   9 +--
 2 files changed, 68 insertions(+), 82 deletions(-)

diff --git a/vall_e/models/arch/llama.py b/vall_e/models/arch/llama.py
index 293c706..5dff62d 100644
--- a/vall_e/models/arch/llama.py
+++ b/vall_e/models/arch/llama.py
@@ -1,5 +1,6 @@
 # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
 
+import math
 import torch
 
 from typing import Literal, overload, Optional, Tuple
@@ -82,15 +83,13 @@ try:
 except Exception as e:
 	print("Error while querying for `flash_attn` | support", e)
 
-"""
 try:
-	from xformers.ops import LowerTriangularMask
 	from xformers.ops.fmha import memory_efficient_attention
+	from xformers.ops.fmha.attn_bias import LowerTriangularFromBottomRightMask, LowerTriangularMask
 
 	AVAILABLE_ATTENTIONS.append("xformers")
 except Exception as e:
 	print("Error while importing `xformers`", e)
-"""
 
 if torch.backends.cuda.flash_sdp_enabled():
 	AVAILABLE_ATTENTIONS.append("flash")
@@ -126,7 +125,7 @@ class LlamaAttention_Adapted(LlamaAttention):
 
 		super().__init__(*args, **kwargs)
 
-	# Adapted from LlamaAttention.forward 
+	# Adapted from LlamaAttention.forward
 	def forward(
 		self,
 		hidden_states: torch.Tensor,
@@ -152,6 +151,7 @@ class LlamaAttention_Adapted(LlamaAttention):
 				position_embeddings=position_embeddings,
 			)
 
+		dropout_rate = self.attention_dropout if self.training else 0.0
 		bsz, q_len, _ = hidden_states.size()
 
 		query_states = self.q_proj(hidden_states)
@@ -173,6 +173,60 @@ class LlamaAttention_Adapted(LlamaAttention):
 			cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
 			key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
 
+		if self.mode in ["xformers", "flash_attn"]:
+			# TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+			# to be able to avoid many of these transpose/reshape/view.
+			query_states = query_states.transpose(1, 2)
+			key_states = key_states.transpose(1, 2)
+			value_states = value_states.transpose(1, 2)
+
+			"""
+			# In PEFT, usually we cast the layer norms in float32 for training stability reasons
+			# therefore the input hidden states gets silently casted in float32. Hence, we need
+			# cast them back in the correct dtype just to be sure everything works as expected.
+			# This might slowdown training & inference so it is recommended to not cast the LayerNorms
+			# in fp32. (LlamaRMSNorm handles it correctly)
+
+			input_dtype = query_states.dtype
+			if input_dtype == torch.float32:
+				if torch.is_autocast_enabled():
+					target_dtype = torch.get_autocast_gpu_dtype()
+				# Handle the case where the model is quantized
+				elif hasattr(self.config, "_pre_quantization_dtype"):
+					target_dtype = self.config._pre_quantization_dtype
+				else:
+					target_dtype = self.q_proj.weight.dtype
+
+				query_states = query_states.to(target_dtype)
+				key_states = key_states.to(target_dtype)
+				value_states = value_states.to(target_dtype)
+			"""
+
+			if self.mode == "flash_attn":
+				attn_output = flash_attn_func(
+					query_states,
+					key_states,
+					value_states,
+					causal=True,
+					softmax_scale=1.0 / math.sqrt(self.head_dim),
+					dropout_p=dropout_rate,
+				)
+
+				attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
+			elif self.mode == "xformers":
+				attn_output = memory_efficient_attention(
+					query_states,
+					key_states,
+					value_states,
+					attn_bias = LowerTriangularMask() if attention_mask is None or attention_mask[0, 0, 0, 1] == 0 else None,
+					scale = 1.0 / math.sqrt(self.head_dim),
+					p=dropout_rate
+				)
+				attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+			attn_output = self.o_proj(attn_output)
+			return attn_output, None, past_key_value
+
 		key_states = repeat_kv(key_states, self.num_key_value_groups)
 		value_states = repeat_kv(value_states, self.num_key_value_groups)
 
@@ -190,85 +244,20 @@ class LlamaAttention_Adapted(LlamaAttention):
 		# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
 		# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
 		is_causal = True if causal_mask is None and q_len > 1 else False
-		
-		if self.mode == "flash_attn":
-			attn_output = flash_attn_func(
+
+		with torch.nn.attention.sdpa_kernel(self.mode):
+			attn_output = torch.nn.functional.scaled_dot_product_attention(
 				query_states,
 				key_states,
 				value_states,
-				causal=True,
-				softmax_scale=None, # 1, / math.sqrt(cfg.head_dim),
-				dropout_p=self.attention_dropout if self.training else 0.0,
+				attn_mask=causal_mask,
+				dropout_p=dropout_rate,
+				is_causal=is_causal,
 			)
-		else:
-			with torch.nn.attention.sdpa_kernel(self.mode):
-				attn_output = torch.nn.functional.scaled_dot_product_attention(
-					query_states,
-					key_states,
-					value_states,
-					attn_mask=causal_mask,
-					dropout_p=self.attention_dropout if self.training else 0.0,
-					is_causal=is_causal,
-				)
 
 		attn_output = attn_output.transpose(1, 2).contiguous()
 		attn_output = attn_output.view(bsz, q_len, -1)
 
 		attn_output = self.o_proj(attn_output)
 
-		return attn_output, None, past_key_value
-
-	"""
-	def forward(
-		self,
-		hidden_states: torch.Tensor,
-		attention_mask: Optional[torch.Tensor] = None,
-		position_ids: Optional[torch.LongTensor] = None,
-		past_key_value: Optional[Cache] = None,
-		output_attentions: bool = False,
-		use_cache: bool = False,
-		cache_position: Optional[torch.LongTensor] = None,
-		**kwargs,
-	) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-		bsz, q_len, _ = hidden_states.size()
-
-		query_states = self.q_proj(hidden_states)
-		key_states = self.k_proj(hidden_states)
-		value_states = self.v_proj(hidden_states)
-
-		query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-		key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
-		value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
-
-		cos, sin = self.rotary_emb(value_states, position_ids)
-		query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
-
-		past_key_value = getattr(self, "past_key_value", past_key_value)
-
-		if past_key_value is not None:
-			# sin and cos are specific to RoPE models; cache_position needed for the static cache
-			cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
-			key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
-
-		query_states = query_states.transpose(1, 2)
-		key_states = key_states.transpose(1, 2)
-		value_states = value_states.transpose(1, 2)
-
-		dropout_rate = self.attention_dropout if self.training else 0.0
-
-		if self.mode == "xformers":
-			if attention_mask is None or attention_mask[0, 0, 0, 1] == 0:
-				attn_output = memory_efficient_attention(query_states, key_states, value_states, attn_bias=None, p=dropout_rate)
-			else:
-				attn_output = memory_efficient_attention(query_states, key_states, value_states, attn_bias=LowerTriangularMask(), p=dropout_rate)
-		else:
-			with torch.backends.cuda.sdp_kernel(enable_flash=self.mode == "flash", enable_math=self.mode == "math", enable_mem_efficient=self.mode == "mem_efficient"):
-				attn_output = torch.nn.functional.scaled_dot_product_attention(query_states, key_states, value_states, attn_mask=attention_mask, dropout_p=dropout_rate)
-
-		attn_weights = None
-
-		attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-		attn_output = self.o_proj(attn_output)
-
-		return attn_output, attn_weights, past_key_value
-	"""
\ No newline at end of file
+		return attn_output, None, past_key_value
\ No newline at end of file
diff --git a/vall_e/models/base.py b/vall_e/models/base.py
index 2fd4449..7eb70fc 100755
--- a/vall_e/models/base.py
+++ b/vall_e/models/base.py
@@ -522,9 +522,6 @@ class Base(nn.Module):
 		else:
 			attention_backend = "eager"
 
-		if attention_backend == "xformers":
-			attention_backend = "mem_efficient"
-
 		hf_attention = attention_backend
 
 		if attention_backend in ["xformers", "mem_efficient", "math", "flash", "cudnn", "flash_attn"]:
@@ -579,7 +576,7 @@ class Base(nn.Module):
 				attn_implementation=hf_attention,
 				#gradient_checkpointing=self.gradient_checkpointing,
 			))
-			if attention_backend in ["mem_efficient", "math", "flash", "cudnn", "auto", "flash_attn"]:
+			if attention_backend in ["xformers", "mem_efficient", "math", "flash", "cudnn", "auto", "flash_attn"]:
 				self.model = ml.replace_attention( self.model, klass=MixtralAttention_Adapted, target=MixtralAttention, mode=attention_backend )
 
 			if self.gradient_checkpointing and not self.model.gradient_checkpointing:
@@ -604,7 +601,7 @@ class Base(nn.Module):
 				attn_implementation=hf_attention,
 				#gradient_checkpointing=self.gradient_checkpointing,
 			))
-			if attention_backend in ["mem_efficient", "math", "flash", "cudnn", "auto", "flash_attn"]:
+			if attention_backend in ["xformers", "mem_efficient", "math", "flash", "cudnn", "auto", "flash_attn"]:
 				self.model = ml.replace_attention( self.model, klass=LlamaAttention_Adapted, target=LlamaAttention, mode=attention_backend )
 		else:
 			self.model = MixtralModel(MixtralConfig(
@@ -626,7 +623,7 @@ class Base(nn.Module):
 				attn_implementation=hf_attention,
 				#gradient_checkpointing=self.gradient_checkpointing,
 			))
-			if attention_backend in ["mem_efficient", "math", "flash", "cudnn", "auto", "flash_attn"]:
+			if attention_backend in ["xformers", "mem_efficient", "math", "flash", "cudnn", "auto", "flash_attn"]:
 				self.model = ml.replace_attention( self.model, klass=MixtralAttention_Adapted, target=MixtralAttention, mode=attention_backend )
 
 			if self.gradient_checkpointing and not self.model.gradient_checkpointing:
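
A minimal standalone sketch (not part of the patch) of the xformers path wired in above: it assumes xformers and a CUDA device are available and uses illustrative shapes, and checks that memory_efficient_attention with a LowerTriangularMask agrees with PyTorch's causal SDPA, accounting for the [batch, seq_len, n_heads, head_dim] layout that both xformers and flash_attn expect.

# sanity_check_xformers.py (illustrative sketch, not part of the patch)
import math
import torch
from xformers.ops.fmha import memory_efficient_attention
from xformers.ops.fmha.attn_bias import LowerTriangularMask

bsz, q_len, n_heads, head_dim = 2, 32, 4, 64
# xformers and flash_attn take [batch, seq_len, n_heads, head_dim]
q = torch.randn(bsz, q_len, n_heads, head_dim, device="cuda", dtype=torch.float16)
k, v = torch.randn_like(q), torch.randn_like(q)

out_xf = memory_efficient_attention(
	q, k, v,
	attn_bias=LowerTriangularMask(),
	scale=1.0 / math.sqrt(head_dim),
)

# PyTorch SDPA takes [batch, n_heads, seq_len, head_dim], hence the transposes
out_ref = torch.nn.functional.scaled_dot_product_attention(
	q.transpose(1, 2), k.transpose(1, 2), v.transpose(1, 2),
	is_causal=True,
).transpose(1, 2)

# the two paths should agree up to fp16 rounding error
print((out_xf - out_ref).abs().max())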