# lifted from https://gist.github.com/pszemraj/e88ff24ab296b6d89057376b299b368a
# to-do: make this work with LoRAs; it currently complains when adapters are loaded
# Copyright 2023-present Daniel Han-Chen & the Unsloth team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect

import torch
import transformers


class Unsloth_Offloaded_Gradient_Checkpointer(torch.autograd.Function):
    """
    Saves VRAM by offloading checkpointed activations to CPU RAM instead of
    keeping them GPU-resident. The transfer cost is largely hidden behind
    non-blocking copies, so the performance hit is small.
    """

    @staticmethod
    @torch.cuda.amp.custom_fwd
    def forward(ctx, forward_function, hidden_states, *args):
        # Kick off an async copy of the input activations to CPU; the GPU
        # keeps working while the transfer is in flight.
        saved_hidden_states = hidden_states.to("cpu", non_blocking=True)
        with torch.no_grad():
            output = forward_function(hidden_states, *args)
        ctx.save_for_backward(saved_hidden_states)
        ctx.forward_function = forward_function
        ctx.args = args
        return output

    @staticmethod
    @torch.cuda.amp.custom_bwd
    def backward(ctx, dY):
        (hidden_states,) = ctx.saved_tensors
        # Bring the saved activations back to the GPU and rerun the forward
        # pass with gradients enabled to rebuild the local autograd graph.
        hidden_states = hidden_states.to("cuda", non_blocking=True).detach()
        hidden_states.requires_grad = True
        with torch.enable_grad():
            (output,) = ctx.forward_function(hidden_states, *ctx.args)
        torch.autograd.backward(output, dY)
        # Gradients returned positionally: None for forward_function, the
        # input gradient for hidden_states, and None for every extra argument.
        return (
            None,
            hidden_states.grad,
        ) + (None,) * len(ctx.args)


def new_gradient_checkpointing_enable(self, gradient_checkpointing_kwargs=None):
    # The offloaded checkpointer takes no options (e.g. `use_reentrant`), so
    # any user-supplied kwargs are deliberately discarded.
    gradient_checkpointing_kwargs = None
    if not self.supports_gradient_checkpointing:
        raise ValueError(
            f"{self.__class__.__name__} does not support gradient checkpointing."
        )

    gradient_checkpointing_func = Unsloth_Offloaded_Gradient_Checkpointer.apply

    # For the old GC format (transformers < 4.35.0) used by models that live
    # on the Hub, transformers falls back to an overridden
    # `_set_gradient_checkpointing` method, which this patch does not support.
    _is_using_old_format = (
        "value" in inspect.signature(self._set_gradient_checkpointing).parameters
    )

    if not _is_using_old_format:
        self._set_gradient_checkpointing(
            enable=True, gradient_checkpointing_func=gradient_checkpointing_func
        )
    else:
        raise NotImplementedError(
            "Models using the pre-4.35.0 `_set_gradient_checkpointing` format "
            "are not supported by the offloaded checkpointer."
        )

    if getattr(self, "_hf_peft_config_loaded", False):
        # When using PEFT + gradient checkpointing + Trainer we need to make
        # sure the input has requires_grad=True. PEFT does the same:
        # https://github.com/huggingface/peft/blob/85013987aa82aa1af3da1236b6902556ce3e483e/src/peft/peft_model.py#L334
        # With PEFT, only the LoRA layers have requires_grad=True, but the
        # outputs of frozen layers must still propagate gradients so the
        # gradient flow is not cut off.
        self.enable_input_require_grads()


def apply_unsloth_offloaded_gradient_checkpoint_monkey_patch():
    transformers.modeling_utils.PreTrainedModel.gradient_checkpointing_enable = (
        new_gradient_checkpointing_enable
    )
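

# Minimal usage sketch: assumes a CUDA device is available, and the "gpt2"
# checkpoint below is only a placeholder; swap in whatever model you are
# actually training.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM

    # The patch must be applied before enabling checkpointing, since it
    # replaces `PreTrainedModel.gradient_checkpointing_enable` itself.
    apply_unsloth_offloaded_gradient_checkpoint_monkey_patch()

    model = AutoModelForCausalLM.from_pretrained("gpt2").cuda()
    model.gradient_checkpointing_enable()  # routes through the offloaded checkpointer
    model.train()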