import logging
import os
import re
from contextlib import contextmanager
from functools import wraps
from typing import Optional, Union

import torch
import torch.nn as nn

from .hooks import (
    AlignDevicesHook,
    CpuOffload,
    LayerwiseCastingHook,
    UserCpuOffloadHook,
    add_hook_to_module,
    attach_align_device_hook,
    attach_align_device_hook_on_blocks,
)
from .utils import (
    OffloadedWeightsLoader,
    check_cuda_p2p_ib_support,
    check_device_map,
    extract_submodules_state_dict,
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    is_bnb_available,
    is_mlu_available,
    is_musa_available,
    is_npu_available,
    is_sdaa_available,
    is_xpu_available,
    load_checkpoint_in_model,
    offload_state_dict,
    parse_flag_from_env,
    retie_parameters,
)
from .utils.constants import SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING
from .utils.other import recursive_getattr


logger = logging.getLogger(__name__)


@contextmanager
def init_empty_weights(include_buffers: Optional[bool] = None):
    """
A context manager under which models are initialized with all parameters on the meta device, therefore creating an
empty model. Useful when just initializing the model would blow the available RAM.

Args:
    include_buffers (`bool`, *optional*):
        Whether or not to also put all buffers on the meta device while initializing.

Example:

```python
import torch.nn as nn
from accelerate import init_empty_weights

# Initialize a model with 100 billion parameters in no time and without using any RAM.
with init_empty_weights():
    tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
```

<Tip warning={true}>

Any model created under this context manager has no weights. As such you can't do something like
`model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
Make sure to override the default `device_map` parameter of [`load_checkpoint_and_dispatch`], otherwise dispatch
is not called.

</Tip>
    """
    if include_buffers is None:
        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
    with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
        yield f


@contextmanager
def init_on_device(device: torch.device, include_buffers: Optional[bool] = None):
    """
A context manager under which models are initialized with all parameters on the specified device.

Args:
    device (`torch.device`):
        Device to initialize all parameters on.
    include_buffers (`bool`, *optional*):
        Whether or not to also put all buffers on the specified device while initializing.

Example:

```python
import torch.nn as nn
from accelerate import init_on_device

with init_on_device(device=torch.device("cuda")):
    tst = nn.Linear(100, 100)  # on `cuda` device
```
    """
    if include_buffers is None:
        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)

    # PyTorch >= 2.0 supports using the device directly as a context manager.
    if include_buffers:
        with device:
            yield
        return

    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        old_register_parameter(module, name, param)
        if param is not None:
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            kwargs["requires_grad"] = param.requires_grad
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)

    def register_empty_buffer(module, name, buffer, persistent=True):
        old_register_buffer(module, name, buffer, persistent=persistent)
        if buffer is not None:
            module._buffers[name] = module._buffers[name].to(device)

    # Patch tensor creation
    if include_buffers:
        tensor_constructors_to_patch = {
            torch_function_name: getattr(torch, torch_function_name)
            for torch_function_name in ["empty", "zeros", "ones", "full"]
        }
    else:
        tensor_constructors_to_patch = {}

    def patch_tensor_constructor(fn):
        def wrapper(*args, **kwargs):
            kwargs["device"] = device
            return fn(*args, **kwargs)

        return wrapper

    try:
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
        yield
    finally:
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)


def cpu_offload(
    model: nn.Module,
    execution_device: Optional[torch.device] = None,
    offload_buffers: bool = False,
    state_dict: Optional[dict[str, torch.Tensor]] = None,
    preload_module_classes: Optional[list[str]] = None,
):
    """
Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
state dict and moved to the given execution device as they are needed, then offloaded again.

Args:
    model (`torch.nn.Module`):
        The model to offload.
    execution_device (`torch.device`, *optional*):
        The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
        model's first parameter device.
    offload_buffers (`bool`, *optional*, defaults to `False`):
        Whether or not to offload the buffers with the model parameters.
    state_dict (`Dict[str, torch.Tensor]`, *optional*):
        The state dict of the model that will be kept on CPU.
    preload_module_classes (`List[str]`, *optional*):
        A list of classes whose instances should load all their weights (even in the submodules) at the beginning
        of the forward. This should only be used for classes that have submodules which are registered but not
        called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
        `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
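
Example (a minimal sketch; the tiny two-layer model and the CUDA device are illustrative assumptions):

```python
import torch
import torch.nn as nn

from accelerate import cpu_offload

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))
model = cpu_offload(model, execution_device=torch.device("cuda:0"))

# Parameters stay on the CPU; each submodule's weights are moved to `cuda:0`
# just in time during the forward pass, then offloaded again.
output = model(torch.randn(2, 8))
```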
    """
    if execution_device is None:
        execution_device = next(iter(model.parameters())).device
    if state_dict is None:
        state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()}

    add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
    attach_align_device_hook(
        model,
        execution_device=execution_device,
        offload=True,
        offload_buffers=offload_buffers,
        weights_map=state_dict,
        preload_module_classes=preload_module_classes,
    )

    return model


def cpu_offload_with_hook(
    model: torch.nn.Module,
    execution_device: Optional[Union[int, str, torch.device]] = None,
    prev_module_hook: Optional[UserCpuOffloadHook] = None,
):
    """
Offloads a model on the CPU and puts it back to an execution device when executed. The difference with
[`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when
the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop.

Args:
    model (`torch.nn.Module`):
        The model to offload.
    execution_device (`str`, `int` or `torch.device`, *optional*):
        The device on which the model should be executed. Will default to the MPS device if it's available, then
        GPU 0 if there is a GPU, and finally to the CPU.
    prev_module_hook (`UserCpuOffloadHook`, *optional*):
        The hook sent back by this function for a previous model in the pipeline you are running. If passed, its
        offload method will be called just before the forward of the model to which this hook is attached.

Example:

```py
model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device)
model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1)
model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2)

hid_1 = model_1(input)
for i in range(50):
    # model_1 is offloaded to the CPU at the first iteration; model_2 stays on the GPU during this whole loop.
    hid_2 = model_2(hid_1)
# model_2 is offloaded to the CPU just before this forward.
hid_3 = model_3(hid_2)

# For model_3, you need to manually call the hook's offload method.
hook_3.offload()
```
    """
    hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook)
    add_hook_to_module(model, hook, append=True)
    user_hook = UserCpuOffloadHook(model, hook)
    return model, user_hook


def disk_offload(
    model: nn.Module,
    offload_dir: Union[str, os.PathLike],
    execution_device: Optional[torch.device] = None,
    offload_buffers: bool = False,
    preload_module_classes: Optional[list[str]] = None,
):
    """
Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as
memory-mapped arrays in a given folder. During the forward pass, parameters will be accessed from that folder and
moved to the given execution device as they are needed, then offloaded again.

Args:
    model (`torch.nn.Module`): The model to offload.
    offload_dir (`str` or `os.PathLike`):
        The folder in which to offload the model weights (or where the model weights are already offloaded).
    execution_device (`torch.device`, *optional*):
        The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
        model's first parameter device.
    offload_buffers (`bool`, *optional*, defaults to `False`):
        Whether or not to offload the buffers with the model parameters.
    preload_module_classes (`List[str]`, *optional*):
        A list of classes whose instances should load all their weights (even in the submodules) at the beginning
        of the forward. This should only be used for classes that have submodules which are registered but not
        called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
        `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.

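Example (a minimal sketch; the `"offload"` folder and the tiny model are illustrative assumptions):

```python
import torch
import torch.nn as nn

from accelerate import disk_offload

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))
model = disk_offload(model, offload_dir="offload", execution_device=torch.device("cuda:0"))

# Weights are memory-mapped from the "offload" folder and streamed to `cuda:0`
# one submodule at a time during the forward pass.
output = model(torch.randn(2, 8))
```
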
    """
    if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
        offload_state_dict(offload_dir, model.state_dict())
    if execution_device is None:
        execution_device = next(iter(model.parameters())).device
    weights_map = OffloadedWeightsLoader(save_folder=offload_dir)

    add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
    attach_align_device_hook(
        model,
        execution_device=execution_device,
        offload=True,
        offload_buffers=offload_buffers,
        weights_map=weights_map,
        preload_module_classes=preload_module_classes,
    )

    return model


def dispatch_model(
    model: nn.Module,
    device_map: dict[str, Union[str, int, torch.device]],
    main_device: Optional[torch.device] = None,
    state_dict: Optional[dict[str, torch.Tensor]] = None,
    offload_dir: Optional[Union[str, os.PathLike]] = None,
    offload_index: Optional[dict[str, str]] = None,
    offload_buffers: bool = False,
    skip_keys: Optional[Union[str, list[str]]] = None,
    preload_module_classes: Optional[list[str]] = None,
    force_hooks: bool = False,
):
    """
Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
the CPU or even the disk.

Args:
    model (`torch.nn.Module`):
        The model to dispatch.
    device_map (`Dict[str, Union[str, int, torch.device]]`):
        A dictionary mapping module names in the model's `state_dict` to the device they should go to. Note that
        `"disk"` is accepted even if it's not a proper value for `torch.device`.
    main_device (`str`, `int` or `torch.device`, *optional*):
        The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
        `"disk"`.
    state_dict (`Dict[str, torch.Tensor]`, *optional*):
        The state dict of the part of the model that will be kept on CPU.
    offload_dir (`str` or `os.PathLike`):
        The folder in which to offload the model weights (or where the model weights are already offloaded).
    offload_index (`Dict`, *optional*):
        A dictionary from weight names to their information (`dtype`/`shape` or safetensors filename). Will default
        to the index saved in `save_folder`.
    offload_buffers (`bool`, *optional*, defaults to `False`):
        Whether or not to offload the buffers with the model parameters.
    skip_keys (`str` or `List[str]`, *optional*):
        A list of keys to ignore when moving inputs or outputs between devices.
    preload_module_classes (`List[str]`, *optional*):
        A list of classes whose instances should load all their weights (even in the submodules) at the beginning
        of the forward. This should only be used for classes that have submodules which are registered but not
        called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
        `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
    force_hooks (`bool`, *optional*, defaults to `False`):
        Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
        single device.
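
Example (a minimal sketch; the module names and devices are illustrative and must match your model and
hardware):

```python
import torch.nn as nn

from accelerate import dispatch_model

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 8))
# Keys are submodule names as returned by `model.named_modules()`; values are target devices.
model = dispatch_model(model, device_map={"0": 0, "1": "cpu"})
```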
    """
    # Error early if the device map is incomplete.
    check_device_map(model, device_map)

    # Quantized models can't always be moved with `.to()`: since bitsandbytes 0.43.2, 4-bit models can be,
    # but 8-bit (and older 4-bit) models still need hooks.
    if getattr(model, "quantization_method", None) == "bitsandbytes":
        if getattr(model, "is_loaded_in_8bit", False) or (
            getattr(model, "is_loaded_in_4bit", False) and not is_bnb_available(min_version="0.43.2")
        ):
            force_hooks = True

    # We attach hooks if the device_map has at least 2 different devices or if force_hooks is set to `True`.
    # Otherwise, the model is already loaded on a unique device and the user can decide where to move it.
    if (len(set(device_map.values())) > 1) or force_hooks:
        if main_device is None:
            if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
                main_device = "cpu"
            else:
                main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]

        if main_device != "cpu":
            cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
            if state_dict is None and len(cpu_modules) > 0:
                state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)

        disk_modules = [name for name, device in device_map.items() if device == "disk"]
        if offload_dir is None and offload_index is None and len(disk_modules) > 0:
            raise ValueError(
                "We need an `offload_dir` to dispatch this model according to this `device_map`, the following "
                f"submodules need to be offloaded: {', '.join(disk_modules)}."
            )
        if (
            len(disk_modules) > 0
            and offload_index is None
            and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
        ):
            disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
            offload_state_dict(offload_dir, disk_state_dict)

        execution_device = {
            name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
        }
        execution_device[""] = main_device
        offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
        offload = {name: device in offloaded_devices for name, device in device_map.items()}
        save_folder = offload_dir if len(disk_modules) > 0 else None
        if state_dict is not None or save_folder is not None or offload_index is not None:
            device = main_device if offload_index is not None else None
            weights_map = OffloadedWeightsLoader(
                state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
            )
        else:
            weights_map = None

        # Tied parameters may be shared across devices; track them by data pointer so the hooks can keep them tied.
        tied_params = find_tied_parameters(model)

        tied_params_map = {}
        for group in tied_params:
            for param_name in group:
                # data_ptr() is enough here, as this is the key used in the shared weights map.
                data_ptr = recursive_getattr(model, param_name).data_ptr()
                tied_params_map[data_ptr] = {}

        attach_align_device_hook_on_blocks(
            model,
            execution_device=execution_device,
            offload=offload,
            offload_buffers=offload_buffers,
            weights_map=weights_map,
            skip_keys=skip_keys,
            preload_module_classes=preload_module_classes,
            tied_params_map=tied_params_map,
        )

        # Warn if any parameters are on the meta device because they were offloaded.
        offloaded_devices_str = " and ".join(
            [device for device in set(device_map.values()) if device in ("cpu", "disk")]
        )
        if len(offloaded_devices_str) > 0:
            logger.warning(
                f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
            )

        # Attaching the hooks may break tied weights, so we retie them.
        retie_parameters(model, tied_params)

        def add_warning(fn, model):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
                if str(fn.__name__) == "to":
                    to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
                    if to_device is not None:
                        logger.warning(warning_msg)
                else:
                    logger.warning(warning_msg)
                for param in model.parameters():
                    if param.device == torch.device("meta"):
                        raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
                return fn(*args, **kwargs)

            return wrapper

        # Make `.to()` / `.cuda()` and friends warn instead of silently breaking the dispatch.
        model.to = add_warning(model.to, model)
        if is_npu_available():
            model.npu = add_warning(model.npu, model)
        elif is_mlu_available():
            model.mlu = add_warning(model.mlu, model)
        elif is_sdaa_available():
            model.sdaa = add_warning(model.sdaa, model)
        elif is_musa_available():
            model.musa = add_warning(model.musa, model)
        elif is_xpu_available():
            model.xpu = add_warning(model.xpu, model)
        else:
            model.cuda = add_warning(model.cuda, model)

        # Check if we are using multi-gpus with RTX 4000 series, which doesn't support P2P.
        use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
        if use_multi_gpu and not check_cuda_p2p_ib_support():
            logger.warning(
                "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
                "This can affect the multi-gpu inference when using accelerate device_map. "
                "Please make sure to update your driver to the latest version which resolves this."
            )
    else:
        device = list(device_map.values())[0]
        # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` and similar backends;
        # build an explicit device string instead.
        if is_npu_available() and isinstance(device, int):
            device = f"npu:{device}"
        elif is_mlu_available() and isinstance(device, int):
            device = f"mlu:{device}"
        elif is_sdaa_available() and isinstance(device, int):
            device = f"sdaa:{device}"
        elif is_musa_available() and isinstance(device, int):
            device = f"musa:{device}"
        if device != "disk":
            model.to(device)
        else:
            raise ValueError(
                "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
            )
    # Convert OrderedDict back to dict for easier usage.
    model.hf_device_map = dict(device_map)
    return model


def load_checkpoint_and_dispatch(
    model: nn.Module,
    checkpoint: Union[str, os.PathLike],
    device_map: Optional[Union[str, dict[str, Union[int, str, torch.device]]]] = None,
    max_memory: Optional[dict[Union[int, str], Union[int, str]]] = None,
    no_split_module_classes: Optional[list[str]] = None,
    offload_folder: Optional[Union[str, os.PathLike]] = None,
    offload_buffers: bool = False,
    dtype: Optional[Union[str, torch.dtype]] = None,
    offload_state_dict: Optional[bool] = None,
    skip_keys: Optional[Union[str, list[str]]] = None,
    preload_module_classes: Optional[list[str]] = None,
    force_hooks: bool = False,
    strict: bool = False,
    full_state_dict: bool = True,
    broadcast_from_rank0: bool = False,
):
    """
Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are
loaded and adds the various hooks that will make this model run properly (even if split across devices).

Args:
    model (`torch.nn.Module`): The model in which we want to load a checkpoint.
    checkpoint (`str` or `os.PathLike`):
        The folder checkpoint to load. It can be:
        - a path to a file containing a whole model state dict
        - a path to a `.json` file containing the index to a sharded checkpoint
        - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint.
    device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*):
        A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer
        name; once a given module name is in the map, every submodule of it will be sent to the same device.

        To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more
        information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map).
        Defaults to None, which means [`dispatch_model`] will not be called.
    max_memory (`Dict`, *optional*):
        A dictionary mapping device identifiers to their maximum memory. Will default to the maximum memory available for each GPU
        and the available CPU RAM if unset.
    no_split_module_classes (`List[str]`, *optional*):
        A list of layer class names that should never be split across devices (for instance any layer that has a
        residual connection).
    offload_folder (`str` or `os.PathLike`, *optional*):
        If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
    offload_buffers (`bool`, *optional*, defaults to `False`):
        In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as
        well as the parameters.
    dtype (`str` or `torch.dtype`, *optional*):
        If provided, the weights will be converted to that type when loaded.
    offload_state_dict (`bool`, *optional*):
        If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if
        the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map
        picked contains `"disk"` values.
    skip_keys (`str` or `List[str]`, *optional*):
        A list of keys to ignore when moving inputs or outputs between devices.
    preload_module_classes (`List[str]`, *optional*):
        A list of classes whose instances should load all their weights (even in the submodules) at the beginning
        of the forward. This should only be used for classes that have submodules which are registered but not
        called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
        `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
    force_hooks (`bool`, *optional*, defaults to `False`):
        Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
        single device.
    strict (`bool`, *optional*, defaults to `False`):
        Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's
        state_dict.
    full_state_dict (`bool`, *optional*, defaults to `True`): if this is set to `True`, all the tensors in the
        loaded state_dict will be gathered. No `ShardedTensor` or `DTensor` will be in the loaded state_dict.
    broadcast_from_rank0 (`bool`, *optional*, defaults to `False`): when the option is `True`, a distributed
        `ProcessGroup` must be initialized. rank0 should receive a full state_dict and will broadcast the tensors
        in the state_dict one by one to other ranks. Other ranks will receive the tensors and shard (if applicable)
        according to the local shards in the model.

Example:

```python
>>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch
>>> from huggingface_hub import hf_hub_download
>>> from transformers import AutoConfig, AutoModelForCausalLM

>>> # Download the Weights
>>> checkpoint = "EleutherAI/gpt-j-6B"
>>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin")

>>> # Create a model and initialize it with empty weights
>>> config = AutoConfig.from_pretrained(checkpoint)
>>> with init_empty_weights():
...     model = AutoModelForCausalLM.from_config(config)

>>> # Load the checkpoint and dispatch it to the right devices
>>> model = load_checkpoint_and_dispatch(
...     model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"]
... )
```
    """
    if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
        raise ValueError(
            "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
            "'sequential'."
        )
    if isinstance(device_map, str):
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                max_memory=max_memory,
                no_split_module_classes=no_split_module_classes,
                dtype=dtype,
                low_zero=(device_map == "balanced_low_0"),
            )
        device_map = infer_auto_device_map(
            model,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
            dtype=dtype,
            offload_buffers=offload_buffers,
        )
    if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
        offload_state_dict = True
    load_checkpoint_in_model(
        model,
        checkpoint,
        device_map=device_map,
        offload_folder=offload_folder,
        dtype=dtype,
        offload_state_dict=offload_state_dict,
        offload_buffers=offload_buffers,
        strict=strict,
        full_state_dict=full_state_dict,
        broadcast_from_rank0=broadcast_from_rank0,
    )
    if device_map is None:
        return model
    return dispatch_model(
        model,
        device_map=device_map,
        offload_dir=offload_folder,
        offload_buffers=offload_buffers,
        skip_keys=skip_keys,
        preload_module_classes=preload_module_classes,
        force_hooks=force_hooks,
    )


def attach_layerwise_casting_hooks(
    module: torch.nn.Module,
    storage_dtype: torch.dtype,
    compute_dtype: torch.dtype,
    skip_modules_pattern: Optional[tuple[str, ...]] = None,
    skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None,
    non_blocking: bool = False,
) -> None:
    """
Applies layerwise casting to a given module. The module expected here is a PyTorch `nn.Module`. This is helpful for
reducing memory requirements when one doesn't want to fully quantize a model. Model params can be kept in, say,
`torch.float8_e4m3fn`, upcast to a higher precision like `torch.bfloat16` during the forward pass, and downcast
back to `torch.float8_e4m3fn` afterwards to realize memory savings.

Args:
    module (`torch.nn.Module`):
        The module whose leaf modules will be cast to a high precision dtype for computation, and to a low
        precision dtype for storage.
    storage_dtype (`torch.dtype`):
        The dtype to cast the module to before/after the forward pass for storage.
    compute_dtype (`torch.dtype`):
        The dtype to cast the module to during the forward pass for computation.
    skip_modules_pattern (`tuple[str, ...]`, defaults to `None`):
        A list of patterns to match the names of the modules to skip during the layerwise casting process. If set
        to `None` alongside `skip_modules_classes` being `None`, the layerwise casting is applied directly to the
        module instead of its internal submodules.
    skip_modules_classes (`tuple[type[torch.nn.Module], ...]`, defaults to `None`):
        A list of module classes to skip during the layerwise casting process.
    non_blocking (`bool`, defaults to `False`):
        If `True`, the weight casting operations are non-blocking.

Example:

```python
>>> from accelerate.hooks import attach_layerwise_casting_hooks
>>> from transformers import AutoModelForCausalLM
>>> import torch

>>> # Model
>>> checkpoint = "EleutherAI/gpt-j-6B"
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)

>>> # Attach hooks and perform inference
>>> attach_layerwise_casting_hooks(model, storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)
>>> with torch.no_grad():
...     model(...)
```

Users can also pass modules they want to avoid from getting downcasted.

```py
>>> attach_layerwise_casting_hooks(
...     model, storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16, skip_modules_pattern=["norm"]
... )
```
    """
    _attach_layerwise_casting_hooks(
        module, storage_dtype, compute_dtype, skip_modules_pattern, skip_modules_classes, non_blocking
    )


def _attach_layerwise_casting_hooks(
    module: torch.nn.Module,
    storage_dtype: torch.dtype,
    compute_dtype: torch.dtype,
    skip_modules_pattern: Optional[tuple[str, ...]] = None,
    skip_modules_classes: Optional[tuple[type[torch.nn.Module], ...]] = None,
    non_blocking: bool = False,
    _prefix: str = "",
):
    should_skip = (skip_modules_classes is not None and isinstance(module, skip_modules_classes)) or (
        skip_modules_pattern is not None and any(re.search(pattern, _prefix) for pattern in skip_modules_pattern)
    )
    if should_skip:
        logger.debug(f'Skipping layerwise casting for layer "{_prefix}"')
        return

    if isinstance(module, SUPPORTED_PYTORCH_LAYERS_FOR_UPCASTING):
        logger.debug(f'Applying layerwise casting to layer "{_prefix}"')
        add_hook_to_module(
            module,
            LayerwiseCastingHook(storage_dtype=storage_dtype, compute_dtype=compute_dtype, non_blocking=non_blocking),
            append=True,
        )
        return

    for name, submodule in module.named_children():
        layer_name = f"{_prefix}.{name}" if _prefix else name
        _attach_layerwise_casting_hooks(
            submodule,
            storage_dtype,
            compute_dtype,
            skip_modules_pattern,
            skip_modules_classes,
            non_blocking,
            _prefix=layer_name,
        )


def _attach_context_parallel_hooks(model: nn.Module):
    """
Monkeypatch huggingface's `transformers` model to fix attention mask issues when using context parallelism.

This function attaches a forward_pre_hook to each `self_attn` module of the model. Each hook checks its
args/kwargs for an attention mask; if one is present, the hook removes it and, if the mask is causal, adds the
kwarg `is_causal=True`, otherwise raises an error. This is because context parallelism does not support attention
masks. This function modifies the model in place.

Args:
    model (`nn.Module`):
        The model to attach the hooks to.
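
Example (a minimal sketch; assumes `model` is a `transformers`-style model whose attention submodules are named
`self_attn`):

```python
_attach_context_parallel_hooks(model)
# Any `attention_mask` kwarg later passed to a `self_attn` module is dropped
# and replaced with `is_causal=True`.
```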

    """

    def _self_attn_pre_forward_hook(_module, module_args, module_kwargs):
        # Context parallelism shards the sequence dimension, so a materialized
        # attention mask cannot be applied; rely on `is_causal` instead.
        if "attention_mask" in module_kwargs:
            module_kwargs["attention_mask"] = None
            module_kwargs["is_causal"] = True
        return module_args, module_kwargs

    for name, module in model.named_modules():
        if name.endswith("self_attn"):
            module.register_forward_pre_hook(_self_attn_pre_forward_hook, with_kwargs=True, prepend=True)