
    ;i                   6   S SK Jr  S SKrS SKrS SKrS SKrS SKrS SKrS SKrS SK	r	S SK
Jr  S SKJr  S SKJr  S SKJr  S SKJrJrJr  S SKrS SKJs  Jr  S SKJr  S S	KJr  S
SKJr  S
SKJrJ r J!r!J"r"  S
SK#J$r$J%r%J&r&  S
SK'J(r(  S
SK)J*r*  S
SK+J,r,  S
SK-J.r.  S
SK/J0r0J1r1J2r2  S
SK3J4r4J5r5J6r6  S
SKJ7r7J8r8J9r9J:r:J;r;J<r<J=r=J>r>J?r?J@r@JArAJBrBJCrCJDrDJErEJFrFJGrGJHrHJIrIJJrJJKrKJLrLJMrMJNrNJOrOJPrPJQrQJRrRJSrSJTrTJUrUJVrVJWrWJXrXJYrYJZrZJ[r[J\r\J]r]J^r^J_r_J`r`JaraJbrbJcrcJdrdJereJfrfJgrgJhrhJiriJjrjJkrkJlrlJmrmJnrnJoroJprpJqrqJrrrJsrsJtrtJuruJvrvJwrwJxrxJyryJzrzJ{r{J|r|J}r}J~r~JrJrJrJrJr  S
SKJrJrJrJr  S
SKJr  S
SKJrJrJr  \k" 5       (       a  S
SKJrJrJrJrJrJr  \n" 5       (       a  S
SKJrJrJrJrJrJrJrJr  S SKJr  \t" 5       (       a  S SKJs  Jr  S SKJs  Jr  \r" SS9(       a  S SKr S SKJr  \(" \5      r\" 5       r\" 5       r\" 5       r\" 5       r " S S 5      rg! \ a	    S SKJr   N=f = f)!    )annotationsN)OrderedDict)contextmanager)partial)
MethodType)AnyCallableUnion)"split_torch_state_dict_into_shards)FP8BackendType   )_attach_context_parallel_hooks)load_accelerator_stateload_custom_statesave_accelerator_statesave_custom_state)DataLoaderDispatcherprepare_data_loaderskip_first_batches)
get_logger)AcceleratedOptimizer)ParallelismConfig)AcceleratedScheduler)AcceleratorStateGradientStatePartialState)LOGGER_TYPE_TO_CLASSGeneralTrackerfilter_trackers)M
MODEL_NAMESAFE_WEIGHTS_INDEX_NAMESAFE_WEIGHTS_NAMESAFE_WEIGHTS_PATTERN_NAMEWEIGHTS_INDEX_NAMEWEIGHTS_NAMEWEIGHTS_PATTERN_NAMEAORecipeKwargsAutocastKwargsDataLoaderConfigurationDeepSpeedPluginDistributedDataParallelKwargsDistributedTypeDynamoBackendFP8RecipeKwargsFullyShardedDataParallelPluginGradientAccumulationPluginGradScalerKwargsInitProcessGroupKwargsKwargsHandler
LoggerTypeMegatronLMPluginMSAMPRecipeKwargsPrecisionTypeProfileKwargsProjectConfigurationRNGTypeTERecipeKwargsTorchDynamoPluginTorchTensorParallelPluginapply_fp8_autowrapcheck_os_kernel clean_state_dict_for_safetensorscompare_versionsconvert_modelconvert_model_to_fp8_aoconvert_outputs_to_fp32ensure_weights_retiedextract_model_from_parallelfsdp2_apply_acfsdp2_canonicalize_namesfsdp2_prepare_model!fsdp2_switch_optimizer_parametersgathergather_objectget_fsdp2_grad_scalerget_grad_scaler#get_mixed_precision_context_managerget_pretty_namehas_offloaded_paramsis_bf16_available'is_bitsandbytes_multi_backend_availableis_deepspeed_availableis_ipex_availableis_lomo_availableis_megatron_lm_availableis_mlu_availableis_msamp_availableis_musa_availableis_npu_availableis_torch_versionis_torch_xla_availableis_torchao_availableis_transformer_engine_availableis_xpu_availableload_fsdp_modelload_fsdp_optimizermodel_has_dtensorpad_across_processesparse_choice_from_envrecursively_applyreducerelease_memorysavesave_fsdp_modelsave_fsdp_optimizerwait_for_everyone)FSDP2_PYTORCH_VERSIONFSDP_PYTORCH_VERSIONPROFILE_PATTERN_NAMESCALER_NAME)get_state_dict_offloaded_model)compile_regionscompile_regions_deepspeedis_compiled_module)DeepSpeedEngineWrapperDeepSpeedOptimizerWrapperDeepSpeedSchedulerWrapper
DummyOptimDummySchedulermap_pytorch_optim_to_deepspeed)MegatronEngineMegatronLMDummyDataLoaderMegatronLMDummySchedulerMegatronLMOptimizerWrapperMegatronLMSchedulerWrappermegatron_lm_initializemegatron_lm_prepare_data_loader-megatron_lm_prepare_model_optimizer_scheduler)JoinF)check_device)LRScheduler)_LRSchedulerc                     \ rS rSrSrS\SSSSSSSSSSSSSSSSSSS4                                         SrS jjr\S 5       r\S	 5       r	\S
 5       r
\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\R&                  SsS j5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\S 5       r\StS  j5       r \S! 5       r!\S" 5       r"\SuS# j5       r#\SuS$ j5       r$\SuS% j5       r%\SuS& j5       r&\SuS' j5       r'\(SvSwS( jj5       r)SxSyS) jjr*SxSyS* jjr+SzS+ jr,S{S|S, jjr-S{S}S- jjr.\(S. 5       r/\(S/ 5       r0\(S0 5       r1\2\(S1 5       5       r3S2 r4\S3 5       r5\5R&                  S4 5       r5\S5 5       r6\6R&                  S6 5       r6\(S7 5       r7\(SxS8 j5       r8S9 r9S~S: jr:SS;.S< jr;S= r<S> r=S? r> S     SS@ jjr?SA r@SB rASC rBSD rCSE rDSF rESG rF S{ SSH jjrGSxSSI jjrHSSJ jrISK rJSL rKSM rLSxSN jrMSSO jrNSP rOSQ rPSvSR jrQSSS jrRSST jrSSSSU jjrTSV rU\*S0 4SSW jj5       rVSvSSX jjrW\*S0 4SSY jj5       rXSZ rYSvS[ jrZ  S       SS\ jjr[SS] jr\SSS^ jjr]SS_ jr^S{SS` jjr_Sa r`Sb raSSc.Sd jrbSe rcSSf jrdSg re\(   S     SSh jj5       rf\(SxSSi jj5       rg\(SxSSj jj5       rh\Sk 5       riSSSl jjrjSm rkSSn jrlSSo jrm\SSp j5       rnSqrog)Accelerator   a  
Creates an instance of an accelerator for distributed training or mixed precision training.

Args:
    device_placement (`bool`, *optional*, defaults to `True`):
        Whether or not the accelerator should put objects on device (tensors yielded by the dataloader, model,
        etc...).
    mixed_precision (`str`, *optional*):
        Whether or not to use mixed precision training. Choose from 'no','fp16','bf16' or 'fp8'. Will default to
        the value in the environment variable `ACCELERATE_MIXED_PRECISION`, which will use the default value in the
        accelerate config of the current system or the flag passed with the `accelerate.launch` command. 'fp8'
        requires the installation of transformers-engine.
    gradient_accumulation_steps (`int`, *optional*, default to 1):
        The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with
        `Accelerator.accumulate`. If not passed, will default to the value in the environment variable
        `ACCELERATE_GRADIENT_ACCUMULATION_STEPS`. Can also be configured through a `GradientAccumulationPlugin`.
    cpu (`bool`, *optional*):
        Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force
        the execution on one process only.
    dataloader_config (`DataLoaderConfiguration`, *optional*):
        A configuration for how the dataloaders should be handled in distributed scenarios.
    deepspeed_plugin ([`~utils.DeepSpeedPlugin`] or dict of `str`: [`~utils.DeepSpeedPlugin`], *optional*):
        Tweak your DeepSpeed related args using this argument. This argument is optional and can be configured
        directly using *accelerate config*. If using multiple plugins, use the configured `key` property of each
        plugin to access them from `accelerator.state.get_deepspeed_plugin(key)`. Alias for `deepspeed_plugins`.
    fsdp_plugin ([`~utils.FullyShardedDataParallelPlugin`], *optional*):
        Tweak your FSDP related args using this argument. This argument is optional and can be configured directly
        using *accelerate config*
    torch_tp_plugin ([`~utils.TorchTensorParallelPlugin`], *optional*):
        Deprecated: use `parallelism_config` with `tp_size` instead.
    megatron_lm_plugin ([`~utils.MegatronLMPlugin`], *optional*):
        Tweak your MegatronLM related args using this argument. This argument is optional and can be configured
        directly using *accelerate config*
    rng_types (list of `str` or [`~utils.RNGType`]):
        The list of random number generators to synchronize at the beginning of each iteration in your prepared
        dataloaders. Should be one or several of:

        - `"torch"`: the base torch random number generator
        - `"cuda"`: the CUDA random number generator (GPU only)
        - `"xla"`: the XLA random number generator (TPU only)
        - `"generator"`: the `torch.Generator` of the sampler (or batch sampler if there is no sampler in your
          dataloader) or of the iterable dataset (if it exists) if the underlying dataset is of that type.

        Will default to `["torch"]` for PyTorch versions <=1.5.1 and `["generator"]` for PyTorch versions >= 1.6.
    log_with (list of `str`, [`~utils.LoggerType`] or [`~tracking.GeneralTracker`], *optional*):
        A list of loggers to be setup for experiment tracking. Should be one or several of:

        - `"all"`
        - `"tensorboard"`
        - `"wandb"`
        - `"trackio"`
        - `"aim"`
        - `"comet_ml"`
        - `"mlflow"`
        - `"dvclive"`
        - `"swanlab"`
        If `"all"` is selected, will pick up all available trackers in the environment and initialize them. Can
        also accept implementations of `GeneralTracker` for custom trackers, and can be combined with `"all"`.
    project_config ([`~utils.ProjectConfiguration`], *optional*):
        A configuration for how saving the state can be handled.
    project_dir (`str`, `os.PathLike`, *optional*):
        A path to a directory for storing data such as logs of locally-compatible loggers and potentially saved
        checkpoints.
    step_scheduler_with_optimizer (`bool`, *optional*, defaults to `True`):
        Set `True` if the learning rate scheduler is stepped at the same time as the optimizer, `False` if only
        done under certain circumstances (at the end of each epoch, for instance).
    kwargs_handlers (list of [`~utils.KwargsHandler`], *optional*)
        A list of [`~utils.KwargsHandler`] to customize how the objects related to distributed training, profiling
        or mixed precision are created. See [kwargs](kwargs) for more information.
    dynamo_backend (`str` or [`~utils.DynamoBackend`], *optional*, defaults to `"no"`):
        Set to one of the possible dynamo backends to optimize your training with torch dynamo.
    dynamo_plugin ([`~utils.TorchDynamoPlugin`], *optional*):
        A configuration for how torch dynamo should be handled, if more tweaking than just the `backend` or `mode`
        is needed.
    gradient_accumulation_plugin ([`~utils.GradientAccumulationPlugin`], *optional*):
        A configuration for how gradient accumulation should be handled, if more tweaking than just the
        `gradient_accumulation_steps` is needed.

**Available attributes:**

    - **device** (`torch.device`) -- The device to use.
    - **distributed_type** ([`~utils.DistributedType`]) -- The distributed training configuration.
    - **local_process_index** (`int`) -- The process index on the current machine.
    - **mixed_precision** (`str`) -- The configured mixed precision mode.
    - **num_processes** (`int`) -- The total number of processes used for training.
    - **optimizer_step_was_skipped** (`bool`) -- Whether or not the optimizer update was skipped (because of
      gradient overflow in mixed precision), in which
    case the learning rate should not be changed.
    - **process_index** (`int`) -- The overall index of the current process among all processes.
    - **state** ([`~state.AcceleratorState`]) -- The distributed setup state.
    - **sync_gradients** (`bool`) -- Whether the gradients are currently being synced across all processes.
    - **use_distributed** (`bool`) -- Whether the current configuration is for distributed training.
TNr   Fc                   / U l         Ub  Xl        O[        US9U l        Ub(  U R                  c  U R                  R	                  U5        Ub9  [        U5      nU[        ;  a$  [        SU S[        R                  " 5        35      eU	b  [        R                  " S[        5        Ub  Ub  [        S5      eUb
  [        US9nOUc
  [        5       nUb  Ub  [        S5      eUb  UnUc  [        R                  0 :w  a;  [        5       R                  [         R"                  :X  a  [        5       R$                  nO[&        R(                  R+                  SS	5      R-                  5       S
:X  a
  [/        5       OS nO[        R                  0 :w  aF  [        5       R                  [         R"                  :X  a   [        5       R$                  b  [1        S5      e[3        U[4        5      (       a7  UR7                  5        H#  n[3        U[.        5      (       a  M  [9        S5      e   Ub  S
[&        R(                  S'   [;        5       (       d  [=        S5      e[?        5       (       a  [A        SSS5      (       a  [=        S5      eOJ[C        5       (       a  [A        SSS5      (       a  [=        S5      eO[A        SSS5      (       a  [=        S5      eS U l"        [&        R(                  R+                  SS	5      R-                  5       S
:X  d  [3        U[F        5      (       a'  [I        S[J        5      (       d  [        S[J         35      eUc?  [&        R(                  R+                  SS	5      R-                  5       S
:X  a
  [G        5       OS nO3[3        U[F        5      (       d  [9        S5      eS
[&        R(                  S'   Ub7  URL                  S:X  a'  [I        S[N        5      (       d  [=        S[N         35      eU
c?  [&        R(                  R+                  SS	5      R-                  5       S
:X  a
  [Q        5       OS n
O3[3        U
[P        5      (       d  [9        S5      eS
[&        R(                  S'   U
(       a  [S        5       (       d  [=        S5      eS U l*        S U l+        S U l,        S U l-        S U l.        S U l/        S U l0        S U l1        S U l2        SU l3        [i        5       n[j        S [l        S![n        S"[p        S#[r        S$[t        S%[v        S&[x        S'[z        S(0	nSU l>        Ub  U H  n[3        U[~        5      (       d   S)U S*35       eUR                  U;   a  [        S+UR                   S,35      eUR                  UR                  5        UUR                     n[        U UU5        S-U;   d  M  U R|                  (       a  M  S.U l>        M     UcS  U	b  [        U	R                  S/9nO<[&        R(                  R+                  S0S	5      R-                  5       S
:X  a
  [        5       nU RX                  b  U RX                  R                  5       O0 n[        SVUUUUUU
US.S1.UD6U lF        U R                  (       aO  UR                  U R                  R                  5      U R                  lK        U R                  R                  U 5        U R                  R                  S2:H  =(       d    US2:H  U lN        U R                  (       Gai  U R|                  (       GdW  U R                  [        R                  :X  a  [w        5       U l.        GO!U R                  [        R                  :X  a  [y        5       U l/        OU R                  [        R                  :X  a  [{        5       U l0        OU R                  [        R                  :X  a  [        5       (       a%  [        R                  S35        [w        5       U l.        Os[        5       (       a%  [        R                  S45        [y        5       U l/        O?[        5       (       a%  [        R                  S55        [{        5       U l0        O[=        S65      eS.U l>        SU lZ        U R|                  (       a  U R                  (       d9  U R                  [         R                  [         R"                  4;  a  [        S75      eU R                  S8:H  =(       a-    U R                  [         R                  [         R                  4;   U lZ        U R                  [        R                  :X  aX  U R                  R                  [         R                  :X  a0  U R                  R                  R                  (       a  [        S95      e[        XR                  5      n[        U5      S::  a  Ub  [        R                  " S;U S<35        UU lb        US=:w  aO  [        U R                  S>S5      (       a3  U R                  R                  [         R                  :w  a  [        S?5      eUb  US::w  a  [        S@5      eO[        [        SAU5      5      n[        USB9n[        USC9U lj        Xlk        Uc
  [        5       nX`lm        UU ln        S U lo        SU lp        U R                  R                  SD:X  GaW  U R                  R                  SE:w  Ga<  U R                  [         R"                  [         R                  4;  Ga  S.U lp        SFnU R                  R                  U;  d  [        S.SG9(       a'  [        SHU SIU R                  R                  < SJ35      eU R                  R                  SK:X  a  [I        SSL5      (       d  [        SM5      eU RV                  b  U RV                  R                  5       O0 nU R                  (       a(  [        SVSNU R                  R                  0UD6U lo        GO[        U R                  40 UD6U lo        GOzU R                  R                  S=:X  a  U R                  [         R"                  [         R                  4;  a  U R                  R                  SO;   a  S.U lp        O[        S.5      U lp        U R                  (       d  [        5       (       d  [        SP5      eU R                  (       a6  U R                  R                  SK:X  a  [I        SSQ5      (       d  [        SR5      eOU R                  (       a|  S.U lp        U R                  [        R                  :X  aW  U R                  [         R                  :X  a  [1        SS5      eU R                  [         R"                  :w  a  [        SV0 UD6U lo        STU lw        / U lx        / U ly        / U lz        / U l{        / U l|        [        5       U l~        [        5       U l        Xl        U GR                   c  SU/U l        S U l        G[        5         g )WN)project_dirzUnknown mixed_precision mode: z. Choose between z`TorchTensorParallelPlugin` is deprecated and will be removed in a future version of Accelerate. Please use the `ParallelismConfig` with `tp_size` instead.zVYou cannot pass in both `dynamo_plugin` and `dynamo_backend`, please only pass in one.)backendzCYou cannot pass in both `deepspeed_plugins` and `deepspeed_plugin`.ACCELERATE_USE_DEEPSPEEDfalsetruezYou cannot pass in a `deepspeed_plugin` when creating a second `Accelerator`. Please make sure the first `Accelerator` is initialized with all the plugins you want to use.z4`deepspeed_plugin` must be a DeepSpeedPlugin object.zRDeepSpeed is not installed => run `pip install deepspeed` or build it from source.	deepspeed<z0.15.2zADeepSpeed MLU version must be >= 0.15.2. Please update DeepSpeed.z0.14.3zBDeepSpeed MUSA version must be >= 0.14.3. Please update DeepSpeed.z0.9.3z<DeepSpeed version must be >= 0.9.3. Please update DeepSpeed.ACCELERATE_USE_FSDP>=zFSDP requires PyTorch >= z>`fsdp_plugin` must be a FullyShardedDataParallelPlugin object.   zFSDP2 requires PyTorch >= ACCELERATE_USE_MEGATRON_LMz7`megatron_lm_plugin` must be a MegatronLMPlugin object.z7Megatron is not installed. please build it from source.Fddp_handlerscaler_handlerinit_handlerfp8_recipe_handlerautocast_handlerprofile_handlerao_recipe_handlerte_recipe_handlermsamp_recipe_handlerz#Unsupported kwargs handler passed: z=, must be one that inherits `accelerate.utils.KwargsHandler`.zYou can only pass one z in `kwargs_handlers`.recipe_handlerT)tp_size!ACCELERATE_USE_PARALLELISM_CONFIG)mixed_precisioncpudynamo_plugindeepspeed_pluginfsdp_pluginmegatron_lm_pluginparallelism_config_from_acceleratorfp8z5Found `torchao` installed, using it for FP8 training.z@Found `transformer-engine` installed, using it for FP8 training.z3Found `msamp` installed, using it for FP8 training.zTried to train with `fp8` and auto-detect backend, but no FP8-compatible backend was installed. Valid backends are: `torchao`, `transformer-engine`, and `msamp`.zIPassing in an FP8 configuration requires setting `mixed_precision='fp8'`.TEztorchao with FSDP2 and cpu_ram_efficient_loading is not supported, setting `cpu_ram_efficient_loading` to False will fix the issue and work as intended.r   z
`log_with=z?` was passed but no supported trackers are currently installed.bf16downcast_bfloatzMCan only use `downcast_bf16` when using `mixed_precision='bf16'` and on a TPUzYou can only pass one of `gradient_accumulation_steps` and `gradient_accumulation_plugin`. Please only pass in the created `GradientAccumulationPlugin` object.&ACCELERATE_GRADIENT_ACCUMULATION_STEPS)	num_steps)gradient_accumulation_pluginfp16r   )	xpucudanpuxlamlumusahpusdaamps)check_is_tpuz*fp16 mixed precision requires a device in z (not ).r   z2.5.0z@fp16 mixed precision with MPS device requires a Pytorch >= 2.5.0device)r   r   r   zEbf16 mixed precision requires PyTorch >= 1.10 and a supported device.z2.6.0z@bf16 mixed precision with MPS device requires a Pytorch >= 2.6.0zt`accelerate` + `MS-AMP` + `FSDP` is not supported at this time. Please consider using deepspeed, which is supported.r   	generator )trackersproject_configurationr9   r   set_directoriesstrr7   
ValueErrorlistwarningswarnFutureWarningr<   r   _shared_statedistributed_typer,   	DEEPSPEEDdeepspeed_pluginsosenvirongetlowerr*   NotImplementedError
isinstancedictvalues	TypeErrorrT   ImportErrorrX   rA   rZ   deepspeed_engine_wrappedr/   r\   rn   fsdp_versionrm   r5   rW   r   r   r   r   r   r   r   r   r   has_lomo_optimizersetr+   r1   r2   r.   r(   r8   r'   r;   r6   has_fp8_handlerr3   	__class__addsetattrr   r   	to_kwargsstater   get_device_meshr   typedevice_mesh_validate_acceleratorr   fp8_enabledfp8_backendr   AOr   MSAMPNOr^   loggerinfor_   rY   delayed_fp8_autocastFSDP	MULTI_GPUr   cpu_ram_efficient_loadingr   logging_dirlenlog_withgetattrdistributedTypeXLAintre   r0   r   gradient_statedevice_placementr)   dataloader_configstep_scheduler_with_optimizerscaler
native_ampMEGATRON_LMr]   is_fsdp2rM   rN   rR   step_optimizers_models_schedulers_dataloaders_custom_objectsr   _load_model_state_pre_hook_save_model_state_pre_hook	rng_typesflag_tensorr?   )selfr   split_batchesr   gradient_accumulation_stepsr   r   r   r   torch_tp_pluginr   r  r   r   project_configr   r   kwargs_handlersdynamo_backendr   r   r   pluginfound_handlershandler_class_to_attrhandlerhandler_attrkwargsr   supported_devices                                 f/home/dmtnaga/Documents/work/airagagent/rag_env/lib/python3.13/site-packages/accelerate/accelerator.py__init__Accelerator.__init__  s   0 %)7&)=+)VD&"t'7'7'?&&66{C&!/2Om3 4_4EEVWdWiWiWkVlm  &MMM $)Cuvv%-nEM"-/M(-=-Ibcc) 0$ !.."4$&77?;T;TT$4$6$H$H!
 zz~~&@'JPPRV\\ $% " !.."4$&77?;T;TT$&88D)t  +T22/668F%fo>>'(^__ 9 (5;BJJ12)++!"vww!!#Kh??%&ijj @"$$#Kh??%&jkk @!+sG<<!"`aa,0D)::>>/9??AVKz7P
 P
 $D*>?? #<=Q<R!STT ::>>"7AGGIVS /0  k+IJJ `aa06BJJ,-"{'?'?1'D#D*?@@!$>?T>U"VWW%&(jjnn5QSZ&[&a&a&cgm&m "sw  02BCC YZZ7=BJJ34+--!"[\\  " "&!%!%$(! $#"')=."N1.,//5
!
  %&*!'=99 9'B  A9 $$6$'=g>O>O=PPf%ghh""7#4#454W5F5FGlG4#|3D<P<P<P+/D( + %*%6?V?V%W" CWMSSUY__%6%8"262C2C2O"",,.UW% 

+'.#11"

 


 ""%7%G%GHXHX%YDJJ"##99$?::55>Z/UZBZ D$8$8$8>#4#44)7)9&!!^%6%66)7)9&!!^%9%99,=,?)!!^%6%66'))KK WX-;-=D*466KK bc-;-=D*'))KK UV0A0CD-%\  $(D $)!##%%o.B.BOD]D]-^^ !lmm(,(8(8D(@ )TEZEZ))$$_ FD%  1 11

++/C/CC

&&@@ k  #8-=-=>x=1!5MMJxj0opq  &

$5u==++/B/BBlmm'3*a/  v  0
 +.%&NPkl+' ,FPk+l( ,)E
 !1$ 7 9!2-J* JJ&&&0  E)%%o.G.GIdId-ee"DOa{{'77;Q_c;d @AQ@RRXY]YdYdYiYiXllno  {{5(1A$1P1P !cdd8<8K8K8WT((224]_F }}3V4;;;K;KVvV-d.C.CNvNZZ''61d6K6K%%''T
 7
 {{#88"&"3D"9??+A+C+C !hii4;;#3#3u#<EUVZ\cEdEd !cdd
 "DO>#7#77((O,@,@@-O  **o.G.GG"1";F";DK 	 ! +6-'*5-' #>>!)]DN      c                .    U R                   R                  $ )a  
Returns the currently active DeepSpeedPlugin.

If using multiple plugins, the first one will be the active one by default. Manually call
`accelerator.state.select_deepspeed_plugin(key)` to activate a different plugin.

If deepspeed is not enabled, this will return `None`.
)r   r   r	  s    r  r   Accelerator.deepspeed_plugin~  s     zz***r  c                .    U R                   R                  $ )z@
Whether the Accelerator is configured for distributed training
)r   use_distributedr  s    r  r  Accelerator.use_distributed  s    
 zz)))r  c                   U R                   =(       ax    U R                  [        R                  [        R                  [        R
                  [        R                  [        R                  [        R                  [        R                  4;   $ N)
r  r   r,   r   	MULTI_MLU
MULTI_SDAA
MULTI_MUSA	MULTI_NPU	MULTI_XPU	MULTI_HPUr  s    r  multi_deviceAccelerator.multi_device  sg    ## 
(=(=%%%%&&&&%%%%%%B
 )
 	
r  c                .    U R                   R                  $ r"  )r   r   r  s    r  r   Accelerator.distributed_type  s    zz***r  c                .    U R                   R                  $ r"  )r   num_processesr  s    r  r.  Accelerator.num_processes      zz'''r  c                .    U R                   R                  $ r"  )r   process_indexr  s    r  r2  Accelerator.process_index  r0  r  c                .    U R                   R                  $ r"  )r   local_process_indexr  s    r  r5  Accelerator.local_process_index  s    zz---r  c                .    U R                   R                  $ r"  )r   r   r  s    r  r   Accelerator.device  s    zz   r  c                .    U R                   R                  $ r"  )r   r
  r  s    r  r
  Accelerator.split_batches  s    %%333r  c                .    U R                   R                  $ r"  )r   dispatch_batchesr  s    r  r<  Accelerator.dispatch_batches  s    %%666r  c                .    U R                   R                  $ r"  r   even_batchesr  s    r  r@  Accelerator.even_batches      %%222r  c                $    XR                   l        g r"  r?  )r	  values     r  r@  rA    s    .3+r  c                .    U R                   R                  $ r"  )r   use_seedable_samplerr  s    r  rF   Accelerator.use_seedable_sampler  s    %%:::r  c                .    U R                   R                  $ r"  )r   non_blockingr  s    r  rI  Accelerator.non_blocking  rB  r  c                f    [        U R                  S5      (       a  U R                  R                  $ g)Nuse_stateful_dataloaderF)hasattrr   rL  r  s    r  rL  #Accelerator.use_stateful_dataloader  s+    4))+DEE))AAAr  c                .    U R                   R                  $ r"  )r   r   r  s    r  r   Accelerator.project_dir      ))555r  c                .    U R                   R                  $ r"  )r   r   r  s    r  r   Accelerator.logging_dir  rQ  r  c                .    U R                   R                  $ r"  )r   	iterationr  s    r  save_iterationAccelerator.save_iteration  s    ))333r  c                .    U R                   R                  $ )zTrue for one process only.)r   is_main_processr  s    r  rY  Accelerator.is_main_process  s     zz)))r  c                .    U R                   R                  $ )z True for one process per server.)r   is_local_main_processr  s    r  r\  !Accelerator.is_local_main_process  s     zz///r  c                :    U R                   U R                  S-
  :H  $ )Nr   )r2  r.  r  s    r  is_last_processAccelerator.is_last_process  s    !!T%7%7!%;;;r  c                .    U R                   R                  $ r"  )r   r   r  s    r  r   Accelerator.mixed_precision  s    zz)))r  c                .    U R                   R                  $ r"  )r   r   r  s    r  r   Accelerator.is_fsdp2  s    zz"""r  c                    U R                   $ r"  )r   r  s    r  !is_composable_parallelism_enabled-Accelerator.is_composable_parallelism_enabled  s    }}r  c                .    U R                   R                  $ r"  )r   r   r  s    r  r   Accelerator.parallelism_config  s    zz,,,r  c                .    U R                   R                  $ r"  )r   r   r  s    r  torch_device_meshAccelerator.torch_device_mesh  s    zz%%%r  c                    U R                   =nc  U R                  R                  $ UR                  SUR                  S0ng)Ndp_replicatecpT)r   r   r\  dp_replicate_enabled
cp_enabled)r	  pc_non_model_shard_dimss      r  should_save_modelAccelerator.should_save_model  sD    )))B2::333##^MM4!
 r  c                    U R                   (       a7  U R                   R                  (       a  U R                  R                  S5      $ g[	        S5      e)z
Returns the local rank for tensor parallelism. If tensor parallelism is configured but not enabled, returns 0
since all ranks are assumed to be the same.
tpr   zETensor parallelism is not configured. Set `parallelism_config` first.)r   
tp_enabledrk  get_local_rankRuntimeErrorr  s    r  tensor_parallel_rank Accelerator.tensor_parallel_rank  sB     ""&&11--<<TBBbccr  c                    [        S5      e)z,
Pipeline parallelism is not supported yet.
z>Pipeline parallelism is currently not supported in Accelerate.r   r  s    r  pipeline_parallel_rank"Accelerator.pipeline_parallel_rank  s    
 ""bccr  c                    [        S5      e)z+
Context parallelism is not supported yet.
z=Context parallelism is currently not supported in Accelerate.r~  r  s    r  context_parallel_rank!Accelerator.context_parallel_rank   s    
 ""abbr  c                    U R                   (       a7  U R                   R                  (       a  U R                  R                  S5      $ g[	        S5      e)z
Returns the local rank for replicate-based data parallelism. If replicate-based data parallelism is configured
but not enabled, returns 0 since all ranks are assumed to be the same.
rn  r   zCData parallelism is not configured. Set `parallelism_config` first.)r   rp  rk  ry  rz  r  s    r  data_parallel_rankAccelerator.data_parallel_rank'  sB     ""&&;;--<<^LL`aar  c                    U R                   (       a7  U R                   R                  (       a  U R                  R                  S5      $ g[	        S5      e)z
Returns the local rank for shard-based data parallelism. If shard-based data parallelism is configured but not
enabled, returns 0 since all ranks are assumed to be the same.
dp_shardr   zOShard-based data parallelism is not configured. Set `parallelism_config` first.)r   dp_shard_enabledrk  ry  rz  r  s    r  data_parallel_shard_rank$Accelerator.data_parallel_shard_rank3  sB     ""&&77--<<ZHHlmmr  c              #  v   #    [        5       R                  XS9 nUv   SSS5        g! , (       d  f       g= f7f)a  
Splits `input` between `self.num_processes` quickly and can be then used on that process. Useful when doing
distributed inference, such as with different prompts.

Note that when using a `dict`, all keys need to have the same number of elements.

Args:
    inputs (`list`, `tuple`, `torch.Tensor`, or `dict` of `list`/`tuple`/`torch.Tensor`):
        The input to split between processes.
    apply_padding (`bool`, `optional`, defaults to `False`):
        Whether to apply padding by repeating the last element of the input so that all processes have the same
        number of elements. Useful when trying to perform actions such as `Accelerator.gather()` on the outputs
        or passing in less inputs than there are processes. If so, just remember to drop the padded elements
        afterwards.

Example:

```python
# Assume there are two processes
from accelerate import Accelerator

accelerator = Accelerator()
with accelerator.split_between_processes(["A", "B", "C"]) as inputs:
    print(inputs)
# Process 0
["A", "B"]
# Process 1
["C"]

with accelerator.split_between_processes(["A", "B", "C"], apply_padding=True) as inputs:
    print(inputs)
# Process 0
["A", "B"]
# Process 1
["C", "C"]
```
)apply_paddingN)r   split_between_processes)r	  inputsr  s      r  r  #Accelerator.split_between_processes?  s/     N ^33F3X\bL YXXs   9(	9
69c                V   ^ Tc  SU R                   ;   a  U mO[        S5      eU4S jnU$ )a  
A decorator that will run the decorated function on the main process only. Can also be called using the
`PartialState` class.

Args:
    function (`Callable`): The function to decorate.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()


>>> @accelerator.on_main_process
... def print_something():
...     print("This will be printed by process 0 only.")


>>> print_something()
"This will be printed by process 0 only"
```
Accelerator.gThe `on_main_process` decorator must be called with a function on an instantiated `Accelerator` object.c                 B   > [        5       R                  T5      " U 0 UD6$ r"  )r   on_main_processargsr  functions     r  _inner+Accelerator.on_main_process.<locals>._inner       >11(;TLVLLr  __qualname__r   r	  r  r  s    ` r  r  Accelerator.on_main_processi  ;    4 !2!22 } 	M r  c                V   ^ Tc  SU R                   ;   a  U mO[        S5      eU4S jnU$ )aO  
A decorator that will run the decorated function on the local main process only. Can also be called using the
`PartialState` class.

Args:
    function (`Callable`): The function to decorate.

Example:
```python
# Assume we have 2 servers with 4 processes each.
from accelerate import Accelerator

accelerator = Accelerator()


@accelerator.on_local_main_process
def print_something():
    print("This will be printed by process 0 only on each server.")


print_something()
# On server 1:
"This will be printed by process 0 only"
# On server 2:
"This will be printed by process 0 only"
```
r  zmThe `on_local_main_process` decorator must be called with a function on an instantiated `Accelerator` object.c                 B   > [        5       R                  T5      " U 0 UD6$ r"  )r   on_local_main_processr  s     r  r  1Accelerator.on_local_main_process.<locals>._inner  s     >77A4R6RRr  r  r  s    ` r  r  !Accelerator.on_local_main_process  s>    : !2!22  D 	S r  c                V   ^ Tc  SU R                   ;   a  U mO[        S5      eU4S jnU$ )a  
A decorator that will run the decorated function on the last process only. Can also be called using the
`PartialState` class.

Args:
    function (`Callable`): The function to decorate.

Example:
```python
# Assume we have 4 processes.
from accelerate import Accelerator

accelerator = Accelerator()


@accelerator.on_last_process
def print_something():
    print(f"Printed on process {accelerator.process_index}")


print_something()
"Printed on process 3"
```
r  zgThe `on_last_process` decorator must be called with a function on an instantiated `Accelerator` object.c                 B   > [        5       R                  T5      " U 0 UD6$ r"  )r   on_last_processr  s     r  r  +Accelerator.on_last_process.<locals>._inner  r  r  r  r  s    ` r  r  Accelerator.on_last_process  r  r  c                   ^^ U b  Tb  Tc  [        U R                  TS9$ Tc  SU R                  ;   a  U mO[        S5      eUU4S jnU$ )aZ  
A decorator that will run the decorated function on a given process index only. Can also be called using the
`PartialState` class.

Args:
    function (`Callable`, `optional`):
        The function to decorate.
    process_index (`int`, `optional`):
        The index of the process on which to run the function.

Example:
```python
# Assume we have 4 processes.
from accelerate import Accelerator

accelerator = Accelerator()


@accelerator.on_process(process_index=2)
def print_something():
    print(f"Printed on process {accelerator.process_index}")


print_something()
"Printed on process 2"
```
)r2  r  r  c                 D   > [        5       R                  TT5      " U 0 UD6$ r"  )r   
on_process)r  r  r  r2  s     r  r  &Accelerator.on_process.<locals>._inner	  s"    >,,X}EtVvVVr  )r   r  r  r   )r	  r  r2  r  s    `` r  r  Accelerator.on_process  s\    : =#<8CS4??-HH!2!22 } 	W r  c                   ^^ U b  Tb  Tc  [        U R                  TS9$ Tc  SU R                  ;   a  U mO[        S5      eUU4S jnU$ )a  
A decorator that will run the decorated function on a given local process index only. Can also be called using
the `PartialState` class.

Args:
    function (`Callable`, *optional*):
        The function to decorate.
    local_process_index (`int`, *optional*):
        The index of the local process on which to run the function.

Example:
```python
# Assume we have 2 servers with 4 processes each.
from accelerate import Accelerator

accelerator = Accelerator()


@accelerator.on_local_process(local_process_index=2)
def print_something():
    print(f"Printed on process {accelerator.local_process_index}")


print_something()
# On server 1:
"Printed on process 2"
# On server 2:
"Printed on process 2"
```
)r5  r  r  c                 D   > [        5       R                  TT5      " U 0 UD6$ r"  )r   on_local_process)r  r  r  r5  s     r  r  ,Accelerator.on_local_process.<locals>._inner9  s%    >228=PQSWb[abbr  )r   r  r  r   )r	  r  r5  r  s    `` r  r  Accelerator.on_local_process  sa    @ #6#BIY400FYZZ!2!22 } 	c r  c              #  |   #    U R                   R                  5          Sv   SSS5        g! , (       d  f       g= f7f)a  
Lets the main process go first inside a with block.

The other processes will enter the with block after the main process exits.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> with accelerator.main_process_first():
...     # This will be printed first by process 0 then in a seemingly
...     # random order by the other processes.
...     print(f"This will be printed by process {accelerator.process_index}")
```
N)r   main_process_firstr  s    r  r  Accelerator.main_process_first>  s%     & ZZ**, -,,   <+	<
9<c              #  |   #    U R                   R                  5          Sv   SSS5        g! , (       d  f       g= f7f)a  
Lets the local main process go inside a with block.

The other processes will enter the with block after the main process exits.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> with accelerator.local_main_process_first():
...     # This will be printed first by local process 0 then in a seemingly
...     # random order by the other processes.
...     print(f"This will be printed by process {accelerator.local_process_index}")
```
N)r   local_main_process_firstr  s    r  r  $Accelerator.local_main_process_firstT  s%     & ZZ002 322r  c              #    #    U R                   (       a(  UR                  S5         Sv   UR                  S5        g[        R                  nU R                  (       aO  U R
                  [        R                  :w  d$  U R                  R                  R                  S:  a  [        USU5      nU" 5          Sv   SSS5        g! UR                  S5        f = f! , (       d  f       g= f7f)a  
A context manager to disable gradient synchronizations across DDP processes by calling
`torch.nn.parallel.DistributedDataParallel.no_sync`.

If `model` is not in DDP, this context manager does nothing

Args:
    model (`torch.nn.Module`):
        PyTorch Module that was prepared with `Accelerator.prepare`

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)
>>> input_a = next(iter(dataloader))
>>> input_b = next(iter(dataloader))

>>> with accelerator.no_sync():
...     outputs = model(input_a)
...     loss = loss_func(outputs)
...     accelerator.backward(loss)
...     # No synchronization across processes, only accumulate gradients
>>> outputs = model(input_b)
>>> accelerator.backward(loss)
>>> # Synchronization across all processes
>>> optimizer.step()
>>> optimizer.zero_grad()
```
FNTr   no_sync)r   set_requires_gradient_sync
contextlibnullcontextr  r   r,   r   r   r   
zero_stager   )r	  modelcontexts      r  r  Accelerator.no_syncj  s     D ==,,U37006 ,,G##((O,E,EEIdIdIoIorsIs%eY@G  006 s4   #C(C B	C(3C8	C(CC(
C%!C(c              #  B  #    [        U [        R                  R                  R                  5      (       d  Sv   gU R
                  nU R                  nSU l        SU l        U R                  R                  / 5         Sv   Xl        X l        g! Xl        X l        f = f7f)an  Trigger the sync of the gradients in the next backward pass of the model after multiple forward passes under
`Accelerator.no_sync` (only applicable in multi-GPU scenarios).

        If the script is not launched in distributed mode, this context manager does nothing.

        Args:
            model (`torch.nn.Module`):
                The model for which to trigger the gradient synchronization.

        Example:

        ```python
        >>> from accelerate import Accelerator

        >>> accelerator = Accelerator()
        >>> dataloader, model, optimizer = accelerator.prepare(dataloader, model, optimizer)

        >>> with accelerator.no_sync():
        ...     loss_a = loss_func(model(input_a))  # first forward pass
        ...     loss_b = loss_func(model(input_b))  # second forward pass
        >>> accelerator.backward(loss_a)  # No synchronization across processes, only accumulate gradients
        >>> with accelerator.trigger_sync_in_backward(model):
        ...     accelerator.backward(loss_b)  # Synchronization across all processes
        >>> optimizer.step()
        >>> optimizer.zero_grad()
        ```
NT)	r   torchnnparallelDistributedDataParallelrequire_backward_grad_syncrequire_forward_param_syncreducerprepare_for_backward)r  old_require_backward_grad_syncold_require_forward_param_syncs      r  trigger_sync_in_backward$Accelerator.trigger_sync_in_backward  s     < %!2!2!J!JKK).)I)I&).)I)I& ,0(+/(**2.	N/M,/M, 0N,/M,s   A:B=B BBBc                ^   U R                   R                  (       a>  U R                   R                  (       a#  SU l        U R                   R	                  S5        gU =R                  S-  sl        U R                   R	                  U R                  U R                   R
                  -  S:H  5        g)zRSets the right `sync_gradients` context and either resets or increases `self.step`r   Tr   N)r   sync_with_dataloaderend_of_dataloaderr   _set_sync_gradientsr   r  s    r  _do_syncAccelerator._do_sync  sv    338K8K8]8]DI33D9IINI33TYYATATA^A^5^cd4der  c                .    U R                   R                  $ r"  r   sync_gradientsr  s    r  r  Accelerator.sync_gradients  s    ""111r  c                $    XR                   l        g r"  r  )r	  r  s     r  r  r    s    -;*r  c                .    U R                   R                  $ r"  )r   r   r  s    r  r  'Accelerator.gradient_accumulation_steps  s    "",,,r  c                R    U R                   R                  R                  SU05        g )Nr   )r   plugin_kwargsupdate)r	  r  s     r  r  r    s"    ))00+?Z1[\r  c              '    #    U R                  5         U R                  =(       d9    U R                  =(       a&    U R                  R                  R                  SS5      n[        R                  " 5        nU H?  nUR                  U(       a  [        R                  " 5       OU R                  U5      5        MA     Sv   SSS5        g! , (       d  f       g= f7f)aD  
A context manager that will lightly wrap around and perform gradient accumulation automatically

Args:
    *models (list of `torch.nn.Module`):
        PyTorch Modules that were prepared with `Accelerator.prepare`. Models passed to `accumulate()` will
        skip gradient syncing during backward pass in distributed training

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(gradient_accumulation_steps=1)
>>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)

>>> for input, output in dataloader:
...     with accelerator.accumulate(model):
...         outputs = model(input)
...         loss = loss_func(outputs)
...         loss.backward()
...         optimizer.step()
...         scheduler.step()
...         optimizer.zero_grad()
```
sync_each_batchFN)r  r  r  r   r  r   r  	ExitStackenter_contextr  r  )r	  modelsallow_gradient_synccm_stackms        r  
accumulateAccelerator.accumulate  s     8 	  
 $$ T''5599:KUS 	 !!#x&&CVz'='='?\`\h\hij\kl  $##s   A2C4A
C>	C
CCc              #  N  #    U R                   (       a  / nUb  Sn[        U R                  5       HT  u  pV[        U[        5      (       a  SnM  UR                  XVR                  R                  45        X&R                  l        MV     U(       a  [        R                  " S5        OU R                  nU(       a  SOSn [        XSS9   Sv   SSS5        U H"  u  pXXR                  U   R                  l        M$     gU R                  [        R                  :w  a  [        R                  " S5        [        R                  " U5         Sv   SSS5        g! , (       d  f       N= f! U H"  u  pXXR                  U   R                  l        M$     f = f! , (       d  f       g= f7f)a  
A context manager that facilitates distributed training or evaluation on uneven inputs, which acts as a wrapper
around `torch.distributed.algorithms.join`. This is useful when the total batch size does not evenly divide the
length of the dataset.

Args:
    joinables (`list[torch.distributed.algorithms.Joinable]`):
        A list of models or optimizers that subclass `torch.distributed.algorithms.Joinable`. Most commonly, a
        PyTorch Module that was prepared with `Accelerator.prepare` for DistributedDataParallel training.
    even_batches (`bool`, *optional*)
        If set, this will override the value of `even_batches` set in the `Accelerator`. If it is not provided,
        the default `Accelerator` value wil be used.

<Tip warning={true}>

`join_uneven_inputs` is only supported for Distributed Data Parallel training on multiple GPUs. For any other
configuration, this method will have no effect.

</Tip>

<Tip warning={true}>

Overriding `even_batches` will not affect iterable-style data loaders.

</Tip>

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(even_batches=True)
>>> ddp_model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

>>> with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
...     for input, output in dataloader:
...         outputs = model(input)
...         loss = loss_func(outputs)
...         loss.backward()
...         optimizer.step()
...         optimizer.zero_grad()
```
NFTzjOverriding even_batches is only supported for map-style datasets, yet some dataloaders given were iterable)enablethrow_on_early_terminationzuJoining uneven inputs is only supported for multi-GPU training, as a result `join_uneven_inputs` will have no effect.)r)  	enumerater  r   r   appendbatch_samplerr@  r   r   r   r   r,   r   r  r  )	r	  	joinablesr@  dl_even_batches_valuesiterable_dl_seendl_idxdlenable_joineven_batches_values	            r  join_uneven_inputsAccelerator.join_uneven_inputs  sh    Z %'"'#( "+D,=,=">JF!"&:;;+/( *116;K;K;X;X2YZ4@$$1 #? $MM E  $00#/%TK^)TYZ [ 3I.FK]%%f-;;H 3I $$(:(:: L ''	2 32 [Z 3I.FK]%%f-;;H 3I 32sO   B;F%>
E' EE' A3F%F	F%
E$ E' '*FF%
F"F%c                <    U R                   R                  " U0 UD6  g)z
Drop in replacement of `print()` to only print once per server.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> accelerator.print("Hello world!")
```
N)r   print)r	  r  r  s      r  r  Accelerator.printe  s     	

$)&)r  c                   U(       a  [        U[        R                  R                  R                  5      (       a  U R                  XS9$ [        U[        R                  R                  5      (       a  U R                  XS9$ [        U[        R                  R                  5      (       a  U R                  XS9nU$  U$ [        U[        5      (       a  U R                  U5      nU$ U$ )Nr   )r   r  utilsdata
DataLoaderr   r  Moduleprepare_modeloptim	Optimizerprepare_optimizerr   prepare_scheduler)r	  obj
first_passr   	optimizer	schedulers         r  _prepare_oneAccelerator._prepare_onet  s    #u{{//::;;///WWC11))#)QQC!6!677 2232Z	   8 
	 [))..s3I
r  r  c                 ^ ^ Uc  U Vs/ s H  nSPM     nnOiT R                   [        R                  [        R                  4;   a  [	        S5      e[        U5      [        U5      :w  a  [	        S[        U5       S35      eU H  n[        U[        R                  R                  5      (       d  M.  T R                  U5      (       d  MF  T R                   [        R                  :w  d  Mf  [        R                  R                  SS5      S:w  d  M  [	        S5      e   T R                   [        R                  :X  aL  S	nU H3  n[        U[        R                  R                  5      (       d  M.  US
-  nM5     US
:  a  [        S5      eT R                   [        R                   :X  a(  T R#                  5       u  pgUb  Ub  Xg:w  a  [	        S5      eT R$                  (       a  S	nS	n['        U5       He  u  p[        U[        R                  R                  5      (       a  US
-  nM5  [        U[        R(                  R*                  5      (       d  M`  US
-  nMg     US
:  a  US	:  d  US	:  a  US
:  a  [	        S5      eUS
:  a  [	        S5      eT R,                  =(       a    T R                   [        R                   :H  n
U
(       a  T R.                  " USS06nT R                   [        R0                  [        R2                  [        R                  4;   a  [5        SS5      (       as  T R6                  R8                  S:X  d  T R6                  R8                  S:X  a?  T R:                  R<                  (       a$  [>        RA                  S5        T RB                  " U6 nT RD                  (       a*  T RD                  RF                  (       a  T RH                  " U6 nT RD                  (       a*  T RD                  RJ                  (       a  T RL                  " U6 nT RN                  [P        RR                  :X  a  T RT                  " U6 nO-T RN                  [P        RV                  :X  a  T RX                  " U6 nT R                   [        R                  :X  a  T RZ                  " U6 nOT R                   [        R                  :X  a  T R\                  " U6 nOT R$                  (       a  T R^                  " U6 nOnT RN                  [P        R`                  :X  a  T Rb                  " USU06u  p![e        U 4S j[g        X!5       5       5      n[e        U 4S j[g        X5       5       5      nU
(       a{  T R.                  " U6 nWRi                  5        VVs0 s H
  u  pXU   _M     nnnU H?  n[        U[        R(                  R*                  5      (       d  M.  URk                  U5        MA     U HR  m[m        U4S jT Rn                  T Rp                  T Rr                  T Rt                  4 5       5      (       d  MK  STl;        MT     [        U5      S
:  a  U$ US	   $ s  snf s  snnf )a  
Prepare all objects passed in `args` for distributed training and mixed precision, then return them in the same
order.

Args:
    *args (list of objects):
        Any of the following type of objects:

        - `torch.utils.data.DataLoader`: PyTorch Dataloader
        - `torch.nn.Module`: PyTorch Module
        - `torch.optim.Optimizer`: PyTorch Optimizer
        - `torch.optim.lr_scheduler.LRScheduler`: PyTorch LR Scheduler

    device_placement (`list[bool]`, *optional*):
        Used to customize whether automatic device placement should be performed for each object passed. Needs
        to be a list of the same length as `args`. Not compatible with DeepSpeed or FSDP.

<Tip>

  You don't need to prepare a model if you only use it for inference without any kind of mixed precision

</Tip>

Examples:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> # Assume a model, optimizer, data_loader and scheduler are defined
>>> model, optimizer, data_loader, scheduler = accelerator.prepare(model, optimizer, data_loader, scheduler)
```

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> # Assume a model, optimizer, data_loader and scheduler are defined
>>> device_placement = [True, True, False, False]
>>> # Will place the first two items passed in automatically to the right device but not the last two.
>>> model, optimizer, data_loader, scheduler = accelerator.prepare(
...     model, optimizer, data_loader, scheduler, device_placement=device_placement
... )
```
NzDYou can't customize device placements with DeepSpeed or Megatron-LM.z)`device_placement` should be a list with z) elements (the number of objects passed).ACCELERATE_BYPASS_DEVICE_MAPr   r   You can't train a model that has been loaded with `device_map='auto'` in any distributed mode. Please rerun your script specifying `--num_processes=1` or by launching with `python {{myscript.py}}`.r   r   zUYou can't use same `Accelerator()` instance with multiple models when using DeepSpeeda~  The model and the optimizer parameters are not on the same device, which probably means you created an optimizer around your model **before** putting on the device. Make sure the line model.to(device) is before the optimizer creation in your script or remove it entirely and use the flag default value for `device_placement` in your `Accelerator` to let it handle that part for you.zWhen using FSDP2, a model and optimizer must be passed together to `Accelerator.prepare()` as the optimizer needs to have its parameters modified after the model is converted.z,Only one model is supported when using FSDP2	drop_refsFr   z2.7.0r   r   a  You are using lower version of PyTorch(< 2.7.0) with ipex acceleration on Intel CPU or XPU, Intel has upstreamed most of the optimizations into stock PyTorch from 2.7.0, we encourage you to install the latest stock PyTorch and enjoy the out-of-experience on Intel CPU/XPU.r   c              3  J   >#    U  H  u  pTR                  US US9v   M     g7f)T)r
  r   Nr  .0r	  dr	  s      r  	<genexpr>&Accelerator.prepare.<locals>.<genexpr>  s+      ZuPVPS!!#$!KZus    #c              3  F   >#    U  H  u  pTR                  XS 9v   M     g7f)r  Nr  r  s      r  r  r    s#     rTq&#4,,S,ETqs   !c              3  .   >#    U  H
  nTU;   v   M     g 7fr"  r   )r  	containeritems     r  r  r  &  s      !fI 	!!fs   T)<r   r,   r   r   r   r   r   r  r  r  verify_device_mapr   r   r   r   AssertionErrorr   _get_devicesr   r  r  r  r   _get_named_parameters	MULTI_CPUr'  r\   r   r   r   use_ipexr   warning_prepare_ipexr   rx  _prepare_tprq  _prepare_cpr   r   r   _prepare_ter   _prepare_ao_prepare_deepspeed_prepare_megatron_lm_prepare_fsdp2r   _prepare_msamptuplezipitems_switch_parametersanyr  r  r   r  _is_accelerate_prepared)r	  r   r  _r	  model_countmodel_deviceoptimizer_deviceoptimizer_countitpu_should_fix_optimizerold_named_paramsresultnew_named_paramsnpmappingr  s   `                @r  prepareAccelerator.prepare  sF   \ #.23dd3""'@'@/B]B]&^^cdd!"c$i/;CI;Fop  C 300**3//))_-?-??JJNN#A7KvU ~     O$=$==Kc588??331$K  Q$k    O$7$77-1->->-@*L',<,H\Mm $  ==KO#D/c588??331$KU[[%:%:;;#q(O	 * aOa$7[1_Q`cdQd l  Q !OPP $(#8#8#iT=R=RVeViVi=i ##994Q5Q  _%>%>@Y@Y[j[m[m$nn g..[[%%.$++2B2Be2KJJ'' g ))40""t'>'>'I'I##T*D""t'>'>'I'I##T*D~000##T*D!2!22##T*D  O$=$==,,d3F""o&A&AA..5F]](($/F>#7#77)-)<)<d)fUe)f& Z]^bZu F rTWX^TqrrF##996B:J:P:P:RS:R$!q1--:RGSc5;;#8#899**73  D "&"3"3T\\4CSCSUYUeUe!f   04,  Vqv7fQi7q  4V Ts   Y 9Yc                   U Vs/ s H>  n[        U[        R                  R                  5      (       d  U R	                  USS9OUPM@     nnU Vs/ s H?  n[        U[        R                  R                  5      (       d  U R	                  U5      OUPMA     nnU R
                  nU GH   n[        U[        R                  R                  5      (       d  M/  SSKJnJn  SSK	J
n  Un	Un
U	R                  5        H  u  p[        X5      (       a  M  UR                  XS   U" 5       /S9nUR                  SS	5      u  pU	R                  U5      nU
" 5       R                  UUS   5        [        U[        R                  R                   5      (       d'  [        R                  R!                  XR"                  S
9n[%        UX5        M     GM#     U$ s  snf s  snf )NTr
  r   )DTensor	Replicate)ReplicateParallelrw  )r   
placements.r   requires_grad)r   r  r  r  r  rk  torch.distributed.tensorrE  rF  )transformers.integrations.tensor_parallelrG  named_parameters
from_localrsplitget_submoduleprepare_module_tp	ParameterrK  r   )r	  r  r	  r<  r   argrE  rF  rG  r  tp_plannameparamdp
param_name
param_typemodule_to_tps                    r  r&  Accelerator._prepare_tp.  s    qu
ptil:c588??;[;[Dcd3addpt 	 

 fllek^a
30P0P$##C(VYYekl,,Cc588??33CS%(E'G$557e--''t;LZcZeYf'g)-S!)<&
$22:>	++L+d:KL!"ehh&8&899++B>Q>Q+RBj5  8 . A

 ms   AGAGc                8   SSK Jn  SSKJn  U R                  R
                  R                  nU" U5        [        R                  " X R                  S   S9U l
        U H9  n[        U[        R                  R                  5      (       d  M.  [        U5        M;     U$ )Nr   )context_parallel)set_rotate_methodro  )mesh)%torch.distributed.tensor.experimentalr^  0torch.distributed.tensor.experimental._attentionr_  r   
cp_handlercp_comm_strategy	functoolsr   rk  _cp_contextr   r  r  r  r   )r	  r  r^  r_  rd  rT  s         r  r'  Accelerator._prepare_cpR  sw    JV22==NN*+$,,-=DZDZ[_D`aC#uxx//.s3  r  c           	        U Vs/ s H>  n[        U[        R                  R                  5      (       d  U R	                  USS9OUPM@     nnU Vs/ s H?  n[        U[        R                  R                  5      (       d  U R	                  U5      OUPMA     nnSu  pE[        U5       H2  u  pb[        U[        R                  R                  5      (       d  M0  XbpTM4     Uc  [        U5      $ U R                  R                  R                  U5        U R                  R                  R                  (       a  [        X5      nU R                  R                  R                  [        R                  :w  a  [!        U5      (       d  U R                  R                  R"                  (       a/  [%        U40 U R                  R                  R'                  5       D6nO9[        R(                  " U40 U R                  R                  R'                  5       D6n[+        U R,                  " [        U5      SS065      nSSKJn  U H  n[        U[        R2                  R4                  5      (       d  M.  UR6                   H  n	[        U	S   5       Hz  u  pj[        R8                  " SU
R:                  U
R<                  S	9U	S   U'   [        X5      (       a  U
R>                  RA                  5       OU
RA                  5       U	S   U   l         M|     M     M     U RB                  RE                  U5        [G        X5      n[I        U RB                  5      S:  a,  U RB                  S
   U RB                  S   L a  U RB                  S
	 XSU'   [+        U R,                  " U6 5      nURK                  5        VV
s0 s H
  u  pXU   _M     nnn
U H9  n[        U[        R2                  R4                  5      (       d  M.  [M        X-5        M;     U$ s  snf s  snf s  sn
nf )NTrD  NNr  r   rE  paramsr   )dtyper   )'r   r  r  r  r  r  r.  r   r   set_auto_wrap_policyactivation_checkpointingrG   r   r   r-   r   rt   use_regional_compilationrr   r   compilerH   r!  rL  rE  r  r  param_groupsemptyrl  r   _local_tensordata_ptrr  r  rI   r   r0  rJ   )r	  r  r	  r<  model_indexr  r9  r;  rE  param_groupr?  r=  r>  r@  s                 r  r,  Accelerator._prepare_fsdp2a  sO    qu
ptil:c588??;[;[Dcd3addpt 	 

 fllek^a
30P0P$##C(VYYekl ('FA#uxx//%&U (
 =  	

33E: ::!!::"4/E ::##++}/?/??HZ[`HaHazz''@@'V1I1I1S1S1UVeTtzz/G/G/Q/Q/ST 4D4N4NPUV\P]4nim4no 	5C#u{{4455#&#3#3K )+h*? @ 49;;qXYX`X`3aH-a0:DQ:P:PAOO446VWV`V`Vb $H-a09	 !A $4  	E" $D0 t||q dll2&6$,,r:J&JR  ${ 4D4N4NPV4WX6F6L6L6NO6Nda1q))6NOC#u{{44551#?  M

 mv Ps   AO;AP &Pc                   Uc0  U R                   =(       a    U R                  [        R                  :g  nU R                  R                  U5        U R                  U5      (       aM  U R                  [        R                  :w  a/  [        R                  R                  SS5      S:w  a  [        S5      eU R                  (       a  UR                  Ul        [        U R                  U R                   5      nU R"                  [$        R&                  :X  d  [)        UR                  S5      (       d#  UR                  n[+        U" U5      5      Ul        O\UR                  R,                  nU" U5      n[/        Xa5      Ul        [/        [+        UR                  R,                  5      U5      Ul        U R"                  [$        R0                  :X  a9  U R2                  (       d(  [5        XR6                  =(       d    U R8                  5      n[;        USS5      (       d  [;        USS5      (       Ga  [;        US	S5      (       Ga  [=        UR>                  RA                  5       5      n[C        U5      S
:  a)  U R                  [        R                  :w  a  [        S5      e[C        U5      S
:X  a  [E        U5      S   n[G        U[H        RJ                  5      (       a  URL                  n	O8[G        U[N        5      (       a!  [H        RJ                  " U5      RL                  n	OUn	U RJ                  RP                  S:X  a  [S        5       (       a  OL[H        RJ                  " U	5      U RJ                  :w  a(  U RJ                  RL                  c  U	S:w  a  [        S5      eSU;   a  [S        5       (       a  SU;   a  [U        5       (       d  SU;   a  [        S5      eO8U(       a1  U R                  U5      (       d  URW                  U RJ                  5      nU(       Gd  U RX                  (       Gav  U RZ                  (       a  U RZ                  R\                  (       GdI  [_        U5      (       a  [        S5      e[a        S URc                  5        5       5      (       Ga  U Rd                  b  U Rd                  Rg                  5       O0 n
[        R                  R                  SS5      S:w  a`  U RJ                  RP                  S:X  a-  U RJ                  RL                  /U RJ                  RL                  pOU Rh                  /U Rh                  pOSu  p[H        Rj                  Rl                  Rn                  " U4XS.U
D6nU Rd                  b  U Rd                  Rq                  U5        GOU RZ                  (       a  U RZ                  R\                  (       aq  [)        US5      (       d  [s        S5      eURt                  U RZ                  Rt                  :w  a/  [        SU RZ                  Rt                   SURt                   35      eGOpU Rv                  (       a  [        S5      eU R                  [        R                  :X  Ga.  SSK<J=n  [G        X5      =(       d(    [}        U5      =(       a    [G        UR~                  U5      nU(       Gd  U R                  R                  R                  U5        U R                  R                  n[        UR                  UU RJ                  5      UlD        UR                  =(       d    UR                  UR                  UR                  UR                  UR                  UR                  UR                  UR                  UR                  UR                  UR                  U RJ                  S.n
[G        U
S   [N        5      (       a~  [        R                  " U
S   5      n/ nUR                  5        HJ  u  nnUR                  U5      (       d  M  URW                  U RJ                  5        UR                  U5        ML     UU
S'   U" U40 U
D6nUR                  (       a9  SSKUJVnJWnJXn  U" U[        R                  " UUR                  S9UR                  S 9  U R                  S!:w  Ga  / nUR                  U5       GH%  nUR                  (       d  M  UR                  nUR                  [H        R                  :w  d  MC  URJ                  [H        RJ                  " S"5      :w  d  Mi  UR                  (       d  M|  UR                  R                  R                  S#R                  UR                  R                  5      4nUU;  a  UR                  U5        UR                  RW                  [H        R                  5      Ulh        [H        R                  UR                  lj        GM(     U R                  (       aN  U H#  u  nn[        R                  " S$U S%U S&35        M%     [C        U5      S:  a  [        R                  " S'5        [C        U R                  5      S
:  a,  U R                  S(   U R                  S)   L a  U R                  S(	 XR                  S)'   GOU R                  [        R                  :X  a  U Rd                  (       a  U Rd                  Rg                  5       O0 n
[H        Rj                  Rl                  Rn                  " U40 U
D6nU Rd                  b  U Rd                  Rq                  U5        OhU R                  [        R                  :X  aJ  U R                  R                  (       a/  [        R                  " U5      RW                  U RJ                  5      nU R"                  [$        R0                  :X  a9  U R2                  (       a(  [5        XR6                  =(       d    U R8                  5      nU R                  R                  R                  [        R                  :w  a  [}        U5      (       d  U R                  R                  R                  (       a0  [        U40 U R                  R                  Rg                  5       D6nU$ [H        R                  " U40 U R                  R                  Rg                  5       D6nU$ )*a\  
Prepares a PyTorch model for training in any distributed setup. It is recommended to use
[`Accelerator.prepare`] instead.

Args:
    model (`torch.nn.Module`):
        A PyTorch model to prepare. You don't need to prepare a model if it is used only for inference without
        any kind of mixed precision
    device_placement (`bool`, *optional*):
        Whether or not to place the model on the proper device. Will default to `self.device_placement`.
    evaluation_mode (`bool`, *optional*, defaults to `False`):
        Whether or not to set the model for evaluation only, by just applying mixed precision and
        `torch.compile` (if configured in the `Accelerator` object).

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> # Assume a model is defined
>>> model = accelerator.prepare_model(model)
```
r  r   r   r  __func__is_loaded_in_8bitFis_loaded_in_4bithf_device_mapr   a_  You can't train a model that has been loaded in 8-bit or 4-bit precision on multiple devices in any distributed mode. In order to use 8-bit or 4-bit models that have been loaded across multiple GPUs the solution is to use Naive Pipeline Parallelism. Therefore you should not specify that you are under any distributed regime in your accelerate config.r   r   a$  You can't train a model that has been loaded in 8-bit or 4-bit precision on a different device than the one you're training on. Make sure you loaded the model on the correct device using for example `device_map={'':torch.cuda.current_device()}` or `device_map={'':torch.xpu.current_device()}`diska  You can't train a model that has been loaded in 8-bit or 4-bit precision with CPU or disk offload. If you want train the 8-bit or 4-bit model in CPU, please install bitsandbytes with multi-backend, see https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backendzYour model contains `DTensor` parameters, which is incompatible with DDP. Maybe you loaded your model with `device_map='auto'`? Specify `device_map='cuda'` or 'cpu' instead.c              3  8   #    U  H  oR                   v   M     g 7fr"  rJ  )r  r?  s     r  r  ,Accelerator.prepare_model.<locals>.<genexpr>  s     C0B10B   r   ri  )
device_idsoutput_devicer   zModel should undergo tensor parallel before passing it to accelerate.You can use .from_pretrained(..., tp_plan='auto') if the model supportsztp_size in the plugin z# should be same as model's tp size zeFSDP2 preparation should be done via `accelerate.prepare()`, as it requires a model and an optimizer.FullyShardedDataParallel)sharding_strategycpu_offloadauto_wrap_policyr   sync_module_statesbackward_prefetchforward_prefetchuse_orig_paramsparam_init_fnignored_moduleslimit_all_gathers	device_idr  )CheckpointImplapply_activation_checkpointingcheckpoint_wrapper)checkpoint_impl)checkpoint_wrapper_fnr  nometaz, z%Upcasted low precision parameters in z5 because mixed precision turned on in FSDP. Affects: rI  zVFSDP upcast of low precision parameters may affect the precision of model checkpoints.rm  rn  )xr   r   r,   r   r  r  r  r   r   r   r   r   r   forward_original_forwardrO   r   r   r   r   rM  rD   r{  r   r   r   r>   r   r   r   r   r~  r   r   r   r   r  r   indexr   r   rS   r`   tor)  r   rx  rc   r2  
parametersr   r   r5  r  r  r  register_comm_hookr   r   r   2torch.distributed.fsdp.fully_sharded_data_parallelr  rt   	_orig_modr   r   ro  rE   r  r  reshard_after_forwardr  r  mixed_precision_policyr  r  r  r  r  r  rerr  named_modules	fullmatchrp  ;torch.distributed.algorithms._checkpoint.checkpoint_wrapperr  r  r  re  r   NO_REENTRANTr   fsdp_modules_has_params_flat_paramrl  float32rK  moduler   __name__join_fqnsr  _handle_orig_param_dtyperY  r   r   r"  r   fork_launchedxmpMpModelWrapperr   r   r-   rq  rr   )r	  r  r   evaluation_modeautocast_contextmodel_forward_funcnew_forwardmodel_devicescurrent_devicecurrent_device_indexr  r  r  r   is_type_fsdpr   regignoredrV  r  r  r  r  upcasted_logrW  name_param_logname_log	param_logs                               r  r  Accelerator.prepare_model  s
   6 ##44f9N9NRaRfRf9fE" ""5))%%););;

=wG6Qz 
 ??&+mmE#B4??TXTiTij>#7#77wu}}V`?a?a%*]]" 78HI[8\ ]%*]]%;%;"./AB *; > *+B5==CYCY+Z\a b ~0009R9R&u.D.D._H_H_`EE.66'%I\^c:d:djq?Ek
 k
   3 3 : : <=M=!A%$*?*??CUCU*U } 
 ]#q(!%m!4Q!7nell;;+9+?+?(44+0<<+G+M+M(+9(;;##u,1X1Z1Z\\"674;;F))5;OST;T(W 
 -'0W0Y0Y]*/?/A/A]* I  + d&<&<U&C&CHHT[[)E   $*A*AdF]F]FhFhFh$U++$ H  C0@0@0BCCC=A=M=M=YT--779_aFzz~~&DgNRXX;;++u49=9J9J8KT[[M^M^9=9Q9Q8RTXTlTl4>1
!HH--EE*4U[E ''3((;;EB((T-D-D-O-Oui00-b  ==D$;$;$C$CC$01H1H1P1P0QQtuz  vC  vC  uD  E  D  {  &&/*>*>>o  *%6  &u-S*U__d2S  $JJ**??F"&**"8"8K 1F#111K- .9-J-J-okNoNo'2'>'>,7,H,H+6+M+M.9.L.L-8-J-J,7,H,H+6+F+F)4)B)B+6+F+F-8-J-J%)[[F$ "&):";SAA jj0A)BC"$,1,?,?,ALD&"}}T22 &		$++ 6 'v 6	 -B
 5<01 1&1E";;  7!2;2C2C 20>0K0K3 .9-I-I& ''4/#%L"&"3"3E":(  &11$ & 2 2!KK5==8 %V0D D % 3 3 3 /5mm.E.E.N.NPTPYPYZ`ZlZlZrZrPs-tN-\A , 3 3N C
 */u}})EEJ?D}}FNN<M #;T ++3?/Hi$MM"Gz R,,5;a!9 4@ |,q0$MM x
 t||$q(dll2.>$,,rBR.RR(#(R &&/*C*CC9=9I9I))335r))AA%R6R##/$$77>&&/*=*==$**BZBZ**5144T[[A~000T5N5N&u.D.D._H_H_`E::##++}/?/??HZ[`HaHazz''@@'V1I1I1S1S1UV  eTtzz/G/G/Q/Q/STr  c                   ^^ [        5       (       d  [        S5      eU R                  (       a{  U Vs/ s H0  n[        U[        R
                  R                  5      (       d  M.  UPM2     snmU Vs/ s H0  n[        U[        R                  R                  5      (       d  M.  UPM2     nnU Ha  n[        U[        R
                  R                  5      (       d  M.  [        UU R                  R                  U R                  R                  S9  Mc     U R                  (       aS  [        W5      S:  aD  U R                  R                  R                  (       a  SSKJm  US   R#                  UU4S j5        U$ s  snf s  snf )Nzu`torchao` was not found on your system or is too old of a version. Please ensure that `torchao >= 0.6.1` is installed)configmodule_filter_funcr   )(precompute_float8_dynamic_scale_for_fsdpc                    > T" TS   5      $ )Nr   r   )r  r  r  r  s     r  <lambda>)Accelerator._prepare_ao.<locals>.<lambda>  s    (PQWXYQZ([r  )r^   r   r   r   r  r  r  r  r  rC   r   r  r  r   enable_fsdp_float8_all_gathertorchao.float8r  register_step_post_hook)r	  r  x
optimizersrT  r  r  s        @@r  r)  Accelerator._prepare_ao  s   #%% H  ==!%HAAuxx)GaHF%)RTZ5;;;P;P-Q!TJRC#uxx//'1188'+'='='P'P  ==S_q0T5K5K5R5R5p5pOqM11[ ' IRs   -E6#E60-E;!E;c                6   [        5       (       d  [        S5      eSu  p#Su  pEU Vs/ s H  ofPM     nnU Hg  n[        U[        R                  R
                  5      (       a	  UnUS-  nM5  [        U[        R                  R                  5      (       d  M`  UnUS-  nMi     Uc  Uc  U$ Ub  Uc  [        S5      eUS:  d  US:  a  [        SU SU S35      eU R                  U5      n[        R                  " 5          [        U5        S S S 5        U R                  U5      n	UR                  5        V
Vs0 s H
  u  pXU
   _M     nn
nUR                   H  nUS	    Vs/ s H  oU   PM	     snUS	'   M      U$ s  snf ! , (       d  f       N{= fs  snn
f s  snf )
Nzg`transformer_engine` was not found on your system. Please ensure that `transformer_engine` is installedri  r   r   r   zgYou must pass a model and an optimizer together to `accelerate.prepare()` when using TransformerEngine.You can't use multiple models () or optimizers z with TransformerEngine.rk  )r_   r   r   r  r  r  r  r  r   r!  no_gradrB   r0  rs  )r	  r  r  r  
num_modelsnum_optimizersr	  r<  r;  r=  r>  r?  r@  rx  s                 r  r(  Accelerator._prepare_te  s   .00y  &%)"
!%&##&C#uxx//a
C%++"7"799	!#  M%-y  !^~11*=MnM]]uv   55e<]]_%  55e<6F6L6L6NO6Nda1q))6NO$11K9DX9N$O9NAQZ9N$OK! 2 7 '& _ P %Ps   E:>E?6F F?
Fc                   SS K nUR                  nU R                  [        R                  :X  a  SSKJ n  UR                  nU R                  n[        S U 5       5      nUR                  R                  S0 5      R                  SS5      nUS:  an  [        SSS	5      (       d  [        S
5      e[        SS5      (       d  [        S5      eSSKJn  Sn	U" U R                  R                   U4U	4S9U R"                  l        U V
s/ s HH  n
['        U
[(        R*                  R,                  R.                  5      (       a  U R1                  U
SS9OU
PMJ     nn
UR3                  S5      (       a  U(       a  U V
s/ s H"  n
[5        U
S5      (       d  M  U
R6                  PM$     nn
[        S U 5       5      (       a  [9        S5      eU R:                  (       a  U Vs/ s H  oU R<                  -  PM     nnUR>                  (       a  [A        U5      O
[C        U5      n[E        U5      S:  a&  [F        RI                  SUR>                   SU S35        O[9        S5      eURK                  S5      nURM                  SSU RN                  S9  URK                  S5      nXRN                  :w  a,  [F        RQ                  SU RN                   SU S35        Xl'        S SS!.nUb(  UUS'   XRK                  S5      -  U R<                  -  US"'   S nS nS nU H  n
['        U
[(        RR                  RT                  5      (       a  U
nM0  ['        U
[(        RV                  RX                  [Z        45      (       a  U
nMc  ['        U
[\        [^        45      (       d9  [!        U
5      R`                  URb                  Rd                  Rf                  ;   d  M  U
nM     Ub  S#UR                  ;   a   ['        U[Z        5      (       d  [9        S$5      eS#UR                  ;  a   ['        U[Z        5      (       a  [9        S%5      e['        U[(        RV                  RX                  5      (       a  SUR                  S&'   Ubm  S'UR                  ;   a   ['        U[^        5      (       d  [9        S(5      eS'UR                  ;  a-  ['        U[^        5      (       a  URh                  c  [9        S)5      eUb8  Ub5  ['        U[Z        5      (       a   ['        U[^        5      (       d  [9        S*5      eUGb  U R                  [        Rj                  :X  a  [m        UU Rn                  5      nURq                  U5        / S+QnU Vs/ s H  nUR3                  U5      (       d  M  UPM     nn[E        U5      S:  a  S,S-U S.3-   S/-   n[5        US05      (       d  [9        S1U-   5      e[5        URr                  S25      (       a  URr                  Rt                  nOI[5        URr                  S35      (       a   [C        URr                  Rv                  5      nO[9        S4U-   5      eURy                  UU-  [{        S5U-  U-  5      S6U-  S+.5        ['        U[Z        5      (       a(  URy                  UR|                  UR~                  S7.5        ['        U[^        5      (       a  URh                  c  [        UR                  S8S 5      c  [        UR                  S9S 5      OUR                  R                  S9   nURy                  SUUR                  S:.5        UR                  bM  U R:                  (       d-  [        R                  " UR                  U R<                  -  5      OUR                  US;'   UR                  " SPS<S0UD6  UR                  U l        [        UU R                  S=9nUGbB  ['        U[Z        5      (       aA  UR                  US>'   ['        U[^        5      (       a  URh                  b  URh                  US?'   OU R                  S@   R                  SA0 5      R                  SBSC5      SC:w  au  U R                  R                  SDS5      (       aT  U R                  R                   SE:X  a/  [        R                  R                  SFSG5      SG:X  a  [9        SH5      e[        U5      nUUS#'   Ub<  [!        U5      R`                  URb                  Rd                  Rf                  ;   a  UUS?'   U R                  R                   SE:X  a  SI[        R                  SJ'   U" SP0 UD6u  nnnn[        SSSK5      (       a  U R"                  R                  R                  [        R                  :w  a  U R"                  R                  R                  5       nU R"                  R                  R                  (       a  [        UR                  40 UD6  OUR                  UR                  SL5      USM9  Ub  [        U5      nUb0  Uc!  [        UUU R                  U R:                  SN9nO[        UU5      n[        [E        U5      5       H  n['        UU   [(        RR                  RT                  5      (       a  UUU'   M6  ['        UU   [(        RV                  RX                  [Z        45      (       a  UUU'   Mo  ['        UU   [\        [^        45      (       d<  [!        UU   5      R`                  URb                  Rd                  Rf                  ;   d  M  UUU'   M     U R                  c  [        U5      U l\        O[F        RQ                  SO5        U R                  R                  U5        Ub  U R                  R                  U5        Ub  U R                  R                  U5        [        U5      $ s  sn
f s  sn
f s  snf s  snf )QNr   )r   c              3  ~   #    U  H3  n[        U[        R                  R                  R                  5      v   M5     g 7fr"  )r   r  r   r  r  )r  r	  s     r  r  1Accelerator._prepare_deepspeed.<locals>.<genexpr>  s-     #a\`UXJsEKK4D4D4O4O$P$P\`s   ;=tensor_parallelautotp_sizer   r   r   0.16.4bDeepspeed TP requires deepspeed >= 0.16.4, Please update DeepSpeed via `pip install deepspeed -U`.z2.2.0zsTried to use TP, but `torch.distributed.device_mesh` requires PyTorch >= 2.2.0. Please upgrade your PyTorch version)init_device_meshrw  )mesh_dim_namesTrD  train_micro_batch_size_per_gpu
batch_sizec              3  (   #    U  H  oS L v   M
     g 7fr"  r   )r  bss     r  r  r  4  s     8KbTzKs   a,  At least one of the dataloaders passed to `accelerate.prepare()` has `None` as batch size. Please set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.RSince you passed both train and evaluation dataloader, `is_train_batch_min` (here % will decide the `train_batch_size` (r   a  When using DeepSpeed, `accelerate.prepare()` requires you to pass at least one of training or evaluation dataloaders with `batch_size` attribute returning an integer value or alternatively set an integer value in `train_micro_batch_size_per_gpu` in the deepspeed config file or assign integer value to `AcceleratorState().deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu']`.r  F)
must_matchr  zEGradient accumulation steps mismatch: GradientAccumulationPlugin has z, DeepSpeed config has z. Using DeepSpeed's value.      ?)gradient_clippingz;zero_optimization.stage3_gather_16bit_weights_on_model_savetrain_batch_sizer  zYou cannot specify an optimizer in the config file and in the code at the same time. Please remove the optimizer from the config file or create `accelerate.utils.DummyOptim` in the code.zTYou cannot create a `DummyOptim` without specifying an optimizer in the config file.zero_allow_untested_optimizerr  zYou cannot specify a scheduler in the config file and in the code at the same time. Please remove the scheduler from the config file or create `accelerate.utils.DummyScheduler` in the code.zEither specify a scheduler in the config file or pass in the `lr_scheduler_callable` parameter when using `accelerate.utils.DummyScheduler`.zlYou can only specify `accelerate.utils.DummyScheduler` in the code when using `accelerate.utils.DummyOptim`.)z$zero_optimization.reduce_bucket_sizez-zero_optimization.stage3_prefetch_bucket_sizez4zero_optimization.stage3_param_persistence_thresholdzStherefore it's not possible to automatically fill out the following `auto` entries zin the DeepSpeed config file: z . You can fix that by replacing zB`auto` values for these keys with an integer value of your choice.r  z!Can't find `model.config` entry, hidden_sizehidden_sizeszMCan find neither `model.config.hidden_size` nor `model.config.hidden_sizes`, g?
   )zoptimizer.params.lrzoptimizer.params.weight_decaydefaultslr)zscheduler.params.warmup_min_lrzscheduler.params.warmup_max_lrz!scheduler.params.warmup_num_stepsz scheduler.params.total_num_stepsr  )r  config_paramsmodel_parameterslr_schedulerzero_optimizationoffload_optimizerr   nonezero_force_ds_cpu_optimizerr   PT_HPU_LAZY_MODE1zxYou can't use an Offload Optimizer with HPU in Lazy Mode. Please set the environment variable `PT_HPU_LAZY_MODE` to `0`.r   DEEPSPEED_USE_HPUz0.14.4r   )r   compile_kwargsstep_with_optimizerr
  a   A wrapped DeepSpeed engine reference is currently tied for this `Accelerator()` instance. If you want to call `accelerator.backward()` referencing a new model/engine, please create a separate `Accelerator()` instance and call `accelerator.prepare()` on it.r   )cr   
initializer   r   r   msampr   r2  deepspeed_configr   rA   r   r\   torch.distributed.device_meshr  r   r   r   ds_device_meshr   r  r   r  r  r  is_autorM  r  r   r
  r.  is_train_batch_minminmaxr   r   r   	get_value
fill_matchr  r$  r  r  r  r  rx   r   ry   r  runtimelr_schedulesVALID_LR_SCHEDULESlr_scheduler_callabler   r>   r   set_moe_leaf_modulesr  r  r  r  r   r  weight_decayr   r  r  warmup_num_stepstotal_num_stepsmathceildeepspeed_config_processr   rk  r   r   rz   r   r   r-   r   r   rq  rs   r  rr  poprv   r   r   rw   ranger   ru   r  r  r   r  r.  ) r	  r  r   ds_initializemsamp_deepspeedr   is_dataloader_presentr   r  mesh_dim_namer	  r<  batch_sizesr  batch_size_per_device%deepspeed_gradient_accumulation_stepsconfig_kwargsr  r  r  hidden_size_based_keysr  hidden_size_auto_keys	reasoningr  max_lrr  enginer4  r  r  r9  s                                    r  r*  Accelerator._prepare_deepspeed  s
   !,,~333:+66M00 ##a\`#a a"33778I2NRRS`bcdQ;#Kx@@!x  $D'22! J  G M(89I9IG:gtfv(wDJJ% 
 8B#u{{GWGWGbGb7c7cDcd3ill 	 

 ##$DEE$9=\#lA[~s~~\8K888$O 
 %%Va"bVa
1C1C#CVaK"b<L<_<_K(8ehiteu%{#a'KKl+>>??dezd{{}
 !K  %5$>$>?_$`! 	##)(,(H(H 	$ 	
 1A0J0JKh0i-04T4TTNNWX\XxXxWy z((M'NNhj 0U, "%KP

 !,>SM:;%(B(BC`(aadhdvdvv ,- 		C#uxx//C%++"7"7!DEE	S;"?@@S	""i&7&7&D&D&W&WW	   .???
S\_iHkHk H 
 $4$E$EE*U^akJmJm j  )ekk&;&;==UY 112QR .???
S\_mHoHo L  #3#D#DDy>;;33; r 
  Y%:)j22:iR`;b;b 5 
 >#4#44*5$2I2IJ11%8&"
 1G$f0F1JZJbJbcdJeQ0F!$f()A-i67L6MMmnoZ[ 
 uh//$%H9%TUU5<<77"',,":":KU\\>::"%ell&?&?"@K$gjss  $$@Kk@YILSS^M^alMlImPRU`P` )j22$$,5LL[d[q[qr )n669;Z;Z;b y22JEM I//t<",,55d; 
 $$:;:@=F=W=W ,,8  $11 		)";";d>P>P"PQ&66 ""DE
 55XX-X$4$E$ED!T5J5JKF$i*661:1A1AF-.!)n>>9CbCbCn1:1P1P~.,,-@AEEFY[]^bb & $($9$9$=$=>[]a$b$b;;++u4HZ\_9`dg9g",!a# 
 %C9$M	*3F;' ,	?33y7H7H7U7U7h7hh5>F>2{{5( 39

./1>1H1H.FIq,T8<<AYAYAaAaereueuAu!%!9!9!C!C!E::++DD-fmmN~NNN>+=+=i+HYgNh$5i@	$' 4!!,0,N,N&*&8&8	!I !:,	 RI3v;'fQi99 &F1Iq	EKK,A,A:+NOO )F1I [.,IJJO,,	0A0A0N0N0a0aa )F1I ( ,,40Fv0N-p
 LL'$  ''	2$  ''	2V}q
 ] #cZ %gs%   An:5n?n?oo	;o	c                |	   U R                   R                  nS nUR                  (       d  U Vs/ s H"  n[        US5      (       d  M  UR                  PM$     nn[        U5      S:X  a  [        S5      eUR                  (       a  [        U5      O
[        U5      n[        U5      S:  a&  [        R                  SUR                   SU S35        O/U H)  n[        U[        5      (       d  M  UR                  S   n  O   Ub8  U R                  UR                   UR"                  -  -  nUR%                  X65        O[        S	5      eS nS nS n	S n
U H  n[        U[&        R(                  R*                  R,                  5      (       a  U
c  [/        [1        U5      5      n
MO  [        U[&        R2                  R4                  5      (       a  UnM|  [        U[&        R6                  R8                  5      (       a  UnM  [        U[:        [<        45      (       d  M  Un	M     Ub  UR?                  Xz5        Ub  URA                  U5        U	b1  [        U	[<        5      (       d  [        S
5      eURC                  U	5        [E        XRF                  S9  [I        U 5      u  pxn	U RK                  5         Sn/ nU H  n[        U[&        R(                  R*                  R,                  5      (       a!  URM                  [O        X5      5        US-  nMW  [        U[        5      (       a<  US:X  a  URQ                  5         [O        X5      nURM                  WU   5        US-  nM  URM                  U5        M     Ub  [S        XX5      nUb  [U        U5      nU	b  [W        X5      n	[Y        [        U5      5       H  n[        X   [&        R2                  R4                  5      (       a  X|U'   M4  [        X   [&        R6                  R8                  5      (       a  XU'   Me  [        X   [<        5      (       d  M~  XU'   M     Ub?  U RZ                  RM                  U5        [        U RZ                  5      S:  a  []        S5      eUb  U R^                  RM                  U5        U	b  U R`                  RM                  U	5        [c        U5      $ s  snf )Nr  r   zfYou must specify a training or evaluation dataloader in `accelerate.prepare()` when using Megatron-LM.r   r  r  r   micro_batch_sizezWhen you do not pass the dataloader parameter, the `data_parallel_size`, `micro_batch_size`, and `global_batch_size` megatron parameters will not be updated.zvYou can't use a custom scheduler with Megatron-LM. Please use the `accelerate.utils.MegatronLMDummyScheduler` instead.)args_defaultszWYou can't use same `Accelerator()` instance with multiple models when using Megatron-LM)2r   r   megatron_dataset_flagrM  r  r   r   r  r  r  r   r   r   r|   dataset_argsr.  	tp_degree	pp_degreeset_training_argsr  r   r  r  nextiterr  r  r  r  r   r}   set_network_size_argsset_optimizer_typeset_scheduler_argsr   megatron_lm_default_argsr   rl   r  r   set_megatron_data_argsr{   r~   r   r  r  r  r   r  r.  )r	  r  r   r&  r	  r  	dp_degreer  r  r  
batch_datacounterr<  dataloadersr9  s                  r  r+   Accelerator._prepare_megatron_lm&	  s   !ZZ::!7759XTcWS,=W>3>>TKX;1$ |  4F3X3Xs;/^abm^n;!#h)<<==bcsbttvx
 c#<=='*'7'78J'K$  '**/A/K/KN`NjNj/jkI001AMg  		
C#u{{//::;;
@R!$s)_
C11C%++"7"799	C+/G!HII	  44UG 11)< i)ABB  M  11)< 	t3^3^_(UVZ([%9 C#u{{//::;;=dHI1C!:;;a<..0"A$"LKk'231c"  "4	EE 29=I 29HIs6{#A&)UXX__55!q	FIu{{'<'<==%q	FI'?@@%q	 $ LL&4<< 1$$m   ##I. ##I.V}C Ys   R9R9c           	        [        5       (       a  SSKnO[        S5      e/ n/ nU Vs/ s H  oUPM     nn[        U5       H  u  pu[	        U[
        R                  R                  5      (       a&  UnUR                  5         UR                  Xx45        MT  [	        U[
        R                  R                  5      (       d  M  UR                  Xu45        M     [        U5      S:  d  [        U5      S:  a  [        U5      S:X  a  [        S5      e[        U5      S:X  a  [        U5      S:X  a  U$ U R                  R                  S:X  a  [
        R                   OSn	[        U5      S:  a  [        U5      S:X  a  U H  u  pxU R"                  R$                  S:X  d  M!  ['        UR)                  5       5      R"                  R$                  S:X  d  MT  UR+                  U R"                  5      nUR-                  USU	S	S
S9u  pXU'   M     [        U5      S:X  a  [        U5      S:X  a  US   u  pUS   u  pU R"                  R$                  S:X  aL  ['        UR)                  5       5      R"                  R$                  S:X  a  UR+                  U R"                  5      nUR-                  XU	S	S
S9u  pXU'   XU'   [/        U5      $ s  snf )z
Prepares model and optimizer for training with IPEX on CPU/XPU. This covers 3 cases, IPEX compiled with CPU
only support, IPEX compiled with XPU support and training with XPU pytorch backend available in stock pytorch
starting from version 2.4.
r   NzIPEX is not installed or IPEX's version does not match current PyTorch version. Please refer to https://github.com/intel/intel-extension-for-pytorch.r   z]Prepare with IPEX expects either 1+ models and no optimizer OR a single model-optimizer pair.r   r   r   TO1)r  rl  inplacelevel)rU   intel_extension_for_pytorchr   r  r   r  r  r  trainr  r  r  r   r   r   r   bfloat16r   r   r-  r  r  optimizer.  )r	  r  ipexr  r  r	  r<  r9  r  rl  r4  i_modeli_optimizerr  s                 r  r%  Accelerator._prepare_ipex	  sT    6L 
 
!%&##&'FA#uxx//qj)C%++"7"799!!1(+ ( z?Q3v;?s:!7Ko 
 v;!J1 4M"&**"<"<"FDv;?s:!3";;##u,e6F6F6H1I1P1P1U1UY^1^!HHT[[1E#}}Ud%Y]ei}jHE %1I # v;!J1 4#AYNG%/]"K{{5(T%2B2B2D-E-L-L-Q-QUZ-Z-#}}Uu^bjn}oE#7O"+;V}Q 's   J>c                    U R                   [        R                  :X  a1  [        U R                  S5      (       a  U R                  R
                  $ U R                  $ )z|
Prepare the device mesh for distributed training. The dataloader will determine how to load data based on the
device mesh.
r  )r   r,   r   rM  r   r  rk  r  s    r  _prepare_device_mesh Accelerator._prepare_device_mesh	  sE    
   O$=$=='$**VfBgBg::,,,)))r  c                  [        5       (       d  [        S5      eSS KnSu  pES nSu  pxU V	s/ s H  oPM     n
n	[        U
5       Hk  u  p[	        U	[
        R                  R                  5      (       a	  U	nUS-  nM7  [	        U	[
        R                  R                  5      (       d  Mb  U	nUnUS-  nMm     Uc  Uc  X4$ Ub  Uc  [        S5      eUS:  d  US:  a  [        SU SU S	35      eU R                  b  U R                  R                  nOU R                  R                  nUR                  XEUS
9u  pE[        [!        U
5      5       He  n[	        X   [
        R                  R                  5      (       a  XJU'   M4  [	        X   [
        R                  R                  5      (       d  Ma  XZU'   Mg     Ub  SX'   [#        U
5      U4$ s  sn	f )NzMS-AMP was not found on your system. Please ensure that MS-AMP is available  or choose `'te'` as the backend for FP8 mixed precision training.r   ri  r  r   z\You must pass a model and an optimizer together to `accelerate.prepare()` when using MS-AMP.r  r  z with MS-AMP.)	opt_levelF)rY   r   r   r  r   r  r  r  r  r  r   r   rI  r   r  r  r   r.  )r	  r   r  r   r  r  optimizer_indexr  r  r	  r<  r9  rI  s                r  r-  Accelerator._prepare_msamp	  s   !##U 
 	%%)"
!%&##&'FA#uxx//a
C%++"7"799	"#!# ( ++%-n  !^~11*=MnM]]jk 
 &&2 33==	 55??	$//I/VEs6{#A&)UXX__55!q	FI(=(=??%q		 $
 &05-V}...G 's   G	c                p   [        USS5      (       a,  XR                  ;  a  U R                  R                  U5        U$ Uc,  U R                  [        R
                  :w  a  U R                  OSnU R                  5       n[        UU R                  U R                  U R                  U R                  UU R                  R                  5       U R                  U R                   UU R"                  U R$                  R&                  U R(                  U R*                  US9nU R                  R                  U5        U$ )a  
Prepares a PyTorch DataLoader for training in any distributed setup. It is recommended to use
[`Accelerator.prepare`] instead.

Args:
    data_loader (`torch.utils.data.DataLoader`):
        A vanilla PyTorch DataLoader to prepare
    device_placement (`bool`, *optional*):
        Whether or not to place the batches on the proper device in the prepared dataloader. Will default to
        `self.device_placement`.
    slice_fn_for_dispatch (`Callable`, *optional*`):
        If passed, this function will be used to slice tensors across `num_processes`. Will default to
        [`~utils.slice_tensors`]. This argument is used only when `dispatch_batches` is set to `True` and will
        be ignored otherwise.

Example:

```python
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> data_loader = torch.utils.data.DataLoader(...)
>>> data_loader = accelerator.prepare_data_loader(data_loader, device_placement=True)
```
r3  F)r.  r2  r
  put_on_devicer  r<  r@  slice_fn_for_dispatchrF  	data_seedrI  rL  rk  )r   r  r  r   r,   r   r   rF  r   r   r.  r2  r
  r  copyr<  r@  rF  r   rO  rI  rL  )r	  data_loaderr   rN  r   prepared_data_loaders         r  r   Accelerator.prepare_data_loader
  s   < ; 95AA"3"33!!((5#8<8M8MQ`QdQd8dt44jo//12KK,,,,,,*nn))+!22**"7!%!:!:,,66**$($@$@) 
" 	  !56##r  c                   [        5       (       a(  SSKJnJn  U =R                  [        XU45      -  sl        [        USS5      (       a,  XR                  ;  a  U R                  R                  U5        U$ Uc  U R                  nU R                  [        R                  :X  a  SOU R                  n[        XUS9nU R                  R                  U5        U$ )a^  
Prepares a PyTorch Optimizer for training in any distributed setup. It is recommended to use
[`Accelerator.prepare`] instead.

Args:
    optimizer (`torch.optim.Optimizer`):
        A vanilla PyTorch optimizer to prepare
    device_placement (`bool`, *optional*):
        Whether or not to place the optimizer on the proper device. Will default to `self.device_placement`.

Example:

```python
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> optimizer = torch.optim.Adam(...)
>>> optimizer = accelerator.prepare_optimizer(optimizer, device_placement=True)
```
r   AdaLomoLomor3  FN)r   r   )rV   
lomo_optimrV  rW  r   r   r   r   r  r   r   r   r   r   r   )r	  r  r   rV  rW  r   s         r  r  Accelerator.prepare_optimizer?
  s    ,  1 ##z)G_'MM# 97?? 0 00  ''	2##44 ))^-A-AAt{{(^de		*r  c                t   [        USS5      (       a,  XR                  ;  a  U R                  R                  U5        U$ U R                  nU R                   H"  n[        USS5      UR                  :X  d  M   Un  O   [        UUU R                  U R                  S9nU R                  R                  U5        U$ )a   
Prepares a PyTorch Scheduler for training in any distributed setup. It is recommended to use
[`Accelerator.prepare`] instead.

Args:
    scheduler (`torch.optim.lr_scheduler.LRScheduler`):
        A vanilla PyTorch scheduler to prepare

Example:

```python
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> optimizer = torch.optim.Adam(...)
>>> scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, ...)
>>> scheduler = accelerator.prepare_scheduler(scheduler)
```
r3  Fr  Nr  )r   r  r  r   r  r   r   r
  )r	  r  r  opts       r  r  Accelerator.prepare_schedulerk
  s    , 97?? 0 00  ''	2$$	##Cy+t4E	 $ ) $ B B,,	
	 		*r  c                .   UR                  S5      nU R                  [        R                  :w  a  XR                  -  nU R                  [        R                  :X  a*  U R
                  R                  " U4SU R                  0UD6  gU R                  [        R                  :X  a  gU R                  b,  U R                  R                  U5      R                  " S0 UD6  gUb#  U R                  (       a  U R                  X5        gUR                  " S0 UD6  g)a  
Scales the gradients in accordance to the `GradientAccumulationPlugin` and calls the correct `backward()` based
on the configuration.

Should be used in lieu of `loss.backward()`.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(gradient_accumulation_steps=2)
>>> outputs = model(inputs)
>>> loss = loss_fn(outputs, labels)
>>> accelerator.backward(loss)
```
learning_rater  Nr   )r   r   r,   r   r  r   backwardr  r   r   scaler   lomo_backward)r	  lossr  r^  s       r  r_  Accelerator.backward
  s    $ 

?3  O$=$==:::D  O$=$==))224fH[H[f_ef""o&A&AA[[$KKd#,,6v6&4+B+Bt3MM#F#r  c                L    [         R                  " SU R                  S9U l        g)a[  
Sets the internal trigger tensor to 1 on the current process. A latter check should follow using this which
will check across all processes.

Note:
    Does not require `wait_for_everyone()`

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> # Assume later in the training script
>>> # `should_do_breakpoint` is a custom function to monitor when to break,
>>> # e.g. when the loss is NaN
>>> if should_do_breakpoint(loss):
...     accelerator.set_trigger()
>>> # Assume later in the training script
>>> if accelerator.check_breakpoint():
...     break
```
r   r   N)r  tensorr   r  r  s    r  set_triggerAccelerator.set_trigger
  s    0 !<<$++>r  c                   U R                   c$  [        R                  " SU R                  S9U l         U R	                  U R                   5      nUR                  5       S:  a%  [        R                  " SU R                  S9U l         gg)aY  
Checks if the internal trigger tensor has been set to 1 in any of the processes. If so, will return `True` and
reset the trigger tensor to 0.

Note:
    Does not require `wait_for_everyone()`

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> # Assume later in the training script
>>> # `should_do_breakpoint` is a custom function to monitor when to break,
>>> # e.g. when the loss is NaN
>>> if should_do_breakpoint(loss):
...     accelerator.set_trigger()
>>> # Assume later in the training script
>>> if accelerator.check_trigger():
...     break
```
r   re  r   TF)r  r  rf  r   rg   r  )r	  r  s     r  check_triggerAccelerator.check_trigger
  si    2 #$||AdkkBDkk$"2"23"$||AdkkBDr  c                ^   U R                   (       a  U R                  S:X  a  Uc  U R                  nO[        U[        [
        45      (       d  U/nU HV  n[        U[        5      (       a#  UR                  n[        U[        5      (       a  M#  U R                  R                  U5        MX     ggg)a  
Unscale the gradients in mixed precision training with AMP. This is a noop in all other settings.

Likely should be called through [`Accelerator.clip_grad_norm_`] or [`Accelerator.clip_grad_value_`]

Args:
    optimizer (`torch.optim.Optimizer` or `list[torch.optim.Optimizer]`, *optional*):
        The optimizer(s) for which to unscale gradients. If not set, will unscale gradients on all optimizers
        that were passed to [`~Accelerator.prepare`].

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> model, optimizer = accelerator.prepare(model, optimizer)
>>> outputs = model(inputs)
>>> loss = loss_fn(outputs, labels)
>>> accelerator.backward(loss)
>>> accelerator.unscale_gradients(optimizer=optimizer)
```
r   N)
r   r   r   r   r.  r   r   r  r   unscale_)r	  r  r[  s      r  unscale_gradientsAccelerator.unscale_gradients
  s    0 ??t33v=  ,,		E4=99&K	  &:;;--C !&:;;$$S) !  >?r  c                   U R                   [        R                  :X  a  U R                  5         U Vs/ s H  oDPM     nnU R                   Ht  nXR                  5        Vs/ s H  oDPM     sn:X  d  M(  U R                  (       d  UR                  X#5      s  $ [        R                  R                  R                  XUS9s  $    GOU R                   [        R                  :X  a(  U R                  b  U R                  R                  5       $ gU R                   [        R                  :X  GaP  U R                   H  nUR                   R"                  (       a  M   Un[%        U[&        5      (       a#  UR(                  n[%        U[&        5      (       a  M#  [*        R,                  " U5      n[*        R.                  " SUSU R0                  -  S9  SUR                   l        M     [2        R4                  R7                  SS5      R9                  5       S	:X  aj  U R                  5         U Vs/ s H  oDPM     nnU R                   H9  nXR                  5        Vs/ s H  oDPM     sn:X  d  M(  UR                  X#5      s  $    U R                  5         [        R                  R                  R                  XUS9$ s  snf s  snf s  snf s  snf )
a  
Should be used in place of `torch.nn.utils.clip_grad_norm_`.

Returns:
    `torch.Tensor`: Total norm of the parameter gradients (viewed as a single vector).

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(gradient_accumulation_steps=2)
>>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)

>>> for input, target in dataloader:
...     optimizer.zero_grad()
...     output = model(input)
...     loss = loss_func(output, target)
...     accelerator.backward(loss)
...     if accelerator.sync_gradients:
...         accelerator.clip_grad_norm_(model.parameters(), max_grad_norm)
...     optimizer.step()
```
)	norm_typeNsumr  )r`  Tr   r   r   )r   r,   r   rn  r  r  r   clip_grad_norm_r  r  r   r   r   get_global_grad_normr   r   r   is_xla_gradients_syncedr   r   r  xm_fetch_gradients
all_reducer.  r   r   r   r   )	r	  r  max_normrq  r?  r  acc_optr[  	gradientss	            r  rs  Accelerator.clip_grad_norm_  s9   2   O$8$88""$%/0Z!ZJ0-=-=-?!@-?!-?!@@==$44XII$xx~~==&I  >    & ""o&?&??,,844IIKK""o&9&99++--EEE!C$S*>??!mm %S*>?? " 3 3C 8I MM%#@R@R:RSEIG**B , zz~~3W=CCEO&&()34Aa
4!\\E!1A1A1C%D1CAa1C%DD$44XII * 	 xx~~--ji-XXE 1!@6 5%Ds   J3!J8%J=Kc                    U R                   [        R                  [        R                  4;   a  [	        S5      eU R                  5         [        R                  R                  R                  X5        g)a_  
Should be used in place of `torch.nn.utils.clip_grad_value_`.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(gradient_accumulation_steps=2)
>>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)

>>> for input, target in dataloader:
...     optimizer.zero_grad()
...     output = model(input)
...     loss = loss_func(output, target)
...     accelerator.backward(loss)
...     if accelerator.sync_gradients:
...         accelerator.clip_grad_value_(model.parameters(), clip_value)
...     optimizer.step()
```
zUDeepSpeed and FSDP  do not support `clip_grad_value_`. Use `clip_grad_norm_` instead.N)
r   r,   r   r   	Exceptionrn  r  r  r   clip_grad_value_)r	  r  
clip_values      r  r  Accelerator.clip_grad_value_S  sQ    ,   _%>%>@T@T$UUstt ''
?r  c                    [        U5      $ )a  
Gather the values in *tensor* across all processes and concatenate them on the first dimension. Useful to
regroup the predictions from all processes when doing evaluation.

Note:
    This gather happens in all processes.

Args:
    tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
        The tensors to gather across all processes.

Returns:
    `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`: The gathered tensor(s). Note that the
    first dimension of the result is *num_processes* multiplied by the first dimension of the input tensors.

Example:

```python
>>> # Assuming four processes
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> process_tensor = torch.tensor([accelerator.process_index], device=accelerator.device)
>>> gathered_tensor = accelerator.gather(process_tensor)
>>> gathered_tensor
tensor([0, 1, 2, 3])
```
)rK   )r	  rf  s     r  rK   Accelerator.gathern  s    < f~r  c                  ^   [        S USS9  SnU=(       d    U(       + nU(       a  [        U5      nOT R                  U5      n T R                  R
                  (       am  T R                  R                  S:X  a  [        R                  S5        U$ T R                  R                  S:  a   U 4S jnU(       a  U" U5      $ [        XT5      $ U$ U$ ! [         a    Sn Nf = f! [         a    Us $ f = f)	a  
Gathers `input_data` and potentially drops duplicates in the last batch if on a distributed system. Should be
used for gathering the inputs and targets for metric calculation.

Args:
    input (`torch.Tensor`, `object`, a nested tuple/list/dictionary of `torch.Tensor`, or a nested tuple/list/dictionary of `object`):
        The tensors or objects for calculating metrics across all processes
    use_gather_object(`bool`):
        Whether to forcibly use gather_object instead of gather (which is already done if all objects passed do
        not contain tensors). This flag can be useful for gathering tensors with different sizes that we don't
        want to pad and concatenate along the first dimension. Using it with GPU tensors is not well supported
        and inefficient as it incurs GPU -> CPU transfer since tensors would be pickled.

Example:

```python
>>> # Assuming two processes, with a batch size of 5 on a dataset with 9 samples
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> dataloader = torch.utils.data.DataLoader(range(9), batch_size=5)
>>> dataloader = accelerator.prepare(dataloader)
>>> batch = next(iter(dataloader))
>>> gathered_items = accelerator.gather_for_metrics(batch)
>>> len(gathered_items)
9
```
c                    U $ r"  r   )r  s    r  r  0Accelerator.gather_for_metrics.<locals>.<lambda>  s    r  T)error_on_other_typeFrn  zcThe used dataset had no length, returning gathered tensors. You should drop the remainder yourself.r   c                6   > U S TR                   R                   $ r"  )r   	remainder)rf  r	  s    r  _adjust_samples7Accelerator.gather_for_metrics.<locals>._adjust_samples  s    %&E(;(;(E(EFFr  )
rf   r   rL   rK   r   r  r  r   r   r~  )r	  
input_datause_gather_objectall_tensorsr  r  s   `     r  gather_for_metricsAccelerator.gather_for_metrics  s    >	 k:4PK .@[ ,D;;z*D	""44 &&00B6KK}  K((22Q6G ).t440GG  K E  	 K	 F  	K	s<   C AC" .C" 
C" C" C" CC"C10C1c                    [        XU5      $ )a9  
Reduce the values in *tensor* across all processes based on *reduction*.

Note:
    All processes get the reduced value.

Args:
    tensor (`torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`):
        The tensors to reduce across all processes.
    reduction (`str`, *optional*, defaults to "sum"):
        A reduction type, can be one of 'sum', 'mean', or 'none'. If 'none', will not perform any operation.
    scale (`float`, *optional*, defaults to 1.0):
        A default scaling value to be applied after the reduce, only valid on XLA.

Returns:
    `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
        The reduced tensor(s).

Example:

```python
>>> # Assuming two processes
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> process_tensor = torch.arange(accelerator.num_processes) + 1 + (2 * accelerator.process_index)
>>> process_tensor = process_tensor.to(accelerator.device)
>>> reduced_tensor = accelerator.reduce(process_tensor, reduction="sum")
>>> reduced_tensor
tensor([4, 6])
```
)rg   )r	  rf  	reductionr`  s       r  rg   Accelerator.reduce  s    D f//r  c                    [        XX4S9$ )a  
Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
they can safely be gathered.

Args:
    tensor (nested list/tuple/dictionary of `torch.Tensor`):
        The data to gather.
    dim (`int`, *optional*, defaults to 0):
        The dimension on which to pad.
    pad_index (`int`, *optional*, defaults to 0):
        The value with which to pad.
    pad_first (`bool`, *optional*, defaults to `False`):
        Whether to pad at the beginning or the end.

Returns:
    `torch.Tensor`, or a nested tuple/list/dictionary of `torch.Tensor`:
        The padded tensor(s).

Example:

```python
>>> # Assuming two processes, with the first processes having a tensor of size 1 and the second of size 2
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> process_tensor = torch.arange(accelerator.process_index + 1).to(accelerator.device)
>>> padded_tensor = accelerator.pad_across_processes(process_tensor)
>>> padded_tensor.shape
torch.Size([2])
```
)dim	pad_index	pad_first)rd   )r	  rf  r  r  r  s        r  rd    Accelerator.pad_across_processes  s    B $Fy^^r  c                    [        XU5      $ )ar  
Unwraps the `model` from the additional layer possible added by [`~Accelerator.prepare`]. Useful before saving
the model.

Args:
    model (`torch.nn.Module`):
        The model to unwrap.
    keep_fp32_wrapper (`bool`, *optional*, defaults to `True`):
        Whether to not remove the mixed precision hook if it was added.
    keep_torch_compile (`bool`, *optional*, defaults to `True`):
        Whether to not unwrap compiled model if compiled.
Returns:
    `torch.nn.Module`: The unwrapped model.

Example:

```python
>>> # Assuming two GPU processes
>>> from torch.nn.parallel import DistributedDataParallel
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> model = accelerator.prepare(MyModel())
>>> print(model.__class__.__name__)
DistributedDataParallel

>>> model = accelerator.unwrap_model(model)
>>> print(model.__class__.__name__)
MyModel
```
)rF   )r	  r  keep_fp32_wrapperkeep_torch_compiles       r  unwrap_modelAccelerator.unwrap_model  s    @ +5EWXXr  c                    [        5         g)a^  
Will stop the execution of the current process until every other process has reached that point (so this does
nothing when the script is only run in one process). Useful to do before saving a model.

Example:

```python
>>> # Assuming two GPU processes
>>> import time
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> if accelerator.is_main_process:
...     time.sleep(2)
>>> else:
...     print("I'm waiting for the main process to finish its sleep...")
>>> accelerator.wait_for_everyone()
>>> # Should print on every process at the same time
>>> print("Everyone is here")
```
N)rl   r  s    r  rl   Accelerator.wait_for_everyone@  s
    , 	r  c                   U R                    H  n[        [        U5      [        5      (       a  U R                  R                  U5        M>  [        [        U5         nUR                  (       aG  U R                  R                  U" XR                  40 UR                  [        U5      0 5      D65        M  U R                  R                  U" U40 UR                  [        U5      0 5      D65        M     U R                   H  nUR                  5         M     Ub%  U R                   H  nUR                  U5        M     gg)a]  
Initializes a run for all trackers stored in `self.log_with`, potentially with starting configurations

Args:
    project_name (`str`):
        The name of the project. All trackers will save their data based on this
    config (`dict`, *optional*):
        Optional starting configuration to be logged.
    init_kwargs (`dict`, *optional*):
        A nested dictionary of kwargs to be passed to a specific tracker's `__init__` function. Should be
        formatted like so:
        ```python
        {"wandb": {"tags": ["tag_a", "tag_b"]}}
        ```

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(log_with="tensorboard")
>>> accelerator.init_trackers(
...     project_name="my_project",
...     config={"learning_rate": 0.001, "batch_size": 32},
...     init_kwargs={"tensorboard": {"flush_secs": 60}},
... )
```
N)r   
issubclassr   r   r   r  r   r   requires_logging_directoryr   r   startstore_init_configuration)r	  project_namer  init_kwargstrackertracker_inits         r  init_trackersAccelerator.init_trackersX  s    < }}G$w-88$$W-3CLA::MM(($\3C3Ci{WZ[bWcegGhi MM((l)hkooVYZaVbdfFg)hi % }}GMMO % ==008 ) r  c                    [        U R                  5      S:  aJ  U R                   H,  nUR                  U:X  d  M  U(       a  UR                  s  $ Us  $    [	        U S35      e[        SS9$ )ax  
Returns a `tracker` from `self.trackers` based on `name` on the main process only.

Args:
    name (`str`):
        The name of a tracker, corresponding to the `.name` property.
    unwrap (`bool`):
        Whether to return the internal tracking mechanism or to return the wrapped tracker instead
        (recommended).

Returns:
    `GeneralTracker`: The tracker corresponding to `name` if it exists.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(log_with="tensorboard")
>>> accelerator.init_trackers("my_project")
>>> tensorboard_tracker = accelerator.get_tracker("tensorboard")
```
r   z= is not an available tracker stored inside the `Accelerator`.T)_blank)r   r   rV  r  r   r   )r	  rV  unwrapr  s       r  get_trackerAccelerator.get_tracker  s`    0 t}}!==<<4'.47??A'A ) v%bcddT**r  c           	         U R                    H2  nUR                  " U4SU0UR                  UR                  0 5      D6  M4     g)a  
Logs `values` to all stored trackers in `self.trackers` on the main process only.

Args:
    values (`dict`):
        Values should be a dictionary-like object containing only types `int`, `float`, or `str`.
    step (`int`, *optional*):
        The run step. If included, the log will be affiliated with this step.
    log_kwargs (`dict`, *optional*):
        A nested dictionary of kwargs to be passed to a specific tracker's `log` function. Should be formatted
        like so:
        ```python
        {"wandb": {"tags": ["tag_a", "tag_b"]}}
        ```

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(log_with="tensorboard")
>>> accelerator.init_trackers("my_project")
>>> accelerator.log({"loss": 0.5, "accuracy": 0.9})
```
r   N)r   logr   rV  )r	  r   r   
log_kwargsr  s        r  r  Accelerator.log  s7    6 }}GKKNTNZ^^GLL"-MN %r  c                ~    U R                    H  nUR                  5         M     U R                  R                  5         g)a  
Runs any special end training behaviors, such as stopping trackers on the main process only or destoying
process group. Should always be called at the end of your script if using experiment tracking.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(log_with="tensorboard")
>>> accelerator.init_trackers("my_project")
>>> # Do training
>>> accelerator.end_training()
```
N)r   finishr   destroy_process_group)r	  r  s     r  end_trainingAccelerator.end_training  s.      }}GNN % 	

((*r  c                D    [        UUU R                  R                  US9  g)ak  
Save the object passed to disk once per machine. Use in place of `torch.save`.

Args:
    obj (`object`): The object to save.
    f (`str` or `os.PathLike`): Where to save the content of `obj`.
    safe_serialization (`bool`, *optional*, defaults to `False`): Whether to save `obj` using `safetensors`

Note:
    If `save_on_each_node` was passed in as a `ProjectConfiguration`, will save the object once per node,
    rather than only once on the main node.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> arr = [0, 1, 2, 3]
>>> accelerator.save(arr, "array.pkl")
```
save_on_each_nodesafe_serializationN)ri   r   r  )r	  r	  fr  s       r  ri   Accelerator.save  s$    . 	"88JJ1		
r  c           	     V   [         R                  R                  U5      (       a  [        R	                  SU S35        g[        S UR                  5        5       5      (       a  [        U5      nOA[        S UR                  5        5       5      (       a  [        S5      eU R                  U5      nUc  g[         R                  " USS9  U(       a  [        U5      nU(       a  [        O[        nU(       a  [        O[         n[#        XWUS	9n[         R$                  " U5       H  n	[         R                  R'                  X)5      n
UR)                  S
S5      nU	R)                  S
S5      n[*        R,                  " S5      nU	R/                  U5      (       d  Mt  [         R                  R                  U
5      (       d  M  XR0                  R3                  5       ;  d  M  UR5                  U5      c  M  [7        5       R8                  (       d  M  [         R:                  " U
5        GM     UR0                  R=                  5        HG  u  pU Vs0 s H  oX_   _M	     nnU R?                  U[         R                  R'                  X)5      US9  MI     UR@                  (       a  URB                  URD                  S.nU(       a  [F        O[H        n[         R                  R'                  UU5      n[K        USSS9 n[L        RN                  " USSS9S-   nURQ                  U5        SSS5        [        RS                  SU S[U        UR0                  5       SU S35        g[         R                  R'                  U[        5      n[        RS                  SU 35        gs  snf ! , (       d  f       N= f)aq  
Save a model so that it can be re-loaded using load_checkpoint_in_model

Arguments:
    model: (`torch.nn.Module`):
        Model to be saved. The model can be wrapped or unwrapped.
    save_directory (`str` or `os.PathLike`):
        Directory to which to save. Will be created if it doesn't exist.
    max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
        The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size
        lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).

        <Tip warning={true}>

        If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
        which will be bigger than `max_shard_size`.

        </Tip>

    safe_serialization (`bool`, *optional*, defaults to `True`):
        Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> model = ...
>>> accelerator.save_model(model, save_directory)
```
zProvided path (z#) should be a directory, not a fileNc              3  8   #    U  H  n[        U5      v   M     g 7fr"  )rQ   )r  r  s     r  r  )Accelerator.save_model.<locals>.<genexpr>)  s     J/#F++/r  c              3  f   #    U  H'  oR                   [        R                   " S 5      :H  v   M)     g7f)r  N)r   r  )r  rW  s     r  r  r  ,  s"     XEWE<<5<<#77EWs   /1zFYou can't save the model since some parameters are on the meta device.Texist_ok)filename_patternmax_shard_sizez.bin z(.*?)-\d{5}-of-\d{5})r  )metadata
weight_mapwzutf-8)encodingr   )indent	sort_keys
z:The model is bigger than the maximum size per checkpoint (z) and is going to be split in z^ checkpoint shards. You can find where each parameters has been saved in the index located at rI  zModel weights saved in )+r   pathisfiler   errorr2  modulesrq   r  rz  get_state_dictmakedirsr@   r"   r%   r#   r&   r   listdirr  replacer  rr  
startswithfilename_to_tensorskeysr  r   rY  remover0  ri   
is_shardedr  tensor_to_filenamer!   r$   openjsondumpswriter   r   )r	  r  save_directoryr  r  
state_dictweights_namer  state_dict_splitfilenamefull_filenameweights_no_suffixfilename_no_suffixr  tensorsrf  shardr  save_index_filer  contentpath_to_weightss                         r  
save_modelAccelerator.save_model  s   P 77>>.))LL?>*::]^_ J%--/JJJ7>JXUEUEUEWXXX"#kll,,U3J 
NT29*EJ,>(L8J4Pd=.

 

>2HGGLLBM !- 4 4VR @ "*!1!1&"!=**45C ##$566GGNN=11$H$H$M$M$OOMM"45A N222		-(# 3( "2!E!E!K!K!MH>EFgFZ//gEFIIeRWW\\.CXjIk "N
 &&,55.AAE :L5QcO ggll>?KOosW=**U1EL  > KKL^L\ ] 0 D DEF G$$3#4A7 !ggll><HOKK1/1BCD+ G >=s   N2+N
N(c                v    [         R                  " U R                  5      nXR                  UR                  '   U$ )a/  
Registers a pre hook to be run before `save_checkpoint` is called in [`Accelerator.save_state`].

Args:
    hook (`Callable`):
        A function to be called in [`Accelerator.save_state`] before `save_checkpoint`.

The hook should have the following signature:

`hook(models: list[torch.nn.Module], weights: list[dict[str, torch.Tensor]], input_dir: str) -> None`

The `models` argument are the models as saved in the accelerator state under `accelerator._models`, `weights`
argument are the state dicts of the `models`, and the `input_dir` argument is the `input_dir` argument passed
to [`Accelerator.load_state`].

<Tip>

Should only be used in conjunction with [`Accelerator.register_load_state_pre_hook`]. Can be useful to save
configurations in addition to model weights. Can also be used to overwrite model saving with a customized
method. In this case, make sure to remove already loaded weights from the weights list.

</Tip>

Returns:
    `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
    `handle.remove()`
)hooksRemovableHandler  idr	  hookhandles      r  register_save_state_pre_hook(Accelerator.register_save_state_pre_hookk  s1    8 &&t'F'FG59''		2r  c                   U R                   R                  (       a*  [        R                  R	                  U R
                  S5      n[        R                  " USS9  U R                   R                  (       Ga  [        R                  " U5       Vs/ s H"  n[        R                  R	                  X5      PM$     nnU R                   R                  b  [        U5      S-   U R                   R                  :  a  U R                  (       a  S nUR                  US9  [        R                  S[        U5      S-   U R                   R                  -
   S	35        US[        U5      S-   U R                   R                  -
    H  n[        R                  " U5        M     [        R                  R	                  US
U R                    35      n[        R                  R#                  U5      (       a  [%        SU SU R                    S35      eU R'                  5         [        R                  " USS9  [        R)                  SU 35        U R*                  [,        R.                  :X  a  [0        R2                  " 5         / n[5        U R6                  5       GH  u  pU R*                  [,        R8                  :X  aP  [        R)                  S5        [;        U R<                  R>                  X	X5        [        R)                  SU 35        Mt  U R*                  [,        R@                  :X  ax  [        R)                  S5        US:X  a  [B         O
[B         SU 3n
U	RD                  " X40 UD6  [        R)                  S[        R                  R	                  X5       35        GM
  U R*                  [,        RF                  :X  aA  [        R)                  S5        U	RE                  U5        [        R)                  SU 35        GMi  URI                  U RK                  U	SS95        GM     / nU R*                  [,        R8                  :X  a{  [5        U RL                  5       Ha  u  p[        R)                  S5        [O        U R<                  R>                  XU R6                  U   X5        [        R)                  SU 35        Mc     O:U R*                  [,        R@                  [,        RF                  4;  a  U RL                  n/ nU R*                  [,        R@                  :X  aG  [5        U RP                  5       H-  u  p[S        U[T        5      (       a  M  URI                  U5        M/     O+U R*                  [,        RF                  4;  a  U RP                  nU RV                  nU RX                  R[                  5        H  nU" U R6                  Xq5        M     []        UUUUUU R<                  R^                  U R`                  U Rb                  U R                   Rd                  US9
n[5        U Rf                  5       H%  u  nn[i        UXU R                   Rd                  S9  M'     U R                   =Rj                  S-  sl5        U$ s  snf )a  
Saves the current states of the model, optimizer, scaler, RNG generators, and registered objects to a folder.

If a `ProjectConfiguration` was passed to the `Accelerator` object with `automatic_checkpoint_naming` enabled
then checkpoints will be saved to `self.project_dir/checkpoints`. If the number of current saves is greater
than `total_limit` then the oldest save is deleted. Each checkpoint is saved in separate folders named
`checkpoint_<iteration>`.

Otherwise they are just saved to `output_dir`.

<Tip>

Should only be used when wanting to save a checkpoint during training and restoring the state in the same
environment.

</Tip>

Args:
    output_dir (`str` or `os.PathLike`):
        The name of the folder to save all relevant weights and states.
    safe_serialization (`bool`, *optional*, defaults to `True`):
        Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
    save_model_func_kwargs (`dict`, *optional*):
        Additional keyword arguments for saving model which can be passed to the underlying save function, such
        as optional arguments for DeepSpeed's `save_checkpoint` function.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> model, optimizer, lr_scheduler = ...
>>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
>>> accelerator.save_state(output_dir="my_checkpoint")
```
checkpointsTr  Nr   c           	     d    [        [        [        [        R                  " SU 5      5      5      S   $ Nz[\/]?([0-9]+)(?=[^\/]*$)r   r   mapr   r  findallfolders    r  r  &Accelerator.save_state.<locals>._inner  s'    C4OQW)X YZ[\]]r  keyz	Deleting z- checkpoints to make room for new checkpoint.checkpoint_zCheckpoint directory z (zc) already exists. Please manually override `self.save_iteration` with what iteration to start with.zSaving current state to zSaving FSDP modelzFSDP Model saved to output dir z$Saving DeepSpeed Model and Optimizerr   r4  z2DeepSpeed Model and Optimizer saved to output dir z1Saving Megatron-LM Model, Optimizer and Schedulerz@Megatron-LM Model , Optimizer and Scheduler saved to output dir F)r  zSaving FSDP Optimizerz#FSDP Optimizer saved to output dir r  )r  )6r   automatic_checkpoint_namingr   r  r  r   r  r  total_limitr   rY  sortr   r$  shutilrmtreerV  existsr   rl   r   r   r,   r   rv  	mark_stepr  r  r   rj   r   r   r   r    save_checkpointr   r  r  r   rk   r  r   rw   r  r  r   r   r2  r   r   r  r  r   rU  )r	  
output_dirr  save_model_func_kwargsr  foldersr  weightsr9  r  ckpt_idr  r[  
schedulersr  r7  r  save_locationr	  s                      r  
save_stateAccelerator.save_state  s   L %%AAd&6&6FJ
J.%%AAAFHjjQ[F\]F\Frww||J7F\G]**66B\A%(B(B(N(NN((^ (Gq 043M3M3Y3Y YZ  [H  I &&aGq(84;U;U;a;a(abFMM&) cjK@S@S?T2UVJww~~j)) +J<r$:M:M9N  Or  s  ""$
J..zl;<  O$7$77LLN !$,,/HA$$(<(<</0

 6 6ZS=j\JK&&/*C*CCBC-.!VZLJ<q9L%%jT=STPQSQXQXQ]Q]^hQrPstu&&/*E*EEOP%%j1^_i^jklt2252GH 0$ 
  O$8$88#D$4$4534#DJJ$:$:Dt||TUXbfA*NO 6 ""?+D+DoFaFa*bb))J 
  O$=$== )$*:*: ;i)BCC!!), !< ""?+F+F*GG))J '' 33::<Dw3 = /JJ$$IIKK"88JJ1
   4 45FAsc:DD^D^DpDpq 6"",,1,w ^s   )Y0c                v    [         R                  " U R                  5      nXR                  UR                  '   U$ )a  
Registers a pre hook to be run before [`load_checkpoint`] is called in [`Accelerator.load_state`].

Args:
    hook (`Callable`):
        A function to be called in [`Accelerator.load_state`] before `load_checkpoint`.

The hook should have the following signature:

`hook(models: list[torch.nn.Module], input_dir: str) -> None`

The `models` argument are the models as saved in the accelerator state under `accelerator._models`, and the
`input_dir` argument is the `input_dir` argument passed to [`Accelerator.load_state`].

<Tip>

Should only be used in conjunction with [`Accelerator.register_save_state_pre_hook`]. Can be useful to load
configurations in addition to model weights. Can also be used to overwrite model loading with a customized
method. In this case, make sure to remove already loaded models from the models list.

</Tip>

Returns:
    `torch.utils.hooks.RemovableHandle`: a handle that can be used to remove the added hook by calling
    `handle.remove()`
)r  r  r  r  r  s      r  register_load_state_pre_hook(Accelerator.register_load_state_pre_hook  s1    6 &&t'F'FG59''		2r  c                N   UbS  [         R                  R                  U5      n[         R                  R                  U5      (       d  [	        SU S35      eOU R
                  R                  (       a  [         R                  R                  U R                  S5      n[         R                  " U5       Vs/ s H"  n[         R                  R                  X5      PM$     nnS nUR                  US9  US   nO[	        S5      e[        R                  S	U 35        / n[        U R                  5       GHz  u  pU R                  [         R"                  :X  aP  [        R                  S
5        [%        U R&                  R(                  X	X5        [        R                  SU 35        Mt  U R                  [         R*                  :X  ax  [        R                  S5        US:X  a  [,         O
[,         SU 3n
U	R.                  " X40 UD6  [        R                  S[         R                  R                  X5       35        GM
  U R                  [         R0                  :X  aA  [        R                  S5        U	R/                  U5        [        R                  SU 35        GMi  UR3                  U	5        GM}     SnU R4                  b  U R6                  (       a  [         R                  R                  U[8        5      n[:        R<                  " U5      nU R4                  R?                  U5        U R4                  RA                  U R4                  RB                  5        [        R                  S5        OU R4                  n/ nU R                  [         R"                  :X  a{  [        U RD                  5       Ha  u  p[        R                  S5        [G        U R&                  R(                  XU R                  U   X5        [        R                  SU 35        Mc     O:U R                  [         R*                  [         R0                  4;  a  U RD                  n/ nU R                  [         R*                  :X  aH  [        U RH                  5       H.  u  nn[K        U[L        5      (       a  M  UR3                  U5        M0     O+U R                  [         R0                  4;  a  U RH                  nU RN                  nU RP                  RS                  5        H  nU" Xq5        M     URU                  SS5      nUcD  U RV                  S:  a2  U RX                  (       a!  U R                  [         RZ                  :w  a  SnOSn[]        UUUUUU R&                  R^                  UUU4	0 UD6nSU;   a
  US   U l0        [         R                  " U5       Vs/ s H  n[b        Rd                  " SU5      c  M  UPM!     nn[g        U5      [g        U Rh                  5      :w  aH  SU S3nUS[g        U5       3-  nUS[g        U Rh                  5       S3-  nUS -  nUS!-  n[k        U5      e[        R                  S"[g        U5       S#35        [        U Rh                  5       H  u  nn[m        UUU5        M     gs  snf s  snf )$a  
Loads the current states of the model, optimizer, scaler, RNG generators, and registered objects.

<Tip>

Should only be used in conjunction with [`Accelerator.save_state`]. If a file is not registered for
checkpointing, it will not be loaded if stored in the directory.

</Tip>

Args:
    input_dir (`str` or `os.PathLike`):
        The name of the folder all relevant weights and states were saved in. Can be `None` if
        `automatic_checkpoint_naming` is used, and will pick up from the latest checkpoint.
    load_kwargs (`dict`, *optional*):
        Additional keyword arguments for the underlying `load` function, such as optional arguments for
        state_dict and optimizer on.
    load_model_func_kwargs (`dict`, *optional*):
        Additional keyword arguments for loading model which can be passed to the underlying load function,
        such as optional arguments for DeepSpeed's `load_checkpoint` function or a `map_location` to load the
        model and optimizer on.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> model, optimizer, lr_scheduler = ...
>>> model, optimizer, lr_scheduler = accelerator.prepare(model, optimizer, lr_scheduler)
>>> accelerator.load_state("my_checkpoint")
```
NzTried to find z but folder does not existr  c           	     d    [        [        [        [        R                  " SU 5      5      5      S   $ r  r  r   s    r  r  &Accelerator.load_state.<locals>._inner]  s&    CRZZ0KV%TUVWXYYr  r  rn  zBNo input_dir provided and automatic checkpoint naming is disabled.zLoading states from zLoading FSDP modelz!FSDP Model loaded from input dir z%Loading DeepSpeed Model and Optimizerr   r4  z4DeepSpeed Model and Optimizer loaded from input dir z2Loading Megatron-LM Model, Optimizer and SchedulerzBMegatron-LM Model , Optimizer and Scheduler loaded from input dir z$GradScaler state loaded successfullyzLoading FSDP Optimizerz%FSDP Optimizer loaded from input dir map_locationr   	on_devicer   r   z^custom_checkpoint_\d+\.pkl$z'Number of custom checkpoints in folder z1 does not match the number of registered objects:z
	Found checkpoints: z
	Registered objects: r  zqPlease make sure to only load checkpoints from folders that were created with the same set of registered objects,znor avoid using `custom_checkpoint` in the filename for files in that same directory and load them in manually.zLoading in z custom states)7r   r  
expanduserisdirr   r   r  r  r   r  r  r   r   r  r  r   r,   r   ra   r   r   r   r    load_checkpointr   r  r   r   rp   r  loadload_state_dict_lazy_init_scale_growth_tracker_devicer   rb   r  r   rw   r  r  r   r  r.  r)  r'  r   r2  r   r  searchr   r  rz  r   )r	  	input_dirload_kwargsload_model_func_kwargsr  r  r  r  r9  r  r  r   input_scaler_filescaler_stater  r[  r  r  r7  r  r  override_attributesr  custom_checkpointserrr  r	  s                              r  
load_stateAccelerator.load_state1  s   D  **95I77==++ >)<V!WXX ,''CCT%5%5}EIEGZZPYEZ[EZ6rww||I6EZG[Z LLVL$Iabb*9+67 !$,,/HA$$(<(<<01

 6 6YR?	{KL&&/*C*CCCD-.!VZLJ<q9L%%iS<RSRSUSZSZS_S_`iSsRtuv&&/*E*EEPQ%%i0`aj`klme$ 0( ;;"t}} "Y D ::&78LKK''5 KK778K8KLKK>?[[F 
  O$8$88#D$4$4545#DJJ$:$:Dt||TUXaeCI;OP 6 ""?+D+DoFaFa*bb))J 
  O$=$== )$*:*: ;9i)BCC!!), !< ""?+F+F*GG))J'' 33::<D# = .11.$G!!A%$*;*;@U@UYhYrYr@r*$4JJ$$
 %
 ((+F3DIzz),
,!		:Y[\0]A, 	 
 !"c$*>*>&??9)Duv  ,S1C-D,EFFC-c$2F2F.G-HKKC  G  GC  D  DCs##KK+c*<&=%>nMN'(<(<=
s!#y%8 >W \z
s   4)Z<Z"Z"c                    [        U S5      (       a8  U R                  b$  U R                  R                  R                  5         SU l        [	        U6 n/ U l        / U l        / U l        / U l        SU l	        U$ )a
  
Will release all references to the internal objects stored and call the garbage collector. You should call this
method between two trainings with different models/optimizers. Also will reset `Accelerator.step` to 0.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> model, optimizer, scheduler = ...
>>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
>>> model, optimizer, scheduler = accelerator.free_memory(model, optimizer, scheduler)
```
r   Nr   )
rM  r   r#  destroyrh   r  r   r  r  r   r	  objectss     r  free_memoryAccelerator.free_memory  ss    " 4344,,8--44<<>,0D) '*	r  c                     U R                   " U6 $ )a  
Alias for [`Accelerate.free_memory`], releases all references to the internal objects stored and call the
garbage collector. You should call this method between two trainings with different models/optimizers.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> model, optimizer, scheduler = ...
>>> model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
>>> model, optimizer, scheduler = accelerator.clear(model, optimizer, scheduler)
```
)r5  r3  s     r  clearAccelerator.clear  s      ))r  )r  c                  0 n0 nU GH1  n[        U[        R                  R                  5      (       d  M/  [	        U5      nU(       d6  UR                  UR                  5        VVs0 s H  u  pgXg_M	     snn5        Mw  U R                  [        R                  :X  a
  SSK
Jn  SXH'   U R                  (       a
  SSKJn	  SXI'   UR                  UR                  5        VVs0 s HJ  u  pgU[        U5      U;   a%  [!        Xt[        U5         5      R#                  5       OUR#                  5       _ML     snn5        GM4     U$ s  snnf s  snnf )Nr   )!WeightWithDynamicFloat8CastTensor_tensorrj  ru  )r   r  r  r  rF   r  rN  r   r   r   torchao.float8.fsdp_utilsr;  r   rL  rE  r   r   rv  )
r	  r  r  rN  accessor_mappingr	  r>  r?  r;  rE  s
             r  r!  !Accelerator._get_named_parameters  s"   C#uxx//1#6 $++c>R>R>T,U>TdaQT>T,UV ##~'8'88[JS$G==@0?$- ''
 %($8$8$:	 %;DA 7&66 #1tAw&?@IIKZZ\* %;	' 6  / -V s   &D?AEc                   S nS nU H  n[        U[        R                  R                  5      (       a#  UR	                  5        H  nUR
                  n  O   [        U[        R                  R                  5      (       d  Mz  UR                   H*  n[        US   5      S:  d  M  US   S   R
                  n  M     M     X#4$ )Nrk  r   )
r   r  r  r  r  r   r  r  rs  r   )r	  r  r6  r7  r	  rW  rx  s          r  r   Accelerator._get_devices  s    C#uxx// ^^-E#(<<L . #u{{4455#&#3#3K;x01A5+6x+@+C+J+J( $4  //r  c                   U R                   [        R                  :X  a  U R                  S   S   S:H  nU R                  R	                  S0 5      R	                  SS5      S:  nU(       d  U(       am  UR                  5       (       aM  U(       a  [        SS	S
5      (       d  [        S5      eU(       a  UR                  5       OUR                  5       nU$ [        S5      eSSKJn  U" U R                  U5      R                  5       5      n U$ U R                  (       a  SSKJnJn  U" SSSS9n	U" XS9nU$ U R                   [        R&                  :X  aL  SSKJn
Jn  SSKJn  U
" SSS9nUR1                  XR2                  U5         UR                  5       nSSS5        U$ U(       a  U R                  U5      nUR                  5       nU$ ! , (       d  f       W$ = f)a  
Returns the state dictionary of a model sent through [`Accelerator.prepare`] potentially without full
precision.

Args:
    model (`torch.nn.Module`):
        A PyTorch model sent through [`Accelerator.prepare`]
    unwrap (`bool`, *optional*, defaults to `True`):
        Whether to return the original underlying state_dict of `model` or to return the wrapped state_dict

Returns:
    `dict`: The state dictionary of the model potentially without full precision.

Example:

```python
>>> import torch
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> net = torch.nn.Linear(2, 2)
>>> net = accelerator.prepare(net)
>>> state_dict = accelerator.get_state_dict(net)
```
r  stage   r  r  r   r   r   r   r  r  a  Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or set `zero3_save_16bit_model` to True when using `accelerate config`. To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights.)clone_tensors_for_torch_save)StateDictOptionsget_model_state_dictT)full_state_dictbroadcast_from_rank0r  )options)FullStateDictConfigStateDictTyper  )offload_to_cpu
rank0_onlyN)r   r,   r   r  r   'zero_gather_16bit_weights_on_model_saverA   r   _consolidated_16bit_state_dict$_zero3_consolidated_16bit_state_dictr   deepspeed.checkpoint.utilsrE  r  r  r   'torch.distributed.checkpoint.state_dictrF  rG  r   torch.distributed.fsdprK  rL  r  state_dict_typeFULL_STATE_DICT)r	  r  r  zero3_shardingtp_shardingr  rE  rF  rG  rJ  rK  rL  r   full_state_dict_configs                 r  r  Accelerator.get_state_dict)  s   6   O$=$==!223FGPTUUN//334ErJNN}^_`cddK@@BB"+;Kx+X+X) A 
 ' <<>"GGI B 7 %K  T9$:K:KE:R:]:]:_`
$ # ]]f&tRVdhiG-eEJ  ""o&:&::QO%8Y]%^"%%e-J-JLbc"--/
 d 	 ))%0))+J dc s   G		
Gc                D   / nU H8  n[        US5      (       a  [        US5      (       a  M'  UR                  U5        M:     [        U5      S:  a6  Sn[        U5       H  u  pSUSU S[	        U5       S3-  nM     [        U5      eU R                  R                  U5        g)	ap  
Makes note of `objects` and will save or load them in during `save_state` or `load_state`.

These should be utilized when the state is being loaded or saved in the same script. It is not designed to be
used in different scripts.

<Tip>

Every `object` must have a `load_state_dict` and `state_dict` function to be stored.

</Tip>

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> # Assume `CustomObject` has a `state_dict` and `load_state_dict` function.
>>> obj = CustomObject()
>>> accelerator.register_for_checkpointing(obj)
>>> accelerator.save_state("checkpoint.pt")
```
r  r#  r   zxAll `objects` must include a `state_dict` and `load_state_dict` function to be stored. The following inputs are invalid:z
	- Item at index z, ``N)rM  r  r   r  rP   r   r  extend)r	  r4  invalid_objectsr	  r.  r  s         r  register_for_checkpointing&Accelerator.register_for_checkpointingp  s    2 C3--WSBS5T5T&&s+  !# MC'8
-eWC8L7MQOO 9S/!##G,r  c              #     #    U R                   (       a9  U R                   R                  (       a  U R                  XUS9   Sv   SSS5        g[        R	                  S5        Sv   g! , (       d  f       g= f7f)aa  
A context manager that enables context parallel training.

Args:
    buffers (`list[torch.Tensor]`, `optional`):
        Buffers, which are going to be sharded along the sequence dimension. Common examples are inputs, labels
        or positional embedding buffers. This context manager will modify these buffers in-place, and after
        exiting the context, the buffers will be restored to their original state. To avoid unnecessary
        restores, you can use `no_restore_buffers` to specify which buffers don't need to be restored.
    buffer_seq_dims (`list[int]`, `optional`):
        Sequence dimensions of `buffers`.
    no_restore_buffers (`set[torch.Tensor]`, `optional`):
        This set must be a subset of `buffers`. Specifies which buffers from `buffers` argument won't be
        restored after the context exits. These buffers will be then kept in sharded state.

<Tip warning={true}>

`context_parallel` is currently only supported together with FSDP2, and requires `parallelism_config.cp_size` >
1. If either of these conditions are not met, this context manager will have no effect, though to enable fewer
code changes it will not raise an Exception.

</Tip>

<Tip warning={true}>

This context manager has to be recreated with each training step, as shown in the example below.

</Tip>

Example:

```python
>>> for batch in dataloader:
...     with accelerator.maybe_context_parallel(
...         buffers=[batch["input_ids"], batch["attention_mask"]],
...         buffer_seq_dims=[1, 1],
...         no_restore_buffers={batch["input_ids"]},
...     ):
...         outputs = model(batch)
...         ...
```
)buffersbuffer_seq_dimsno_restore_buffersNzContext parallel training is not enabled. This context manager will have no effect. To enable it, set `parallelism_config.cp_size` > 1 in the `Accelerator` constructor.)r   rq  rf  r   warning_once)r	  rb  rc  rd  s       r  maybe_context_parallel"Accelerator.maybe_context_parallel  sl     f ""t'>'>'I'I!!Ug "   
 g  s   =A8A'#A8'
A51A8c              #     #    Uc  U R                   n[        U R                  U5      nU   Sv   SSS5        g! , (       d  f       g= f7f)a  
Will apply automatic mixed-precision inside the block inside this context manager, if it is enabled. Nothing
different will happen otherwise.

A different `autocast_handler` can be passed in to override the one set in the `Accelerator` object. This is
useful in blocks under `autocast` where you want to revert to fp32.

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator(mixed_precision="fp16")
>>> with accelerator.autocast():
...     train()
```
N)r   rO   r   )r	  r   r  s      r  autocastAccelerator.autocast  s<     & ##44>tP`a s   (A	8	A	
AA	c           	   #    #    U=(       d    U R                   =(       d
    [        5       nUR                  5        nUv   SSS5        UR                  c  g[        R
                  " UR                  SS9  WR                  [        R                  R                  UR                  [        R                  " U R                  S95      5        U R                  5         g! , (       d  f       N= f7f)a	  
Will profile the code inside the context manager. The profile will be saved to a Chrome Trace file if
`profile_handler.output_trace_dir` is set.

A different `profile_handler` can be passed in to override the one set in the `Accelerator` object.

Args:
    profile_handler (`ProfileKwargs`, *optional*):
        The profile handler to use for this context manager. If not passed, will use the one set in the
        `Accelerator` object.

Example:

```python
# Profile with default settings
from accelerate import Accelerator
from accelerate.utils import ProfileKwargs

accelerator = Accelerator()
with accelerator.profile() as prof:
    train()
accelerator.print(prof.key_averages().table())


# Profile with the custom handler
def custom_handler(prof):
    print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))


kwargs = ProfileKwargs(schedule_option=dict(wait=1, warmup=1, active=1), on_trace_ready=custom_handler)
accelerator = Accelerator(kwarg_handler=[kwargs])
with accelerator.profile() as prof:
    for _ in range(10):
        train_iteration()
        prof.step()


# Profile and export to Chrome Trace
kwargs = ProfileKwargs(output_trace_dir="output_trace")
accelerator = Accelerator(kwarg_handler=[kwargs])
with accelerator.profile():
    train()
```
NTr  )suffix)r   r8   buildoutput_trace_dirr   r  export_chrome_tracer  r  ro   formatr2  rl   )r	  r   profilers      r  profileAccelerator.profile  s     \ *TT-A-AT]_""$N % ++3
O44tD$$GGLL99;O;V;V^b^p^p;qr	
 	  %$s   7C*CBC*
C'#C*c                P    U R                    H  nUR                  (       d  M    g   g)z
Whether or not the optimizer update was skipped (because of gradient overflow in mixed precision), in which
case the learning rate should not be changed.
TF)r   step_was_skipped)r	  r  s     r  optimizer_step_was_skipped&Accelerator.optimizer_step_was_skipped(  s(     ))I))) * r  c                    [        XS9$ )a  
Creates a new `torch.utils.data.DataLoader` that will efficiently skip the first `num_batches`.

Args:
    dataloader (`torch.utils.data.DataLoader`): The data loader in which to skip batches.
    num_batches (`int`, *optional*, defaults to 0): The number of batches to skip

Example:

```python
>>> from accelerate import Accelerator

>>> accelerator = Accelerator()
>>> dataloader, model, optimizer, scheduler = accelerator.prepare(dataloader, model, optimizer, scheduler)
>>> skipped_dataloader = accelerator.skip_first_batches(dataloader, num_batches=2)
>>> # for the first epoch only
>>> for input, target in skipped_dataloader:
...     optimizer.zero_grad()
...     output = model(input)
...     loss = loss_func(output, target)
...     accelerator.backward(loss)
...     optimizer.step()

>>> # subsequent epochs
>>> for input, target in dataloader:
...     optimizer.zero_grad()
...     ...
```
)num_batches)r   )r	  
dataloaderry  s      r  r   Accelerator.skip_first_batches3  s    < "*FFr  c                0    [         R                  S5        U $ )Nz]Deep copying the `Accelerator` object, note that this will point to the same original object.)r   r   )r	  memos     r  __deepcopy__Accelerator.__deepcopy__S  s    str  c                    UR                  5        H1  n[        US5      (       d  M  [        UR                  5      S:  d  M1    g   g)zk
Verifies that `model` has not been prepared with big model inference with a device-map resembling `auto`.
r~  r   TF)r  rM  r   r~  )r	  r  r  s      r  r  Accelerator.verify_device_mapW  s9    
 Aq/**s1??/Ca/G ! r  c                   [        5       (       a  SSKJnJn  Uc  [	        S5      eSnU R
                   H?  n[        UR                  WW45      (       d  M"  UR                  R                  X5        SnMA     U(       d  [	        S5      eg)z(
Runs backward pass on LOMO optimizers.
r   rU  NzSA learning rate must be passed in order to call backward pass with LOMO optimizers.FTzxBackward pass not properly called on LOMO optimizers. Are you sure you passed a LOMO optimizer in accelerator.prepare()?)	rV   rX  rV  rW  r   r   r   r  fused_backward)r	  rb  r^  rV  rW  _backward_calledr  s          r  ra  Accelerator.lomo_backwardb  s      1 rss ))I)--g??##224G#'  *
   K   r  c                   U R                   (       a  U R                  b  [        U R                  R                  5      $ U R                  b  [        R
                  $ U R                  b  [        R                  $ U R                  b  [        R                  $ OLU R                  R                  b5  U R                  R                  R                  (       a  [        R                  $ [        [        SS5      5      $ )z2Returns the configured backend for training in FP8ACCELERATE_FP8_BACKENDr   )r   r   r   r   r   r   r   r   r   r   r   r   enable_msampre   r  s    r  r   Accelerator.fp8_backendz  s     &&2%d&=&=&E&EFF''3%(((''3%(((**6%+++ 7ZZ((49T9T9a9a!'''34LdSTTr  )%rf  r  r  r  r  r   r  r  r   r   r   r   r  r   r   r   r  r   r   r  r   r   r   r   r   r   r   r   r   r  r   r   r   r   r   r   r   )*r   boolr
  r  r   zPrecisionType | str | Noner  r   r   r  r   zDataLoaderConfiguration | Noner   3DeepSpeedPlugin | dict[str, DeepSpeedPlugin] | Noner   z%FullyShardedDataParallelPlugin | Noner  z TorchTensorParallelPlugin | Noner   zMegatronLMPlugin | Noner  zlist[str | RNGType] | Noner   zRstr | LoggerType | GeneralTracker | list[str | LoggerType | GeneralTracker] | Noner   zstr | os.PathLike | Noner  zProjectConfiguration | Noner   z!GradientAccumulationPlugin | Noner   r  r  zlist[KwargsHandler] | Noner  zDynamoBackend | str | Noner   zTorchDynamoPlugin | Noner   r  r   zParallelismConfig | None)rD  r  )returnzUnion[ParallelismConfig, None])r  r   )F)r  z"list | tuple | dict | torch.Tensorr  r  r"  )r  Callable[..., Any] | None)r  zCallable[..., Any]ri  )r  r  r2  
int | None)r  r  r5  r  )FN)NF)r  torch.nn.Moduler   zbool | Noner  r  )rQ  ztorch.utils.data.DataLoader)r  ztorch.optim.Optimizer)r  r   )r   )rr  r  )r   r   F)TT)r  r  r  r  )r  r   r  dict | Noner  r  )rV  r   r  r  )r   r   r   r  r  r  )10GBT)r  r  r  zUnion[str, os.PathLike]r  zUnion[int, str]r  r  )r  zCallable[..., None]r  zhooks.RemovableHandle)NT)r  
str | Noner  r  )r'  r  r(  r  )T)NNN)rb  zlist[torch.Tensor] | Nonerc  zlist[int] | Nonerd  zset[torch.Tensor] | None)r   r(   )r   zProfileKwargs | None)r   )ry  r   )r  r  r  r  )rb  ztorch.Tensorr^  floatr  None)r  r   )pr  
__module__r  __firstlineno____doc___split_batchesr  propertyr   r  r)  r   r.  r2  r5  r   r
  r<  r@  setterrF  rI  rL  r   r   rV  rY  r\  r_  r   r   rf  r   rk  rt  r{  r  r  r  r  r   r  r  r  r  r  r  r  r  r  staticmethodr  r  r  r  r  r  r  r  rA  r&  r'  r,  r  r)  r(  r*  r+  r%  rF  r-  r   r  r  r_  rg  rj  rn  rs  r  rK   r  rg   rd   r  rl   r  r  r  r  ri   r  r  r  r  r/  r5  r8  r!  r   r  r_  rf  ri  rr  rv  r   r~  r  ra  r   __static_attributes__r   r  r  r   r      s:   \@ "&,6:+,<@PT=A<@6:04gk046:JN.26:5926QU7;-ff f 4	f
 &)f f :f Nf ;f :f 4f .f ef .f 4f  'H!f" (,#f$ 4%f& 3'f( 0)f* O+f, 5-fP 	+ 	+ * * 	
 	
 + + ( ( ( ( . . ! ! 4 4 7 7 3 3 4 4 ; ; 3 3  
 6 6 6 6 4 4 * * 0 0 < < * * # #   - - & &   	d 	d d d c c 	b 	b 	n 	n ' 'R%N(T%N+Z.`  *  * . .` -N  -N^f 2 2 < < - - !''] (] * *X P Pd*" /3 g8R"HHV ejc$c8Cc]acJ	8"HTleN:x*//d fj9$69$v*X'R $D?4B!*F=Y~@6@GR"0H!_F YD0 EIfh 09 09d+@ 37SU O O:+*
D +1#'mEmE 0mE (	mE
 !mE^@EN>V9p:*$ 6;  @0"EN"-H  .2,07;	<*< *< 5	< <|  0 9! 9!v  G@	0 U Ur  r   )
__future__r   r  re  r  r  r   r  r	  r   collectionsr   r   r   typesr   typingr   r	   r
   r  torch.utils.hooksr   r  huggingface_hubr   accelerate.utils.dataclassesr   big_modelingr   checkpointingr   r   r   r   rQ  r   r   r   loggingr   r  r   r   r   r  r   r   r   r   r   trackingr   r   r   r    r!   r"   r#   r$   r%   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r0   r1   r2   r3   r4   r5   r6   r7   r8   r9   r:   r;   r<   r=   r>   r?   r@   rA   rB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   rL   rM   rN   rO   rP   rQ   rR   rS   rT   rU   rV   rW   rX   rY   rZ   r[   r\   r]   r^   r_   r`   ra   rb   rc   rd   re   rf   rg   rh   ri   rj   rk   rl   utils.constantsrm   rn   ro   rp   utils.modelingrq   utils.otherrr   rs   rt   ru   rv   rw   rx   ry   rz   r{   r|   r}   r~   r   r   r   r   !torch.distributed.algorithms.joinr   torch_xla.core.xla_modelcore	xla_modelrv  )torch_xla.distributed.xla_multiprocessingdistributedxla_multiprocessingr  	torch_nputorch.optim.lr_schedulerr   r   r   r  r   objectr  _dispatch_batches_even_batches_use_seedable_samplerr   r   r  r  <module>r     s   #     	 	   # %   ' '  ! ! > 7 8 o o V V  + 1 + @ @ K KN N N N N N N N N N N N N N N N N N N N^  ; W W   	 	 	 3 ));; 'E4 
H	 H  R?U R?U  EDEs   H	 	HH