from __future__ import annotations

import paddle
from paddle import _C_ops, pir
from paddle.base import framework
from paddle.base.framework import in_dynamic_or_pir_mode

__all__ = ['L1Decay', 'L2Decay']


class WeightDecayRegularizer:
    """Base class for weight decay regularizers

    Defines the common interface of weight-decay regularizers.
    Weight-decay regularizers are added only during the backward
    pass for faster regularization. They add operations to the network
    that correspond to gradient of the regularization function.
    Users should not use this class directly, but need to use one
    of its implementations
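
    Examples:
        A custom regularizer can be defined by subclassing. The sketch below
        is illustrative only (``MyDecay`` is a hypothetical name, and it
        assumes dynamic-graph mode, where ``param`` is a dense Tensor):

        .. code-block:: python

            >>> import paddle
            >>> from paddle.regularizer import WeightDecayRegularizer

            >>> class MyDecay(WeightDecayRegularizer):
            ...     def __init__(self, coeff=0.0):
            ...         super().__init__()
            ...         self._coeff = coeff
            ...     def __call__(self, param, grad, block):
            ...         # gradient of 0.5 * coeff * sum(x * x) is coeff * x
            ...         return self._coeff * param
            ...     def __str__(self):
            ...         return f"MyDecay, coeff={self._coeff}"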
    """

    def __init__(self) -> None:
        pass

    def __call__(
        self, param: paddle.Tensor, grad: paddle.Tensor, block: pir.Block
    ):
        """Add corresponding weight decay operations to the network"""
        raise NotImplementedError()

    def __str__(self) -> str:
        """Debug string"""
        raise NotImplementedError()


class L1Decay(WeightDecayRegularizer):
    r"""
    Implement the L1 Weight Decay Regularization, which encourages the weights to be sparse.

    It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
    When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
    ``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
    higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
    in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
    in Optimizer will be used.

    In the implementation, the loss function of L1 Weight Decay Regularization is as follows:

    .. math::

        loss = coeff * reduce\_sum(abs(x))
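
    The gradient of this loss with respect to ``x`` is ``coeff * sign(x)``,
    which is exactly the term this regularizer adds to each parameter's
    gradient during the backward pass.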

    Args:
        coeff(float, optional): regularization coeff. Default: 0.0.

    Examples:
        .. code-block:: python
            :name: code-example1

            >>> # Example1: set Regularizer in optimizer
            >>> import paddle
            >>> from paddle.regularizer import L1Decay

            >>> linear = paddle.nn.Linear(10, 10)
            >>> inp = paddle.rand(shape=[10, 10], dtype="float32")
            >>> out = linear(inp)
            >>> loss = paddle.mean(out)
            >>> beta1 = paddle.to_tensor([0.9], dtype="float32")
            >>> beta2 = paddle.to_tensor([0.99], dtype="float32")
            >>> momentum = paddle.optimizer.Momentum(
            ...     learning_rate=0.1,
            ...     parameters=linear.parameters(),
            ...     weight_decay=L1Decay(0.0001))
            >>> back = out.backward()
            >>> momentum.step()
            >>> momentum.clear_grad()

        .. code-block:: python
            :name: code-example2

            >>> # Example2: set Regularizer in parameters
            >>> # Set L1 regularization in parameters.
            >>> # Global regularizer does not take effect on my_conv2d for this case.
            >>> from paddle.nn import Conv2D
            >>> from paddle import ParamAttr
            >>> from paddle.regularizer import L1Decay

            >>> my_conv2d = Conv2D(
            ...         in_channels=10,
            ...         out_channels=10,
            ...         kernel_size=1,
            ...         stride=1,
            ...         padding=0,
            ...         weight_attr=ParamAttr(regularizer=L1Decay(coeff=0.01)),
            ...         bias_attr=False)
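
        The decay term can also be checked directly against the formula in
        dynamic mode; the snippet below is a quick sketch, not one of the
        upstream examples:

        .. code-block:: python
            :name: code-example3

            >>> import paddle
            >>> w = paddle.to_tensor([[-1.0, 0.5], [0.0, 2.0]])
            >>> # L1Decay(coeff=0.1) adds 0.1 * sign(w) to the gradient of w
            >>> decay = 0.1 * paddle.sign(w)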
    """

    def __init__(self, coeff: float = 0.0) -> None:
        assert coeff is not None
        super().__init__()
        self._coeff = coeff

    def __call__(
        self, param: paddle.Tensor, grad: paddle.Tensor, block: pir.Block
    ) -> paddle.Tensor:
        """Add L1 weight decay ops to network

        Adds L1 weight decay ops.
        L1WeightDecay = reg_coeff * sign(parameter)

        Args:
            param: parameter variable for which regularization is applied
            block: block in which variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(
            param, (framework.Variable, pir.Value, pir.core.ParameterMeta)
        )
        assert isinstance(block, (framework.Block, pir.Block))

        if in_dynamic_or_pir_mode():
            # Decay term is coeff * sign(param), computed eagerly.
            sign = _C_ops.sign(param)
            return _C_ops.scale(sign, self._coeff, 0.0, True)
        else:
            sign = block.create_var(
                dtype=param.dtype, shape=param.shape, lod_level=param.lod_level
            )
            decay = block.create_var(
                dtype=param.dtype, shape=param.shape, lod_level=param.lod_level
            )
            # Append sign op: sign = sign(param)
            block.append_op(
                type='sign', inputs={"X": param}, outputs={"Out": sign}
            )

            # Append scale op to the output of sign op: decay = coeff * sign
            block.append_op(
                type='scale',
                inputs={"X": sign},
                outputs={"Out": decay},
                attrs={"scale": self._coeff},
            )
            return decay

    def __str__(self) -> str:
        return f"L1Decay, coeff={self._coeff:f}"


class L2Decay(WeightDecayRegularizer):
    r"""
    Implement the L2 Weight Decay Regularization, which helps to prevent the model over-fitting.

    It can be set in :ref:`api_paddle_ParamAttr` or ``optimizer`` (such as :ref:`api_paddle_optimizer_Momentum` ).
    When set in ``ParamAttr`` , it only takes effect for trainable parameters in this layer. When set in
    ``optimizer`` , it takes effect for all trainable parameters. When set together, ``ParamAttr`` has
    higher priority than ``optimizer`` , which means that for a trainable parameter, if regularizer is defined
    in its ParamAttr, then the regularizer in Optimizer will be ignored. Otherwise the regularizer
    in Optimizer will be used.

    In the implementation, the loss function of L2 Weight Decay Regularization is as follows:

    .. math::

        loss = 0.5 * coeff * reduce\_sum(square(x))
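
    The factor of ``0.5`` makes the gradient come out clean: the gradient of
    this loss with respect to ``x`` is simply ``coeff * x``, which is the term
    this regularizer adds to each parameter's gradient.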

    Args:
        coeff(float, optional): regularization coeff. Default: 0.0.

    Examples:
        .. code-block:: python
            :name: code-example1

            >>> # Example1: set Regularizer in optimizer
            >>> import paddle
            >>> from paddle.regularizer import L2Decay
            >>> linear = paddle.nn.Linear(10, 10)
            >>> inp = paddle.rand(shape=[10, 10], dtype="float32")
            >>> out = linear(inp)
            >>> loss = paddle.mean(out)
            >>> beta1 = paddle.to_tensor([0.9], dtype="float32")
            >>> beta2 = paddle.to_tensor([0.99], dtype="float32")
            >>> momentum = paddle.optimizer.Momentum(
            ...     learning_rate=0.1,
            ...     parameters=linear.parameters(),
            ...     weight_decay=L2Decay(0.0001))
            >>> back = out.backward()
            >>> momentum.step()
            >>> momentum.clear_grad()

        .. code-block:: python
            :name: code-example2

            >>> # Example2: set Regularizer in parameters
            >>> # Set L2 regularization in parameters.
            >>> # Global regularizer does not take effect on my_conv2d for this case.
            >>> from paddle.nn import Conv2D
            >>> from paddle import ParamAttr
            >>> from paddle.regularizer import L2Decay

            >>> my_conv2d = Conv2D(
            ...         in_channels=10,
            ...         out_channels=10,
            ...         kernel_size=1,
            ...         stride=1,
            ...         padding=0,
            ...         weight_attr=ParamAttr(regularizer=L2Decay(coeff=0.01)),
            ...         bias_attr=False)
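
        The optimizers also accept a plain ``float`` for ``weight_decay`` and
        treat it as an L2 coefficient, so the following sketch (illustrative,
        not an upstream example) should be equivalent to
        ``weight_decay=L2Decay(0.0001)``:

        .. code-block:: python
            :name: code-example3

            >>> import paddle
            >>> linear = paddle.nn.Linear(10, 10)
            >>> momentum = paddle.optimizer.Momentum(
            ...     learning_rate=0.1,
            ...     parameters=linear.parameters(),
            ...     weight_decay=0.0001)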
    """

    def __init__(self, coeff: float = 0.0) -> None:
        assert coeff is not None
        super().__init__()
        self._coeff = coeff

    def __call__(
        self, param: paddle.Tensor, grad: paddle.Tensor, block: pir.Block
    ) -> paddle.Tensor:
        """Add L2 weight decay ops to network

        Adds L2 weight decay ops.
        L2WeightDecay = reg_coeff * parameter

        Args:
            param: parameter variable for which regularization is applied
            block: block in which variable is to be created

        Returns:
            new variable for weight decay
        """
        assert isinstance(
            param, (framework.Variable, pir.Value, pir.core.ParameterMeta)
        )
        assert isinstance(block, (framework.Block, pir.Block))

        if in_dynamic_or_pir_mode():
            # Decay term is coeff * param, computed eagerly.
            return _C_ops.scale(param, self._coeff, 0.0, True)
        else:
            decay = block.create_var(
                dtype=param.dtype, shape=param.shape, lod_level=param.lod_level
            )

            # Append scale op: decay = coeff * param
            block.append_op(
                type='scale',
                inputs={"X": param},
                outputs={"Out": decay},
                attrs={"scale": self._coeff},
            )
            return decay

    def __str__(self) -> str:
        return f"L2Decay, coeff={self._coeff:f}"
&& & 	&P1r   )
__future__r   paddler   r   paddle.baser   paddle.base.frameworkr   __all__r   r   r	   r   r   r   <module>r[      sI     #   ! 8i
 " "2s1$ s1lk1$ k1r   