
import torch


def is_available():
    return hasattr(torch._C, "_dist_autograd_init")


if is_available() and not torch._C._dist_autograd_init():
    raise RuntimeError("Failed to initialize torch.distributed.autograd")

if is_available():
    from torch._C._distributed_autograd import (
        _current_context,
        _get_debug_info,
        _get_max_id,
        _init,
        _is_valid_context,
        _new_context,
        _release_context,
        _retrieve_context,
        backward,
        DistAutogradContext,
        get_gradients,
    )


class context:
    """
    Context object to wrap forward and backward passes when using
    distributed autograd. The ``context_id`` generated in the ``with``
    statement is required to uniquely identify a distributed backward pass
    on all workers. Each worker stores metadata associated with this
    ``context_id``, which is required to correctly execute a distributed
    autograd pass.

    Example::
        >>> # xdoctest: +SKIP
        >>> import torch
        >>> import torch.distributed.autograd as dist_autograd
        >>> import torch.distributed.rpc as rpc
        >>> with dist_autograd.context() as context_id:
        >>>     t1 = torch.rand((3, 3), requires_grad=True)
        >>>     t2 = torch.rand((3, 3), requires_grad=True)
        >>>     loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
        >>>     dist_autograd.backward(context_id, [loss])
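        >>>     # Illustrative addition (not in the original example): the
        >>>     # gradients accumulated under this context_id can be fetched
        >>>     # with get_gradients, which this module also exports.
        >>>     grads = dist_autograd.get_gradients(context_id)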
    """

    def __enter__(self):
        # Create a new distributed autograd context for this pass and return
        # its context_id for use with backward()/get_gradients().
        self.autograd_context = _new_context()
        return self.autograd_context._context_id()

    def __exit__(self, type, value, traceback):
        # Release the context (and its stored metadata) when leaving the
        # ``with`` block.
        _release_context(self.autograd_context._context_id())