import abc
from dataclasses import dataclass
from typing import List, Any

from torch.futures import Future

from .metadata import Metadata, MetadataIndex
from .planner import LoadPlan, SavePlan, SavePlanner, LoadPlanner

__all__ = ["WriteResult", "StorageWriter", "StorageReader"]


@dataclass(frozen=True)
class WriteResult:
    index: MetadataIndex
    size_in_bytes: int
    storage_data: Any
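
# ``WriteResult`` ties the location of a saved entry (``index``) to storage-specific
# bookkeeping. As an illustration only (``item``, ``payload`` and the file name below
# are hypothetical, not part of this module), a writer that stores each entry in its
# own file might report:
#
#   WriteResult(
#       index=item.index,              # MetadataIndex of the WriteItem that was saved
#       size_in_bytes=len(payload),    # number of bytes actually written
#       storage_data="shard_0_0.bin",  # anything the matching StorageReader understands
#   )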


class StorageWriter(abc.ABC):
    """
    Interface used by ``save_state_dict`` to write to storage.

    One StorageWriter instance acts as both the coordinator and the follower
    in a distributed checkpoint. As part of initialization, each instance
    is told its role.

    A subclass should expect the following sequence of calls (a rough sketch of
    this flow appears below).

    1) (all ranks) set_up_storage_writer()
    2) (all ranks) prepare_local_plan()
    3) (coordinator) prepare_global_plan()
    4) (all ranks) write_data()
    5) (coordinator) finish()
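
    The snippet below is only a rough, single-rank sketch of how a saving entry
    point might drive a writer; the names ``writer``, ``planner`` and ``metadata``
    are illustrative and the gather/scatter steps of a real multi-rank save are
    elided::

        writer.set_up_storage_writer(is_coordinator=True)
        local_plan = writer.prepare_local_plan(planner.create_local_plan())
        global_plans = writer.prepare_global_plan([local_plan])  # coordinator only
        results = writer.write_data(global_plans[0], planner).wait()
        writer.finish(metadata, [results])  # coordinator only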
    """

    @abc.abstractmethod
    def set_up_storage_writer(self, is_coordinator: bool) -> None:
        """
        Initialize this instance.

        Args:
            is_coordinator (bool): Whether this instance is responsible for coordinating
              the checkpoint.
        """

    @abc.abstractmethod
    def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
        """
        Perform storage-specific local planning.

        While this method can produce a completely different plan, the recommended
        way is to store storage specific data in SavePlan::storage_data.

        Args:
            plan (SavePlan): The local plan from the ``SavePlanner`` in use.

        Returns:
            A transformed ``SavePlan`` after storage local planning
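
        Example (a minimal sketch; ``self.path`` is assumed to be a location the
        concrete writer was constructed with)::

            import dataclasses

            def prepare_local_plan(self, plan: SavePlan) -> SavePlan:
                # Keep the items untouched and only attach storage-specific data.
                return dataclasses.replace(plan, storage_data=self.path)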
        """

    @abc.abstractmethod
    def prepare_global_plan(self, plans: List[SavePlan]) -> List[SavePlan]:
        """
        Perform centralized planning of storage.

        This method is only called on the coordinator instance.

        While this method can produce a completely different plan, the preferred
        way is to store storage specific data in SavePlan::storage_data.

        Args:
            plans: A list of ``SavePlan`` instances, one for each rank.

        Returns:
            A list of transformed ``SavePlan`` after storage global planning
        """

    @abc.abstractmethod
    def write_data(
        self, plan: SavePlan, planner: SavePlanner
    ) -> Future[List[WriteResult]]:
        """
        Write all items from ``plan`` using ``planner`` to resolve the data.

        A subclass should call ``SavePlanner::resolve_data`` on each item
        from the plan to get access to the underlying object to write.

        Subclasses should lazily call ``resolve_data`` as it can allocate memory.
        In the case of tensors, make the following assumptions:

        - They might be on any device, including not matching the one on ``WriteItem::tensor_data``
        - They might be views or not contiguous. Only the projection needs to be saved.

        Args:
            plan (SavePlan): The save plan to execute.
            planner (SavePlanner): Planner object to be used to resolve items to data.

        Returns:
            A future that completes to a list of WriteResult
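
        Example (a schematic sketch only; ``_write_item`` is a hypothetical helper
        that serializes one resolved object and returns the byte count plus a
        storage key)::

            def write_data(self, plan, planner):
                results = []
                for item in plan.items:
                    data = planner.resolve_data(item)  # resolve lazily, may allocate
                    size, key = self._write_item(item, data)
                    results.append(WriteResult(item.index, size, key))
                fut: Future[List[WriteResult]] = Future()
                fut.set_result(results)
                return fut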
        """

    @abc.abstractmethod
    def finish(self, metadata: Metadata, results: List[List[WriteResult]]) -> None:
        """
        Writes the metadata and marks the current checkpoint as successful.

        The actual format/schema used for serializing `metadata` is an
        implementation detail. The only requirement is that it's recoverable
        into the same object graph.

        Args:
            metadata (Metadata): metadata for the new checkpoint
            results: A list of WriteResults from all ranks.

        Returns:
            None
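
        Example (one possible approach, not mandated by the interface; ``self.path``
        is assumed to be a directory the concrete writer was given at construction
        time)::

            import os
            import pickle

            def finish(self, metadata, results):
                # Persist the metadata; what to do with ``results`` is writer-specific.
                with open(os.path.join(self.path, ".metadata"), "wb") as f:
                    pickle.dump(metadata, f)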
        """


class StorageReader(abc.ABC):
    """
    Interface used by ``load_state_dict`` to read from storage.

    One StorageReader instance acts as both the coordinator and the follower
    in a distributed checkpoint. As part of initialization, each instance
    is told its role.

    A subclass should expect the following sequence of calls by ``load_state_dict``
    (a rough sketch of this flow appears below):

    1) (all ranks) read_metadata()
    2) (all ranks) set_up_storage_reader()
    3) (all ranks) prepare_local_plan()
    4) (coordinator) prepare_global_plan()
    5) (all ranks) read_data()
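
    The snippet below is only a rough, single-rank sketch of how a loading entry
    point might drive a reader; the names ``reader`` and ``planner`` are
    illustrative and the multi-rank coordination steps are elided::

        metadata = reader.read_metadata()
        reader.set_up_storage_reader(metadata, is_coordinator=True)
        local_plan = reader.prepare_local_plan(planner.create_local_plan())
        global_plans = reader.prepare_global_plan([local_plan])  # coordinator only
        reader.read_data(global_plans[0], planner).wait()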
    """

    @abc.abstractmethod
    def read_metadata(self) -> Metadata:
        """
        Reads the checkpoint metadata.

        Returns:
            The metadata object associated with the checkpoint being loaded.
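
        Example (a minimal sketch that mirrors a pickle-based writer; ``self.path``
        is assumed to be the checkpoint directory the concrete reader was given)::

            import os
            import pickle

            def read_metadata(self) -> Metadata:
                with open(os.path.join(self.path, ".metadata"), "rb") as f:
                    return pickle.load(f)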

        """

    @abc.abstractmethod
    def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None:
        """
        Initialize this instance.

        Args:
            metadata (Metadata): The metadata schema to use.
            is_coordinator (bool): Whether this instance is responsible for coordinating
              the checkpoint.
        """

    @abc.abstractmethod
    def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan:
        """
        Perform storage-specific local planning.

        While this method can produce a completely different plan, the recommended
        way is to store storage specific data in LoadPlan::storage_data.

        Args:
            plan (LoadPlan): The local plan from the ``LoadPlanner`` in use.

        Returns:
            A transformed ``LoadPlan`` after storage local planning
        """

    @abc.abstractmethod
    def prepare_global_plan(self, plans: List[LoadPlan]) -> List[LoadPlan]:
        """
        Perform centralized planning of storage loading.

        This method is only called on the coordinator instance.

        While this method can produce a completely different plan, the preferred
        way is to store storage specific data in LoadPlan::storage_data.

        Args:
            plans: A list of ``LoadPlan`` instances, one for each rank.

        Returns:
            A list of transformed ``LoadPlan`` after storage global planning
        """

    @abc.abstractmethod
    def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]:
        """
        Reads all items from ``plan`` using ``planner`` to resolve the data.

        A subclass should call ``LoadPlanner::load_bytes`` to deserialize a BytesIO
        object into the right place.

        A subclass should call ``LoadPlanner::resolve_tensor`` to get access to the
        tensors it should load data into.

        It's the storage layer's responsibility to properly schedule any cross-device copies
        required.

        Args:
            plan (LoadPlan): The local plan to execute on
            planner (LoadPlanner): The planner object to use to resolve items.

        Returns:
            A future that completes once all reads are finished.
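
        Example (a schematic sketch only; ``_read_bytes`` and ``_read_tensor`` are
        hypothetical helpers of the concrete reader, and ``LoadItemType`` comes from
        the sibling ``planner`` module)::

            def read_data(self, plan, planner):
                for item in plan.items:
                    if item.type == LoadItemType.BYTE_IO:
                        # Hand the raw bytes back to the planner for deserialization.
                        planner.load_bytes(item, self._read_bytes(item))
                    else:
                        target = planner.resolve_tensor(item)
                        target.copy_(self._read_tensor(item))
                        planner.commit_tensor(item, target)
                fut: Future[None] = Future()
                fut.set_result(None)
                return fut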
        """