"""A helper to roughly balance a sequential module.

Usage::

    import torch
    from torch.distributed.pipeline.sync import Pipe
    from torch.distributed.pipeline.sync.balance import balance_by_time

    sample = torch.empty(128, 3, 224, 224)
    balance = balance_by_time(torch.cuda.device_count(), model, sample)

    pipe = Pipe(model, balance, chunks=8)

é    )ÚAnyÚListÚUnionÚSequenceN)ÚTensoré   )Úblockpartition)Úprofile_sizesÚprofile_timesÚbalance_by_timeÚbalance_by_size)ÚcostÚ
partitionsÚreturnc                 C   s   t  | |¡}dd„ |D ƒS )Nc                 S   s   g | ]}t |ƒ‘qS © )Úlen)Ú.0Úpr   r   úU/tmp/pip-unpacked-wheel-gikjz4vx/torch/distributed/pipeline/sync/_balance/__init__.pyÚ
<listcomp>)   s     z balance_cost.<locals>.<listcomp>)r   Zsolve)r   r   Zpartitionedr   r   r   Úbalance_cost'   s    r   g      ð?Zcuda)ÚtimeoutÚdevice)r   ÚmoduleÚsampler   r   r   c                C   s   t |||t |¡ƒ}t|| ƒS )aî  Naive automatic balancing by elapsed time per layer.
    ::

        sample = torch.empty(128, 3, 224, 224)
        balance = balance_by_time(torch.cuda.device_count(), model, sample)
        pipe = Pipe(model, balance, chunks=8)

    Args:
        partitions (int):
            intended number of partitions
        module (torch.nn.Sequential):
            sequential module to be partitioned
        sample (torch.Tensor):
            example input with arbitrary batch size

    Keyword Args:
        timeout (float):
            profiling iterates again while the timeout (in seconds) has not
            been exceeded (default: ``1.0``)
        device ('cpu' or 'cuda' device):
            CPU or CUDA device where each layer is profiled (default: the
            current CUDA device)

    Returns:
        A list of the number of layers in each partition. Use it for the
        `balance` parameter of :class:`~torch.distributed.pipeline.sync.Pipe`.

    .. note::
        `module` and `sample` must be placed on the same device.

    """
    times = profile_times(module, sample, timeout, torch.device(device))
    return balance_cost(times, partitions)


def balance_by_size(
    partitions: int,
    module: nn.Sequential,
    input: Union[List[Any], Tensor],
    *,
    chunks: int = 1,
    param_scale: float = 2.0,
    device: Device = torch.device("cuda"),
) -> List[int]:
    """Naive automatic balancing by CUDA memory usage per layer.

    During training, required memory for parameters depends on which optimizer
    is used. Optimizers may use buffers for each parameter to track
    optimization statistics internally, such as the momentum buffer in SGD.

    To get a more reliable size-based balance, you should specify
    `param_scale` with regard to your optimizer. The default `param_scale` is
    2 instead of 1 due to gradient accumulation, which is necessary for every
    optimizer.

    Follow this guide to choose the correct `param_scale` for typical optimizers:

    =========  =============  =========================================
    Optimizer  `param_scale`  Internal State
    =========  =============  =========================================
    SGD        2--3           (momentum_buffer)
    Adam       4--5           exp_avg, exp_avg_sq, (max_exp_avg_sq)
    Adadelta   4              square_avg, acc_delta
    Adagrad    3              sum
    RMSprop    3--5           square_avg, (momentum_buffer), (grad_avg)
    =========  =============  =========================================

    Here's a simple example with the Adam optimizer::

        balance = balance_by_size(
            torch.cuda.device_count(),
            model,

            # Same size as a mini-batch used for training
            torch.empty(1024, 3, 224, 224),

            # Number of micro-batches to train with Pipe
            chunks=8,

            # 4 for Adam
            param_scale=4.0,
        )

        pipe = Pipe(model, balance, chunks=8)
        adam = Adam(pipe.parameters())

    Args:
        partitions (int):
            intended number of partitions
        module (torch.nn.Sequential):
            sequential module to be partitioned
        input (torch.Tensor):
            example mini-batch of the same size used for training

    Keyword Args:
        chunks (int):
            number of micro-batches that will be used to train (default: ``1``)
        param_scale (float):
            how many copies of the parameters would be allocated for training.
            It depends on the optimizer. See the guide above. (default: ``2.0``)
        device ('cuda' device):
            CUDA device where each layer is profiled (default: the current CUDA
            device)

    Returns:
        A list of the number of layers in each partition. Use it for the
        `balance` parameter of :class:`~torch.distributed.pipeline.sync.Pipe`.

    .. note::
        `module` and `input` must be placed on the same CUDA device.

    )r	   r   r   r   )r   r   r   r   r   r   Zsizesr   r   r   r   W   s    L)Ú__doc__Útypingr   r   r   r   r   r   Ztorch.nnÚnnÚ r   Zprofiler	   r
   Ú__all__r   ÚintÚstrZDeviceZTensorsZTensorOrTensorsr   Z
SequentialÚfloatr   r   r   r   r   r   Ú<module>   sB   
úù0ùø