#include <distributed.h>

| Returns | Public member function |
| --- | --- |
|  | `Context(std::shared_ptr<Store> store, int rank, int size, std::chrono::milliseconds timeout = std::chrono::seconds(1000))` |
| `Work` | `template<typename T, IsTorchDType<T>* = nullptr> allreduce(T* ptr, int64_t s, ReduceOp = ReduceOp::SUM)` |
| `Work` | `template<typename T, IsTorchDType<T>* = nullptr> allreduce(std::vector<T>& v, ReduceOp = ReduceOp::SUM)` |
| `Work` | `allreduce(torch::Tensor, ReduceOp = ReduceOp::SUM)` |
| `Work` | `allreduceGradients(ag::Container const&, ReduceOp = ReduceOp::SUM)` |
| `Work` | `template<typename T, IsTorchDType<T>* = nullptr> broadcast(T* ptr, int64_t s, int root = 0)` |
| `Work` | `template<typename T, IsTorchDType<T>* = nullptr> broadcast(std::vector<T>& v, int root = 0)` |
| `Work` | `broadcast(torch::Tensor, int root = 0)` |
| `Work` | `broadcast(ag::Container const&, int root = 0)` |
| `Work` | `template<typename T, IsTorchDType<T>* = nullptr> allgather(T* out, T* in, int64_t s)` |
| `Work` | `template<typename T, IsTorchDType<T>* = nullptr> allgather(T* out, torch::Tensor in)` |
| `Work` | `allgather(torch::Tensor, torch::Tensor)` |
| `Work` | `barrier()` |

`cpid::distributed::Context::Context(std::shared_ptr<Store> store, int rank, int size, std::chrono::milliseconds timeout = std::chrono::seconds(1000))`

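The page gives only the constructor signature, so the following is a minimal construction sketch. `makeRendezvousStore()` is a hypothetical placeholder for however the application obtains its `Store`, the five-minute timeout is an arbitrary choice, and `Context` and `Store` are assumed to be visible in the `cpid::distributed` namespace as the unqualified signature suggests.

```cpp
#include <distributed.h>

#include <chrono>
#include <memory>

using namespace cpid::distributed;

// Hypothetical helper: obtain the key/value Store used for rendezvous.
// How a Store is created is not covered on this page.
std::shared_ptr<Store> makeRendezvousStore();

// Build a Context for this process. rank is this process's index in
// [0, size); size is the total number of participating processes.
std::shared_ptr<Context> makeContext(int rank, int size) {
  // The 5-minute timeout is arbitrary for this sketch; the documented
  // default is std::chrono::seconds(1000).
  return std::make_shared<Context>(
      makeRendezvousStore(), rank, size, std::chrono::minutes(5));
}
```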
`template<typename T, IsTorchDType<T>* = nullptr> Work cpid::distributed::Context::allgather(T* out, T* in, int64_t s)`

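Parameter semantics are not documented here; the sketch below assumes the conventional all-gather contract, where each of the `size` processes contributes `s` elements from `in` and `out` must provide room for `s * size` elements, with rank i's chunk at offset `i * s`. Waiting on the returned `Work` via `wait()` is also an assumption, as the completion API is not shown on this page.

```cpp
#include <distributed.h>

#include <vector>

using namespace cpid::distributed;

// Sketch: gather one float per process so every rank ends up with all values.
// Assumes the usual all-gather layout (out holds s * size elements).
std::vector<float> gatherScores(Context& ctx, float myScore) {
  std::vector<float> all(ctx.size);  // one element per rank (s = 1)
  auto work = ctx.allgather(all.data(), &myScore, /*s=*/1);
  work.wait();  // Work::wait() assumed; not documented on this page
  return all;
}
```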
`template<typename T, IsTorchDType<T>* = nullptr> Work cpid::distributed::Context::allgather(T* out, torch::Tensor in)`

`Work cpid::distributed::Context::allgather(torch::Tensor out, torch::Tensor in)`

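For the tensor-to-tensor overload, a plausible usage is sketched below. It assumes the output tensor must be preallocated with one slot per rank along dimension 0 (shape `[size, N]` for an `N`-element input); the exact layout is not documented on this page, and `Work::wait()` is again assumed.

```cpp
#include <distributed.h>

#include <torch/torch.h>

using namespace cpid::distributed;

// Sketch: gather an N-element tensor from every process on every process.
// Assumes out is preallocated as [size, N], one row per rank.
torch::Tensor gatherTensor(Context& ctx, torch::Tensor in) {
  auto out = torch::empty({ctx.size, in.size(0)}, in.options());
  ctx.allgather(out, in).wait();  // Work::wait() assumed
  return out;
}
```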
`template<typename T, IsTorchDType<T>* = nullptr> Work cpid::distributed::Context::allreduce(T* ptr, int64_t s, ReduceOp = ReduceOp::SUM)`

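A minimal sketch of the raw-pointer overload follows. It assumes the conventional allreduce contract, with the reduction applied in place over the `s` elements at `ptr`, and it assumes `Work::wait()` as the completion call.

```cpp
#include <distributed.h>

#include <cstdint>

using namespace cpid::distributed;

// Sketch: sum n event counters across all processes, in place.
void sumCounters(Context& ctx, int64_t* counters, int64_t n) {
  ctx.allreduce(counters, n, ReduceOp::SUM).wait();  // Work::wait() assumed
}
```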
`template<typename T, IsTorchDType<T>* = nullptr> Work cpid::distributed::Context::allreduce(std::vector<T>& v, ReduceOp op = ReduceOp::SUM)`

`Work cpid::distributed::Context::allreduce(torch::Tensor x, ReduceOp op = ReduceOp::SUM)`

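The tensor overload is commonly used to aggregate metrics. The sketch below averages a scalar value over all processes by summing and dividing by `size`; in-place reduction of the tensor and `Work::wait()` are assumptions, as neither is spelled out on this page.

```cpp
#include <distributed.h>

#include <torch/torch.h>

using namespace cpid::distributed;

// Sketch: compute the mean of a scalar metric over all processes.
float meanMetric(Context& ctx, float localValue) {
  auto t = torch::tensor(localValue);     // 0-dim tensor holding the metric
  ctx.allreduce(t, ReduceOp::SUM).wait(); // Work::wait() assumed
  return t.item<float>() / static_cast<float>(ctx.size);
}
```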
`Work cpid::distributed::Context::allreduceGradients(ag::Container const& model, ReduceOp op = ReduceOp::SUM)`

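A typical data-parallel pattern is sketched below: each worker runs its own backward pass, then gradients are summed across workers before the parameter update. In-place reduction of the model's gradients and `Work::wait()` are assumptions; `computeLoss`, `optimizerStep`, and the autogradpp include path are hypothetical placeholders for application code.

```cpp
#include <distributed.h>

#include <autogradpp/autograd.h>  // assumed include path for ag::Container
#include <torch/torch.h>

using namespace cpid::distributed;

// Hypothetical helpers standing in for application code:
torch::Tensor computeLoss(ag::Container const& model);
void optimizerStep(ag::Container const& model, double gradScale);

// Sketch of a data-parallel training step.
void trainStep(Context& ctx, ag::Container const& model) {
  auto loss = computeLoss(model);  // forward pass + loss on this worker's data
  loss.backward();                 // per-worker gradients
  ctx.allreduceGradients(model, ReduceOp::SUM).wait();  // Work::wait() assumed
  // With SUM, gradients are typically rescaled by 1 / size before stepping.
  optimizerStep(model, 1.0 / static_cast<double>(ctx.size));
}
```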
`Work cpid::distributed::Context::barrier()`

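A common use of the barrier is sketched below: rank 0 writes a checkpoint while the other processes wait. This assumes `barrier()` has the usual meaning (no process continues until all processes have entered it), and assumes `Work::wait()`; `saveCheckpoint` is a hypothetical helper.

```cpp
#include <distributed.h>

using namespace cpid::distributed;

// Hypothetical helper standing in for application code:
void saveCheckpoint();

// Sketch: keep all processes in lockstep around a rank-0-only side effect.
void checkpointInLockstep(Context& ctx) {
  if (ctx.rank == 0) {
    saveCheckpoint();
  }
  ctx.barrier().wait();  // Work::wait() assumed
}
```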
`template<typename T, IsTorchDType<T>* = nullptr> Work cpid::distributed::Context::broadcast(T* ptr, int64_t s, int root = 0)`

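The raw-pointer broadcast is handy for small values that every process must agree on, such as a random seed. The sketch assumes the usual broadcast contract, where the `s` elements at `ptr` on non-root ranks are overwritten with the root's values, and assumes `Work::wait()`.

```cpp
#include <distributed.h>

#include <cstdint>
#include <random>

using namespace cpid::distributed;

// Sketch: draw a random seed on rank 0 and make every process use it.
int64_t agreeOnSeed(Context& ctx) {
  int64_t seed = 0;
  if (ctx.rank == 0) {
    seed = static_cast<int64_t>(std::random_device{}());
  }
  ctx.broadcast(&seed, /*s=*/1, /*root=*/0).wait();  // Work::wait() assumed
  return seed;
}
```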
`template<typename T, IsTorchDType<T>* = nullptr> Work cpid::distributed::Context::broadcast(std::vector<T>& v, int root = 0)`

`Work cpid::distributed::Context::broadcast(torch::Tensor x, int root = 0)`

`Work cpid::distributed::Context::broadcast(ag::Container const& model, int root = 0)`

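A typical use of the container overload is synchronizing model parameters from rank 0 at startup so that all workers begin from identical weights. The sketch assumes this overload broadcasts every parameter tensor of the model from `root`, assumes `Work::wait()`, and uses a hypothetical include path for autogradpp.

```cpp
#include <distributed.h>

#include <autogradpp/autograd.h>  // assumed include path for ag::Container

using namespace cpid::distributed;

// Sketch: make all workers start from rank 0's weights.
void syncInitialWeights(Context& ctx, ag::Container const& model) {
  ctx.broadcast(model, /*root=*/0).wait();  // Work::wait() assumed
}
```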
`int cpid::distributed::Context::rank`

`int cpid::distributed::Context::size`