| Member | Class | |
|---|---|---|
| allgather(T *out, T *in, int64_t s) | cpid::distributed::Context | |
| allgather(T *out, torch::Tensor in) | cpid::distributed::Context | |
| allgather(torch::Tensor, torch::Tensor) | cpid::distributed::Context | |
| allreduce(T *ptr, int64_t s, ReduceOp=ReduceOp::SUM) | cpid::distributed::Context | |
| allreduce(std::vector< T > &v, ReduceOp=ReduceOp::SUM) | cpid::distributed::Context | |
| allreduce(torch::Tensor, ReduceOp=ReduceOp::SUM) | cpid::distributed::Context | |
| allreduceGradients(ag::Container const &, ReduceOp=ReduceOp::SUM) | cpid::distributed::Context | |
| barrier() | cpid::distributed::Context | |
| broadcast(T *ptr, int64_t s, int root=0) | cpid::distributed::Context | |
| broadcast(std::vector< T > &v, int root=0) | cpid::distributed::Context | |
| broadcast(torch::Tensor, int root=0) | cpid::distributed::Context | |
| broadcast(ag::Container const &, int root=0) | cpid::distributed::Context | |
| Context(std::shared_ptr< Store > store, int rank, int size, std::chrono::milliseconds timeout=std::chrono::seconds(1000)) | cpid::distributed::Context | |
| rank | cpid::distributed::Context | |
| size | cpid::distributed::Context | |
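The collectives above operate in place through a `Context`. Below is a minimal sketch of constructing one and summing values across workers; the header path, the exact `Store` type, and how `rank`/`worldSize` are obtained are assumptions that depend on how your job is launched, so they are passed in rather than invented here.

```cpp
#include <cpid/distributed.h> // assumed header path for cpid::distributed
#include <torch/torch.h>

#include <memory>
#include <vector>

// Obtaining the Store and the (rank, size) pair is launcher-specific;
// this sketch takes them as arguments.
void sumAcrossWorkers(
    std::shared_ptr<cpid::distributed::Store> store, // assumed Store type
    int rank,
    int worldSize) {
  // The timeout defaults to 1000 seconds (see the constructor signature above).
  cpid::distributed::Context ctx(std::move(store), rank, worldSize);

  // In-place sum of a vector across all workers; ReduceOp::SUM is the default.
  std::vector<float> v(16, static_cast<float>(ctx.rank));
  ctx.allreduce(v);

  // The tensor overload works the same way.
  auto t = torch::ones({4}) * ctx.rank;
  ctx.allreduce(t);
}
```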
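In the same spirit, a sketch of `broadcast`, `allgather`, and `barrier`, reusing a `Context` built as above; the tensor shapes and the choice of `root` are illustrative only.

```cpp
// Broadcast rank 0's tensor to everyone, then gather one value per rank.
void syncAndGather(cpid::distributed::Context& ctx) {
  auto params = torch::zeros({8});
  if (ctx.rank == 0) {
    params.fill_(1.0); // only the root's values survive the broadcast
  }
  ctx.broadcast(params, /*root=*/0);

  // allgather(out, in): out needs one slot per contribution, ctx.size in total.
  auto local = torch::full({1}, static_cast<float>(ctx.rank));
  auto out = torch::zeros({ctx.size});
  ctx.allgather(out, local);

  ctx.barrier(); // all ranks synchronize here before continuing
}
```

For training, `allreduceGradients` applies the same in-place reduction (summation by default) to every gradient of an `ag::Container` after the backward pass.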