Open3D (C++ API)
0.18.0+252c867
Namespaces
    cuda
    eigen_converter
    kernel
    linalg
    nns
    shape_util
    sycl
    tensor_check
    tensor_init
Typedefs
    template<typename Key>
    using InternalStdGPUHashBackendAllocator = StdGPUAllocator<thrust::pair<const Key, buf_index_t>>

    template<typename Key, typename Hash, typename Eq>
    using InternalStdGPUHashBackend = stdgpu::unordered_map<Key, buf_index_t, Hash, Eq, InternalStdGPUHashBackendAllocator<Key>>

    using buf_index_t = uint32_t

    template<class T>
    using SmallVectorSizeType = typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t, uint32_t>::type

    template<typename RangeType>
    using ValueTypeFromRangeType = typename std::remove_const<typename std::remove_reference<decltype(*std::begin(std::declval<RangeType &>()))>::type>::type
Enumerations
    enum class HashBackendType { Slab, StdGPU, TBB, Default }
    enum class DtypePolicy { NONE, ALL_SAME, INPUT_SAME, INPUT_SAME_OUTPUT_BOOL }
Functions
    uint32_t AtomicFetchAddRelaxed(uint32_t *address, uint32_t val)
    uint64_t AtomicFetchAddRelaxed(uint64_t *address, uint64_t val)
    void CPUResetHeap(Tensor &heap)
    std::shared_ptr<DeviceHashBackend> CreateCPUHashBackend(int64_t init_capacity, const Dtype &key_dtype, const SizeVector &key_element_shape, const std::vector<Dtype> &value_dtypes, const std::vector<SizeVector> &value_element_shapes, const Device &device, const HashBackendType &backend)
        Non-templated factory.
    template<typename Key, typename Hash, typename Eq>
    __global__ void InsertKernelPass0(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, int heap_counter_prev, int64_t count)
        Kernels.
    template<typename Key, typename Hash, typename Eq>
    __global__ void InsertKernelPass1(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)
    template<typename Key, typename Hash, typename Eq, typename block_t>
    __global__ void InsertKernelPass2(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *const *input_values_soa, buf_index_t *output_buf_indices, bool *output_masks, int64_t count, int64_t n_values)
    template<typename Key, typename Hash, typename Eq>
    __global__ void FindKernel(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)
    template<typename Key, typename Hash, typename Eq>
    __global__ void EraseKernelPass0(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)
    template<typename Key, typename Hash, typename Eq>
    __global__ void EraseKernelPass1(SlabHashBackendImpl<Key, Hash, Eq> impl, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)
    template<typename Key, typename Hash, typename Eq>
    __global__ void GetActiveIndicesKernel(SlabHashBackendImpl<Key, Hash, Eq> impl, buf_index_t *output_buf_indices, uint32_t *output_count)
    template<typename Key, typename Hash, typename Eq>
    __global__ void CountElemsPerBucketKernel(SlabHashBackendImpl<Key, Hash, Eq> impl, int64_t *bucket_elem_counts)
    __global__ void CountSlabsPerSuperblockKernel(SlabNodeManagerImpl impl, uint32_t *slabs_per_superblock)
    template<typename First, typename Second>
    OPEN3D_HOST_DEVICE Pair<First, Second> make_pair(const First &_first, const Second &_second)
    template<typename Key, typename Hash, typename Eq>
    __global__ void STDGPUFindKernel(InternalStdGPUHashBackend<Key, Hash, Eq> map, CUDAHashBackendBufferAccessor buffer_accessor, const Key *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)
    template<typename Key, typename Hash, typename Eq>
    __global__ void STDGPUEraseKernel(InternalStdGPUHashBackend<Key, Hash, Eq> map, CUDAHashBackendBufferAccessor buffer_accessor, const Key *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)
    template<typename Key, typename Hash, typename Eq, typename block_t>
    __global__ void STDGPUInsertKernel(InternalStdGPUHashBackend<Key, Hash, Eq> map, CUDAHashBackendBufferAccessor buffer_accessor, const Key *input_keys, const void *const *input_values_soa, buf_index_t *output_buf_indices, bool *output_masks, int64_t count, int64_t n_values)
    std::shared_ptr<DeviceHashBackend> CreateDeviceHashBackend(int64_t init_capacity, const Dtype &key_dtype, const SizeVector &key_element_shape, const std::vector<Dtype> &value_dtypes, const std::vector<SizeVector> &value_element_shapes, const Device &device, const HashBackendType &backend)
    std::shared_ptr<DeviceHashBackend> CreateCUDAHashBackend(int64_t init_capacity, const Dtype &key_dtype, const SizeVector &key_element_shape, const std::vector<Dtype> &value_dtypes, const std::vector<SizeVector> &value_element_shapes, const Device &device, const HashBackendType &backend)
    void AddMM(const Tensor &A, const Tensor &B, Tensor &output, double alpha, double beta)
    void AddMMCPU(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, double alpha, double beta, bool gemmTrA, bool gemmTrB, int lda, int ldb, int ldc, Dtype dtype)
    void AddMMCUDA(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, double alpha, double beta, bool gemmTrA, bool gemmTrB, int lda, int ldb, int ldc, Dtype dtype, const Device &device)
    template<typename scalar_t>
    void gemm_cpu(CBLAS_LAYOUT layout, CBLAS_TRANSPOSE trans_A, CBLAS_TRANSPOSE trans_B, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT k, scalar_t alpha, const scalar_t *A_data, OPEN3D_CPU_LINALG_INT lda, const scalar_t *B_data, OPEN3D_CPU_LINALG_INT ldb, scalar_t beta, scalar_t *C_data, OPEN3D_CPU_LINALG_INT ldc)
    template<>
    void gemm_cpu<float>(CBLAS_LAYOUT layout, CBLAS_TRANSPOSE trans_A, CBLAS_TRANSPOSE trans_B, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT k, float alpha, const float *A_data, OPEN3D_CPU_LINALG_INT lda, const float *B_data, OPEN3D_CPU_LINALG_INT ldb, float beta, float *C_data, OPEN3D_CPU_LINALG_INT ldc)
    template<>
    void gemm_cpu<double>(CBLAS_LAYOUT layout, CBLAS_TRANSPOSE trans_A, CBLAS_TRANSPOSE trans_B, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT k, double alpha, const double *A_data, OPEN3D_CPU_LINALG_INT lda, const double *B_data, OPEN3D_CPU_LINALG_INT ldb, double beta, double *C_data, OPEN3D_CPU_LINALG_INT ldc)
    double Det(const Tensor &A)
    void Inverse(const Tensor &A, Tensor &output)
        Computes A^{-1} with LU factorization, where A is an N x N square matrix.
    void InverseCPU(void *A_data, void *ipiv_data, void *output_data, int64_t n, Dtype dtype, const Device &device)
    void InverseCUDA(void *A_data, void *ipiv_data, void *output_data, int64_t n, Dtype dtype, const Device &device)
    template<typename scalar_t>
    OPEN3D_CPU_LINALG_INT getrf_cpu(int layout, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, scalar_t *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data)
    template<typename scalar_t>
    OPEN3D_CPU_LINALG_INT getri_cpu(int layout, OPEN3D_CPU_LINALG_INT n, scalar_t *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data)
    template<typename scalar_t>
    OPEN3D_CPU_LINALG_INT gesv_cpu(int layout, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT m, scalar_t *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data, scalar_t *B_data, OPEN3D_CPU_LINALG_INT ldb)
    template<typename scalar_t>
    OPEN3D_CPU_LINALG_INT gels_cpu(int matrix_layout, char trans, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT nrhs, scalar_t *A_data, OPEN3D_CPU_LINALG_INT lda, scalar_t *B_data, OPEN3D_CPU_LINALG_INT ldb)
    template<typename scalar_t>
    OPEN3D_CPU_LINALG_INT gesvd_cpu(int matrix_layout, char jobu, char jobvt, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, scalar_t *A_data, OPEN3D_CPU_LINALG_INT lda, scalar_t *S_data, scalar_t *U_data, OPEN3D_CPU_LINALG_INT ldu, scalar_t *VT_data, OPEN3D_CPU_LINALG_INT ldvt, scalar_t *superb)
    template<>
    OPEN3D_CPU_LINALG_INT getrf_cpu<float>(int layout, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, float *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data)
    template<>
    OPEN3D_CPU_LINALG_INT getrf_cpu<double>(int layout, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, double *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data)
    template<>
    OPEN3D_CPU_LINALG_INT getri_cpu<float>(int layout, OPEN3D_CPU_LINALG_INT n, float *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data)
    template<>
    OPEN3D_CPU_LINALG_INT getri_cpu<double>(int layout, OPEN3D_CPU_LINALG_INT n, double *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data)
    template<>
    OPEN3D_CPU_LINALG_INT gesv_cpu<float>(int layout, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT m, float *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data, float *B_data, OPEN3D_CPU_LINALG_INT ldb)
    template<>
    OPEN3D_CPU_LINALG_INT gesv_cpu<double>(int layout, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT m, double *A_data, OPEN3D_CPU_LINALG_INT lda, OPEN3D_CPU_LINALG_INT *ipiv_data, double *B_data, OPEN3D_CPU_LINALG_INT ldb)
    template<>
    OPEN3D_CPU_LINALG_INT gels_cpu<float>(int layout, char trans, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT nrhs, float *A_data, OPEN3D_CPU_LINALG_INT lda, float *B_data, OPEN3D_CPU_LINALG_INT ldb)
    template<>
    OPEN3D_CPU_LINALG_INT gels_cpu<double>(int layout, char trans, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, OPEN3D_CPU_LINALG_INT nrhs, double *A_data, OPEN3D_CPU_LINALG_INT lda, double *B_data, OPEN3D_CPU_LINALG_INT ldb)
    template<>
    OPEN3D_CPU_LINALG_INT gesvd_cpu<float>(int layout, char jobu, char jobvt, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, float *A_data, OPEN3D_CPU_LINALG_INT lda, float *S_data, float *U_data, OPEN3D_CPU_LINALG_INT ldu, float *VT_data, OPEN3D_CPU_LINALG_INT ldvt, float *superb)
    template<>
    OPEN3D_CPU_LINALG_INT gesvd_cpu<double>(int layout, char jobu, char jobvt, OPEN3D_CPU_LINALG_INT m, OPEN3D_CPU_LINALG_INT n, double *A_data, OPEN3D_CPU_LINALG_INT lda, double *S_data, double *U_data, OPEN3D_CPU_LINALG_INT ldu, double *VT_data, OPEN3D_CPU_LINALG_INT ldvt, double *superb)
    void LeastSquares(const Tensor &A, const Tensor &B, Tensor &X)
        Solve AX = B with QR decomposition. A is a full-rank m x n matrix (m >= n).
    void LeastSquaresCPU(void *A_data, void *B_data, int64_t m, int64_t n, int64_t k, Dtype dtype, const Device &device)
    void LeastSquaresCUDA(void *A_data, void *B_data, int64_t m, int64_t n, int64_t k, Dtype dtype, const Device &device)
    void OPEN3D_LAPACK_CHECK(OPEN3D_CPU_LINALG_INT info, const std::string &msg)
    void LUIpiv(const Tensor &A, Tensor &ipiv, Tensor &output)
    void LU(const Tensor &A, Tensor &permutation, Tensor &lower, Tensor &upper, const bool permute_l)
    void LUCPU(void *A_data, void *ipiv_data, int64_t rows, int64_t cols, Dtype dtype, const Device &device)
    void LUCUDA(void *A_data, void *ipiv_data, int64_t rows, int64_t cols, Dtype dtype, const Device &device)
    void Matmul(const Tensor &A, const Tensor &B, Tensor &C)
        Computes matrix multiplication C = AB.
    void MatmulCPU(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, Dtype dtype)
    void MatmulCUDA(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, Dtype dtype, const Device &device)
    void Solve(const Tensor &A, const Tensor &B, Tensor &X)
        Solve AX = B with LU decomposition. A is a square matrix.
    void SolveCPU(void *A_data, void *B_data, void *ipiv_data, int64_t n, int64_t k, Dtype dtype, const Device &device)
    void SolveCUDA(void *A_data, void *B_data, void *ipiv_data, int64_t n, int64_t k, Dtype dtype, const Device &device)
    void SVD(const Tensor &A, Tensor &U, Tensor &S, Tensor &VT)
    void SVDCPU(const void *A_data, void *U_data, void *S_data, void *VT_data, void *superb_data, int64_t m, int64_t n, Dtype dtype, const Device &device)
    void SVDCUDA(const void *A_data, void *U_data, void *S_data, void *VT_data, void *superb_data, int64_t m, int64_t n, Dtype dtype, const Device &device)
    void Triu(const Tensor &A, Tensor &output, const int diagonal)
    void Tril(const Tensor &A, Tensor &output, const int diagonal)
    void Triul(const Tensor &A, Tensor &upper, Tensor &lower, const int diagonal)
    void TriuCPU(const Tensor &A, Tensor &output, const int diagonal)
    void TrilCPU(const Tensor &A, Tensor &output, const int diagonal)
    void TriulCPU(const Tensor &A, Tensor &upper, Tensor &lower, const int diagonal)
    template<typename func_t>
    void ParallelForCPU_(const Device &device, int64_t n, const func_t &func)
        Run a function in parallel on CPU.
    template<typename func_t>
    void ParallelFor(const Device &device, int64_t n, const func_t &func)
    template<typename vec_func_t, typename func_t>
    void ParallelFor(const Device &device, int64_t n, const func_t &func, const vec_func_t &vec_func)
    void *safe_malloc(size_t Sz)
    void *safe_realloc(void *Ptr, size_t Sz)
    template<typename T, unsigned N>
    size_t capacity_in_bytes(const SmallVector<T, N> &X)
    template<unsigned Size, typename R>
    SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range)
    template<typename R>
    SmallVector<ValueTypeFromRangeType<R>, CalculateSmallVectorDefaultInlinedElements<ValueTypeFromRangeType<R>>::value> to_vector(R &&Range)
    template<typename T>
    Tensor operator+(T scalar_lhs, const Tensor &rhs)
    template<typename T>
    Tensor operator-(T scalar_lhs, const Tensor &rhs)
    template<typename T>
    Tensor operator*(T scalar_lhs, const Tensor &rhs)
    template<typename T>
    Tensor operator/(T scalar_lhs, const Tensor &rhs)
    Tensor Concatenate(const std::vector<Tensor> &tensors, const utility::optional<int64_t> &axis = 0)
        Concatenates the list of tensors, in order, along the given axis into a new tensor. All tensors must have the same data type, device, and number of dimensions, and all dimensions must match except the one along the concatenation axis. When called with a single tensor, that tensor is split along its first dimension (length) and the pieces are concatenated along the axis.
    Tensor Append(const Tensor &self, const Tensor &other, const utility::optional<int64_t> &axis = utility::nullopt)
        Appends the two tensors, along the given axis, into a new tensor. Both tensors must have the same data type, device, and number of dimensions, and all dimensions must match except the one along the append axis.
    Tensor Maximum(const Tensor &input, const Tensor &other)
        Computes the element-wise maximum of input and other. The tensors must have the same data type and device.
    Tensor Minimum(const Tensor &input, const Tensor &other)
        Computes the element-wise minimum of input and other. The tensors must have the same data type and device.
Variables
    const Dtype Undefined = Dtype::Undefined
    const Dtype Float32 = Dtype::Float32
    const Dtype Float64 = Dtype::Float64
    const Dtype Int8 = Dtype::Int8
    const Dtype Int16 = Dtype::Int16
    const Dtype Int32 = Dtype::Int32
    const Dtype Int64 = Dtype::Int64
    const Dtype UInt8 = Dtype::UInt8
    const Dtype UInt16 = Dtype::UInt16
    const Dtype UInt32 = Dtype::UInt32
    const Dtype UInt64 = Dtype::UInt64
    const Dtype Bool = Dtype::Bool
    template<typename T, unsigned N>
    class LLVM_GSL_OWNER SmallVector
    constexpr utility::nullopt_t None {utility::nullopt_t::init()}
void open3d::core::AddMM(const Tensor &A, const Tensor &B, Tensor &C, double alpha, double beta)

    Computes matrix multiplication C = alpha * A @ B + beta * C. If A is an (n x m) tensor and B is an (m x p) tensor, C must have shape (n x p). alpha and beta are the scaling factors on the matrix-matrix product and the added matrix C, respectively.
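A minimal usage sketch (header paths are assumed from the Open3D source layout; shapes and values are chosen for illustration):

    #include "open3d/core/Tensor.h"
    #include "open3d/core/linalg/AddMM.h"

    using namespace open3d::core;

    void AddMMDemo() {
        // C = 2.0 * A @ B + 0.5 * C, with A (2 x 3), B (3 x 4), C (2 x 4).
        Tensor A = Tensor::Ones({2, 3}, Float32);
        Tensor B = Tensor::Ones({3, 4}, Float32);
        Tensor C = Tensor::Zeros({2, 4}, Float32);
        AddMM(A, B, C, /*alpha=*/2.0, /*beta=*/0.5);
        // Every entry of C is now 2.0 * 3 + 0.5 * 0 = 6.
    }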
void open3d::core::AddMMCPU(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, double alpha, double beta, bool gemmTrA, bool gemmTrB, int lda, int ldb, int ldc, Dtype dtype)

void open3d::core::AddMMCUDA(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, double alpha, double beta, bool gemmTrA, bool gemmTrB, int lda, int ldb, int ldc, Dtype dtype, const Device &device)
Tensor open3d::core::Append(const Tensor &self, const Tensor &other, const utility::optional<int64_t> &axis = utility::nullopt)

    Appends the two tensors, along the given axis, into a new tensor. Both tensors must have the same data type, device, and number of dimensions, and all dimensions must match except the one along the append axis. This follows NumPy's append semantics.

    Parameters:
        self   Values are appended to a copy of this tensor.
        other  Values of this tensor are appended to self.
        axis   [optional] The axis along which values are appended. If axis is not given, both tensors are flattened before use.

    Returns:
        A new tensor with the values of other appended to self along the given axis. The append does not occur in-place: a new tensor is allocated and filled. If axis is not given, the result is a flattened tensor.
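A short sketch of these semantics (assuming the Tensor::Init initializer style from the Tensor API; shapes are illustrative):

    using namespace open3d::core;

    void AppendDemo() {
        Tensor a = Tensor::Init<int64_t>({{0, 1}, {2, 3}});  // shape {2, 2}
        Tensor b = Tensor::Init<int64_t>({{4, 5}});          // shape {1, 2}
        Tensor flat = Append(a, b);      // no axis: flattened result, shape {6}
        Tensor rows = Append(a, b, 0);   // along axis 0: shape {3, 2}
    }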
uint32_t open3d::core::AtomicFetchAddRelaxed(uint32_t *address, uint32_t val)    [inline]

    Adds val to the value stored at address and returns the previously stored value, as an atomic operation. This function does not impose any ordering on concurrent memory accesses.

uint64_t open3d::core::AtomicFetchAddRelaxed(uint64_t *address, uint64_t val)    [inline]

    Adds val to the value stored at address and returns the previously stored value, as an atomic operation. This function does not impose any ordering on concurrent memory accesses.
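A sketch of a typical use: claiming unique output slots with a lock-free counter inside a ParallelFor loop (the mask, output buffer, and sizes are hypothetical; relevant headers are assumed to be included):

    #include <vector>

    using namespace open3d::core;

    // out must be pre-sized to hold at least the number of matching elements.
    void CompactDemo(const std::vector<bool> &mask, std::vector<int64_t> &out) {
        uint32_t count = 0;
        ParallelFor(Device("CPU:0"), static_cast<int64_t>(mask.size()),
                    [&](int64_t i) {
                        if (mask[i]) {
                            // Each matching element claims a unique slot; no
                            // ordering between threads is guaranteed.
                            uint32_t slot = AtomicFetchAddRelaxed(&count, 1u);
                            out[slot] = i;
                        }
                    });
    }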
Tensor open3d::core::Concatenate(const std::vector<Tensor> &tensors, const utility::optional<int64_t> &axis = 0)

    Concatenates the list of tensors, in order, along the given axis into a new tensor. All tensors must have the same data type, device, and number of dimensions, and all dimensions must match except the one along the concatenation axis. When Concatenate is called with a single tensor, that tensor is split along its first dimension (length) and the pieces are concatenated along the given axis. This follows NumPy's concatenate semantics.

    Parameters:
        tensors  Vector of tensors to be concatenated. If only one tensor is present, it is split along its first dimension (length) and concatenated along the axis.
        axis     [optional] The axis along which values are concatenated. The default axis is 0.
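A short sketch of these semantics (shapes are illustrative):

    using namespace open3d::core;

    void ConcatenateDemo() {
        Tensor a = Tensor::Init<float>({{0, 1}, {2, 3}});  // shape {2, 2}
        Tensor b = Tensor::Init<float>({{4, 5}});          // shape {1, 2}
        Tensor c = Concatenate({a, b}, 0);                 // shape {3, 2}
    }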
__global__ void open3d::core::CountElemsPerBucketKernel(SlabHashBackendImpl<Key, Hash, Eq> impl, int64_t *bucket_elem_counts)

__global__ void open3d::core::CountSlabsPerSuperblockKernel(SlabNodeManagerImpl impl, uint32_t *slabs_per_superblock)

void open3d::core::CPUResetHeap(Tensor &heap)
std::shared_ptr<DeviceHashBackend> open3d::core::CreateCPUHashBackend(int64_t init_capacity, const Dtype &key_dtype, const SizeVector &key_element_shape, const std::vector<Dtype> &value_dtypes, const std::vector<SizeVector> &value_element_shapes, const Device &device, const HashBackendType &backend)

    Non-templated factory.

std::shared_ptr<DeviceHashBackend> open3d::core::CreateCUDAHashBackend(int64_t init_capacity, const Dtype &key_dtype, const SizeVector &key_element_shape, const std::vector<Dtype> &value_dtypes, const std::vector<SizeVector> &value_element_shapes, const Device &device, const HashBackendType &backend)

std::shared_ptr<DeviceHashBackend> open3d::core::CreateDeviceHashBackend(int64_t init_capacity, const Dtype &key_dtype, const SizeVector &key_element_shape, const std::vector<Dtype> &value_dtypes, const std::vector<SizeVector> &value_element_shapes, const Device &device, const HashBackendType &backend)

    Factory functions:
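A hedged construction sketch (the key/value layout below is illustrative, not prescribed by the API):

    using namespace open3d::core;

    void HashBackendDemo() {
        // Backend with Int64 scalar keys and one Float32 scalar value per key.
        std::shared_ptr<DeviceHashBackend> backend = CreateDeviceHashBackend(
                /*init_capacity=*/1024,
                /*key_dtype=*/Int64,
                /*key_element_shape=*/SizeVector{1},
                /*value_dtypes=*/{Float32},
                /*value_element_shapes=*/{SizeVector{1}},
                Device("CPU:0"),
                HashBackendType::Default);
    }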
double open3d::core::Det(const Tensor &A)
__global__ void open3d::core::EraseKernelPass0(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)

__global__ void open3d::core::EraseKernelPass1(SlabHashBackendImpl<Key, Hash, Eq> impl, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)

__global__ void open3d::core::FindKernel(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)
__global__ void open3d::core::GetActiveIndicesKernel(SlabHashBackendImpl<Key, Hash, Eq> impl, buf_index_t *output_buf_indices, uint32_t *output_count)
__global__ void open3d::core::InsertKernelPass0(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, int heap_counter_prev, int64_t count)

    Kernels.

__global__ void open3d::core::InsertKernelPass1(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)

__global__ void open3d::core::InsertKernelPass2(SlabHashBackendImpl<Key, Hash, Eq> impl, const void *const *input_values_soa, buf_index_t *output_buf_indices, bool *output_masks, int64_t count, int64_t n_values)
void open3d::core::Inverse(const Tensor &A, Tensor &output)

    Computes A^{-1} with LU factorization, where A is an N x N square matrix.
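A brief sketch (values chosen so the inverse is easy to verify by hand):

    using namespace open3d::core;

    void InverseDemo() {
        Tensor A = Tensor::Init<double>({{4, 7}, {2, 6}});
        Tensor A_inv;
        Inverse(A, A_inv);
        // A_inv == {{0.6, -0.7}, {-0.2, 0.4}}, since det(A) = 10.
    }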
void open3d::core::InverseCPU(void *A_data, void *ipiv_data, void *output_data, int64_t n, Dtype dtype, const Device &device)

void open3d::core::InverseCUDA(void *A_data, void *ipiv_data, void *output_data, int64_t n, Dtype dtype, const Device &device)
void open3d::core::LeastSquares(const Tensor &A, const Tensor &B, Tensor &X)

    Solve AX = B with QR decomposition. A is a full-rank m x n matrix (m >= n).
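A sketch of an overdetermined solve (values are illustrative):

    using namespace open3d::core;

    void LeastSquaresDemo() {
        // Fit y = c0 + c1 * x through four points: A is {4, 2}, B is {4, 1}.
        Tensor A = Tensor::Init<double>({{1, 1}, {1, 2}, {1, 3}, {1, 4}});
        Tensor B = Tensor::Init<double>({{6}, {5}, {7}, {10}});
        Tensor X;
        LeastSquares(A, B, X);  // X ({2, 1}) minimizes ||AX - B||.
    }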
void open3d::core::LeastSquaresCPU(void *A_data, void *B_data, int64_t m, int64_t n, int64_t k, Dtype dtype, const Device &device)

void open3d::core::LeastSquaresCUDA(void *A_data, void *B_data, int64_t m, int64_t n, int64_t k, Dtype dtype, const Device &device)
void open3d::core::LU(const Tensor &A, Tensor &permutation, Tensor &lower, Tensor &upper, const bool permute_l)
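A sketch of the factorization outputs. The permute_l behavior is an assumption by analogy with scipy.linalg.lu, where permute_l folds the permutation into the lower factor:

    using namespace open3d::core;

    void LUDemo() {
        Tensor A = Tensor::Init<float>({{2, 0, 1}, {1, 3, 2}, {1, 0, 0}});
        Tensor P, L, U;
        LU(A, P, L, U, /*permute_l=*/false);  // A == P @ L @ U
        // With permute_l == true, the permutation is presumably applied to L
        // (assumption), yielding A == (P @ L) @ U.
    }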
void open3d::core::LUCPU(void *A_data, void *ipiv_data, int64_t rows, int64_t cols, Dtype dtype, const Device &device)

void open3d::core::LUCUDA(void *A_data, void *ipiv_data, int64_t rows, int64_t cols, Dtype dtype, const Device &device)
OPEN3D_HOST_DEVICE Pair<First, Second> open3d::core::make_pair(const First &_first, const Second &_second)
void open3d::core::Matmul(const Tensor &A, const Tensor &B, Tensor &C)

    Computes matrix multiplication C = AB.
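A minimal sketch (shapes chosen for illustration):

    using namespace open3d::core;

    void MatmulDemo() {
        Tensor A = Tensor::Ones({2, 3}, Float32);
        Tensor B = Tensor::Ones({3, 4}, Float32);
        Tensor C;
        Matmul(A, B, C);  // C has shape {2, 4}; every entry equals 3.
    }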
void open3d::core::MatmulCPU(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, Dtype dtype)

void open3d::core::MatmulCUDA(void *A_data, void *B_data, void *C_data, int64_t m, int64_t k, int64_t n, Dtype dtype, const Device &device)
Tensor open3d::core::Maximum(const Tensor &input, const Tensor &other)

    Computes the element-wise maximum of input and other. The tensors must have the same data type and device. If input.GetShape() != other.GetShape(), they are broadcast to a common shape, which becomes the shape of the output.

    Parameters:
        input  The input tensor.
        other  The second input tensor.

Tensor open3d::core::Minimum(const Tensor &input, const Tensor &other)

    Computes the element-wise minimum of input and other. The tensors must have the same data type and device. If input.GetShape() != other.GetShape(), they are broadcast to a common shape, which becomes the shape of the output.

    Parameters:
        input  The input tensor.
        other  The second input tensor.
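A sketch showing the broadcasting behavior (values are illustrative):

    using namespace open3d::core;

    void MinMaxDemo() {
        Tensor x = Tensor::Init<float>({{1, 5}, {3, 2}});  // shape {2, 2}
        Tensor y = Tensor::Init<float>({4});               // shape {1}, broadcasts
        Tensor hi = Maximum(x, y);  // {{4, 5}, {4, 4}}
        Tensor lo = Minimum(x, y);  // {{1, 4}, {3, 2}}
    }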
template<typename func_t>
void open3d::core::ParallelFor(const Device &device, int64_t n, const func_t &func)    [inline]

    Run a function in parallel on CPU or CUDA.

    Parameters:
        device  The device for the parallel for loop to run on.
        n       The number of workloads.
        func    The function to be executed in parallel. The function should take an int64_t workload index and return void, i.e., void func(int64_t).

    Note: this is optimized for uniform work items, i.e., where each call to func takes the same time.

template<typename vec_func_t, typename func_t>
void open3d::core::ParallelFor(const Device &device, int64_t n, const func_t &func, const vec_func_t &vec_func)

    Run a potentially vectorized function in parallel on CPU or CUDA.

    Parameters:
        device    The device for the parallel for loop to run on.
        n         The number of workloads.
        func      The function to be executed in parallel. The function should take an int64_t workload index and return void, i.e., void func(int64_t).
        vec_func  The vectorized function to be executed in parallel. The function should be provided using the OPEN3D_VECTORIZED macro, e.g., OPEN3D_VECTORIZED(MyISPCKernel, some_used_variable).

    Note: this is optimized for uniform work items, i.e., where each call to func takes the same time.
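A usage sketch in the spirit of the header's example; MyFile_ispc.h and MyFillKernel stand in for a user-provided ISPC kernel and are hypothetical:

    #ifdef BUILD_ISPC_MODULE
    #include "MyFile_ispc.h"  // hypothetical generated ISPC header
    #endif

    #include <vector>

    #include "open3d/core/ParallelFor.h"

    void FillDemo() {
        std::vector<float> v(1000);
        float fill_value = 42.0f;
        open3d::core::ParallelFor(
                open3d::core::Device("CPU:0"), v.size(),
                // Scalar fallback: one workload per index.
                [&](int64_t idx) { v[idx] = fill_value; },
                // Vectorized path, used when the ISPC module is built.
                OPEN3D_VECTORIZED(MyFillKernel, v.data(), fill_value));
    }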
template<typename func_t>
void open3d::core::ParallelForCPU_(const Device &device, int64_t n, const func_t &func)

    Run a function in parallel on CPU.
void *open3d::core::safe_malloc(size_t Sz)    [inline]

void *open3d::core::safe_realloc(void *Ptr, size_t Sz)    [inline]
void open3d::core::Solve(const Tensor &A, const Tensor &B, Tensor &X)

    Solve AX = B with LU decomposition. A is a square matrix.
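A brief sketch (values chosen so the solution is easy to verify by hand):

    using namespace open3d::core;

    void SolveDemo() {
        // 3x + y = 9 and x + 2y = 8  =>  x = 2, y = 3.
        Tensor A = Tensor::Init<double>({{3, 1}, {1, 2}});
        Tensor B = Tensor::Init<double>({{9}, {8}});
        Tensor X;
        Solve(A, B, X);  // X == {{2}, {3}}
    }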
void open3d::core::SolveCPU(void *A_data, void *B_data, void *ipiv_data, int64_t n, int64_t k, Dtype dtype, const Device &device)

void open3d::core::SolveCUDA(void *A_data, void *B_data, void *ipiv_data, int64_t n, int64_t k, Dtype dtype, const Device &device)
__global__ void open3d::core::STDGPUEraseKernel(InternalStdGPUHashBackend<Key, Hash, Eq> map, CUDAHashBackendBufferAccessor buffer_accessor, const Key *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)

__global__ void open3d::core::STDGPUFindKernel(InternalStdGPUHashBackend<Key, Hash, Eq> map, CUDAHashBackendBufferAccessor buffer_accessor, const Key *input_keys, buf_index_t *output_buf_indices, bool *output_masks, int64_t count)

__global__ void open3d::core::STDGPUInsertKernel(InternalStdGPUHashBackend<Key, Hash, Eq> map, CUDAHashBackendBufferAccessor buffer_accessor, const Key *input_keys, const void *const *input_values_soa, buf_index_t *output_buf_indices, bool *output_masks, int64_t count, int64_t n_values)
void open3d::core::SVD(const Tensor &A, Tensor &U, Tensor &S, Tensor &VT)

    Computes the SVD decomposition A = U S VT, where A is an m x n tensor, U is m x m, S has min(m, n) elements, and VT is n x n.
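A brief sketch of the output shapes (values are illustrative):

    using namespace open3d::core;

    void SVDDemo() {
        Tensor A = Tensor::Init<double>({{2, 0}, {0, 1}, {0, 0}});  // {3, 2}
        Tensor U, S, VT;
        SVD(A, U, S, VT);  // U: {3, 3}, S: {2}, VT: {2, 2}
        // Singular values: S == {2, 1}.
    }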
void open3d::core::SVDCPU(const void *A_data, void *U_data, void *S_data, void *VT_data, void *superb_data, int64_t m, int64_t n, Dtype dtype, const Device &device)

void open3d::core::SVDCUDA(const void *A_data, void *U_data, void *S_data, void *VT_data, void *superb_data, int64_t m, int64_t n, Dtype dtype, const Device &device)
template<unsigned Size, typename R>
SmallVector<ValueTypeFromRangeType<R>, Size> open3d::core::to_vector(R &&Range)

    Given a range of type R, iterate the entire range and return a SmallVector with the elements of the range. This is useful, for example, when you want to iterate a range and then sort the results.

template<typename R>
SmallVector<ValueTypeFromRangeType<R>, CalculateSmallVectorDefaultInlinedElements<ValueTypeFromRangeType<R>>::value> open3d::core::to_vector(R &&Range)
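A short sketch (the explicit inline size of 8 is arbitrary; the second overload picks a default inline size automatically):

    #include <algorithm>
    #include <vector>

    void ToVectorDemo() {
        std::vector<int> src = {3, 1, 2};
        auto small = open3d::core::to_vector<8>(src);  // SmallVector<int, 8>
        std::sort(small.begin(), small.end());         // {1, 2, 3}
    }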
void open3d::core::TriulCPU(const Tensor &A, Tensor &upper, Tensor &lower, const int diagonal)
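A sketch of the combined triangular split via the dispatching front-end Triul. The exact handling of the diagonal (here assumed to go to the upper factor, with the lower factor carrying a unit diagonal in the style of LU-factor splits) is an assumption:

    using namespace open3d::core;

    void TriulDemo() {
        Tensor A = Tensor::Init<float>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
        Tensor upper, lower;
        Triul(A, upper, lower, /*diagonal=*/0);
    }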
OPEN3D_API const Dtype open3d::core::Bool = Dtype::Bool
OPEN3D_API const Dtype open3d::core::Float32 = Dtype::Float32
OPEN3D_API const Dtype open3d::core::Float64 = Dtype::Float64
OPEN3D_API const Dtype open3d::core::Int16 = Dtype::Int16
OPEN3D_API const Dtype open3d::core::Int32 = Dtype::Int32
OPEN3D_API const Dtype open3d::core::Int64 = Dtype::Int64
OPEN3D_API const Dtype open3d::core::Int8 = Dtype::Int8
constexpr utility::nullopt_t open3d::core::None {utility::nullopt_t::init()}    [constexpr]

class LLVM_GSL_OWNER open3d::core::SmallVector

    Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).

OPEN3D_API const Dtype open3d::core::UInt16 = Dtype::UInt16
OPEN3D_API const Dtype open3d::core::UInt32 = Dtype::UInt32
OPEN3D_API const Dtype open3d::core::UInt64 = Dtype::UInt64
OPEN3D_API const Dtype open3d::core::UInt8 = Dtype::UInt8
OPEN3D_API const Dtype open3d::core::Undefined = Dtype::Undefined