#if defined(__CUDACC__)
void FillInRigidAlignmentTermCUDA
#else
void FillInRigidAlignmentTermCPU
#endif
        (core::Tensor &AtA,
         core::Tensor &Atb,
         core::Tensor &residual,
         const core::Tensor &Ti_ps,
         const core::Tensor &Tj_qs,
         const core::Tensor &Ri_normal_ps,
         int i,
         int j,
         float threshold) {
    int64_t n = Ti_ps.GetLength();
    if (Tj_qs.GetLength() != n || Ri_normal_ps.GetLength() != n) {
        utility::LogError(
                "Unable to setup linear system: input length mismatch.");
    }

    // Accumulate into a local 12x12 sub-system first; scatter it into the
    // global system at the end.
    core::Tensor AtA_local =
            core::Tensor::Zeros({12, 12}, core::Float32, AtA.GetDevice());
    core::Tensor Atb_local =
            core::Tensor::Zeros({12}, core::Float32, AtA.GetDevice());
    float *AtA_local_ptr = static_cast<float *>(AtA_local.GetDataPtr());
    float *Atb_local_ptr = static_cast<float *>(Atb_local.GetDataPtr());
    float *residual_ptr = static_cast<float *>(residual.GetDataPtr());

    const float *Ti_ps_ptr = static_cast<const float *>(Ti_ps.GetDataPtr());
    const float *Tj_qs_ptr = static_cast<const float *>(Tj_qs.GetDataPtr());
    const float *Ri_normal_ps_ptr =
            static_cast<const float *>(Ri_normal_ps.GetDataPtr());
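    // Each correspondence contributes a point-to-plane residual
    //     r = (Ti p - Tj q) . (Ri n_p),
    // evaluated in parallel over all n correspondences below.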
    core::ParallelFor(
            AtA.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                const float *p_prime = Ti_ps_ptr + 3 * workload_idx;
                const float *q_prime = Tj_qs_ptr + 3 * workload_idx;
                const float *normal_p_prime =
                        Ri_normal_ps_ptr + 3 * workload_idx;

                float r = (p_prime[0] - q_prime[0]) * normal_p_prime[0] +
                          (p_prime[1] - q_prime[1]) * normal_p_prime[1] +
                          (p_prime[2] - q_prime[2]) * normal_p_prime[2];
                if (abs(r) > threshold) return;
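                // Jacobian layout: entries 0..5 form the pose-i block
                // (rotational part q' x n', translational part n');
                // entries 6..11 are the negated copy for pose j.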
                float J_ij[12];
                J_ij[0] = -q_prime[2] * normal_p_prime[1] +
                          q_prime[1] * normal_p_prime[2];
                J_ij[1] = q_prime[2] * normal_p_prime[0] -
                          q_prime[0] * normal_p_prime[2];
                J_ij[2] = -q_prime[1] * normal_p_prime[0] +
                          q_prime[0] * normal_p_prime[1];
                J_ij[3] = normal_p_prime[0];
                J_ij[4] = normal_p_prime[1];
                J_ij[5] = normal_p_prime[2];
                for (int k = 0; k < 6; ++k) {
                    J_ij[k + 6] = -J_ij[k];
                }
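                // Accumulate J^T J and J^T r into the local system. The CUDA
                // path relies on atomics; the OpenMP path serializes through
                // a named critical section instead.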
#if defined(BUILD_CUDA_MODULE) && defined(__CUDACC__)
                for (int i_local = 0; i_local < 12; ++i_local) {
                    for (int j_local = 0; j_local < 12; ++j_local) {
                        atomicAdd(&AtA_local_ptr[i_local * 12 + j_local],
                                  J_ij[i_local] * J_ij[j_local]);
                    }
                    atomicAdd(&Atb_local_ptr[i_local], J_ij[i_local] * r);
                }
                atomicAdd(residual_ptr, r * r);
#else
#pragma omp critical(FillInRigidAlignmentTermCPU)
                {
                    for (int i_local = 0; i_local < 12; ++i_local) {
                        for (int j_local = 0; j_local < 12; ++j_local) {
                            AtA_local_ptr[i_local * 12 + j_local] +=
                                    J_ij[i_local] * J_ij[j_local];
                        }
                        Atb_local_ptr[i_local] += J_ij[i_local] * r;
                    }
                    *residual_ptr += r * r;
                }
#endif
            });
    // Then fill-in the global linear system: the 12 local unknowns map to
    // rows/columns [6i, 6i + 5] and [6j, 6j + 5].
    std::vector<int64_t> indices_vec(12);
    for (int k = 0; k < 6; ++k) {
        indices_vec[k] = i * 6 + k;
        indices_vec[k + 6] = j * 6 + k;
    }
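    // Expand the 12 indices to all 144 (row, col) pairs so the 12x12 local
    // block can be scattered with a single advanced-indexing IndexSet call.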
    std::vector<int64_t> indices_i_vec;
    std::vector<int64_t> indices_j_vec;
    for (int local_i = 0; local_i < 12; ++local_i) {
        for (int local_j = 0; local_j < 12; ++local_j) {
            indices_i_vec.push_back(indices_vec[local_i]);
            indices_j_vec.push_back(indices_vec[local_j]);
        }
    }
    core::Tensor indices(indices_vec, {12}, core::Int64, AtA.GetDevice());
    core::Tensor indices_i(indices_i_vec, {12 * 12}, core::Int64,
                           AtA.GetDevice());
    core::Tensor indices_j(indices_j_vec, {12 * 12}, core::Int64,
                           AtA.GetDevice());

    core::Tensor AtA_sub = AtA.IndexGet({indices_i, indices_j});
    AtA.IndexSet({indices_i, indices_j}, AtA_sub + AtA_local.View({12 * 12}));

    core::Tensor Atb_sub = Atb.IndexGet({indices});
    Atb.IndexSet({indices}, Atb_sub + Atb_local.View({12, 1}));
}
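// The SLAC alignment term below extends the rigid term: besides the two 6-DoF
// poses, each point is embedded in a deformable control grid, so every
// correspondence additionally touches 2 x 8 grid vertices x 3 coordinates
// = 48 deformation unknowns, giving a 60-dimensional Jacobian in total.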
#if defined(__CUDACC__)
void FillInSLACAlignmentTermCUDA
#else
void FillInSLACAlignmentTermCPU
#endif
        (core::Tensor &AtA,
         core::Tensor &Atb,
         core::Tensor &residual,
         const core::Tensor &Ti_Cps,
         const core::Tensor &Tj_Cqs,
         const core::Tensor &Cnormal_ps,
         const core::Tensor &Ri_Cnormal_ps,
         const core::Tensor &RjT_Ri_Cnormal_ps,
         const core::Tensor &cgrid_idx_ps,
         const core::Tensor &cgrid_idx_qs,
         const core::Tensor &cgrid_ratio_qs,
         const core::Tensor &cgrid_ratio_ps,
         int i,
         int j,
         int n_frags,
         float threshold) {
    int64_t n = Ti_Cps.GetLength();
    if (Tj_Cqs.GetLength() != n || Cnormal_ps.GetLength() != n ||
        Ri_Cnormal_ps.GetLength() != n || RjT_Ri_Cnormal_ps.GetLength() != n ||
        cgrid_idx_ps.GetLength() != n || cgrid_ratio_ps.GetLength() != n ||
        cgrid_idx_qs.GetLength() != n || cgrid_ratio_qs.GetLength() != n) {
        utility::LogError(
                "Unable to setup linear system: input length mismatch.");
    }
    int n_vars = Atb.GetLength();
    float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr());
    float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr());
    float *residual_ptr = static_cast<float *>(residual.GetDataPtr());
    // Geometric quantities per correspondence.
    const float *Ti_Cps_ptr = static_cast<const float *>(Ti_Cps.GetDataPtr());
    const float *Tj_Cqs_ptr = static_cast<const float *>(Tj_Cqs.GetDataPtr());
    const float *Cnormal_ps_ptr =
            static_cast<const float *>(Cnormal_ps.GetDataPtr());
    const float *Ri_Cnormal_ps_ptr =
            static_cast<const float *>(Ri_Cnormal_ps.GetDataPtr());
    const float *RjT_Ri_Cnormal_ps_ptr =
            static_cast<const float *>(RjT_Ri_Cnormal_ps.GetDataPtr());

    // Control-grid embedding: 8 vertex indices and interpolation ratios
    // per point.
    const int *cgrid_idx_ps_ptr =
            static_cast<const int *>(cgrid_idx_ps.GetDataPtr());
    const int *cgrid_idx_qs_ptr =
            static_cast<const int *>(cgrid_idx_qs.GetDataPtr());
    const float *cgrid_ratio_ps_ptr =
            static_cast<const float *>(cgrid_ratio_ps.GetDataPtr());
    const float *cgrid_ratio_qs_ptr =
            static_cast<const float *>(cgrid_ratio_qs.GetDataPtr());
    core::ParallelFor(
            AtA.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                const float *Ti_Cp = Ti_Cps_ptr + 3 * workload_idx;
                const float *Tj_Cq = Tj_Cqs_ptr + 3 * workload_idx;
                const float *Cnormal_p = Cnormal_ps_ptr + 3 * workload_idx;
                const float *Ri_Cnormal_p =
                        Ri_Cnormal_ps_ptr + 3 * workload_idx;
                const float *RjTRi_Cnormal_p =
                        RjT_Ri_Cnormal_ps_ptr + 3 * workload_idx;

                const int *cgrid_idx_p = cgrid_idx_ps_ptr + 8 * workload_idx;
                const int *cgrid_idx_q = cgrid_idx_qs_ptr + 8 * workload_idx;
                const float *cgrid_ratio_p =
                        cgrid_ratio_ps_ptr + 8 * workload_idx;
                const float *cgrid_ratio_q =
                        cgrid_ratio_qs_ptr + 8 * workload_idx;
                float r = (Ti_Cp[0] - Tj_Cq[0]) * Ri_Cnormal_p[0] +
                          (Ti_Cp[1] - Tj_Cq[1]) * Ri_Cnormal_p[1] +
                          (Ti_Cp[2] - Tj_Cq[2]) * Ri_Cnormal_p[2];
                if (abs(r) > threshold) return;
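                // 60 unknowns per correspondence: 2 poses x 6 DoF, plus
                // 2 x 8 control-grid vertices x 3 coordinates. J holds the
                // Jacobian entries, idx the matching global column indices.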
                float J[60] = {0};
                int idx[60] = {0};

                J[0] = -Tj_Cq[2] * Ri_Cnormal_p[1] + Tj_Cq[1] * Ri_Cnormal_p[2];
                J[1] = Tj_Cq[2] * Ri_Cnormal_p[0] - Tj_Cq[0] * Ri_Cnormal_p[2];
                J[2] = -Tj_Cq[1] * Ri_Cnormal_p[0] + Tj_Cq[0] * Ri_Cnormal_p[1];
                J[3] = Ri_Cnormal_p[0];
                J[4] = Ri_Cnormal_p[1];
                J[5] = Ri_Cnormal_p[2];
                for (int k = 0; k < 6; ++k) {
                    J[k + 6] = -J[k];
                    idx[k + 0] = 6 * i + k;
                    idx[k + 6] = 6 * j + k;
                }
                // Jacobian w.r.t. the 8 control-grid vertices embedding p.
                for (int k = 0; k < 8; ++k) {
                    J[12 + k * 3 + 0] = cgrid_ratio_p[k] * Cnormal_p[0];
                    J[12 + k * 3 + 1] = cgrid_ratio_p[k] * Cnormal_p[1];
                    J[12 + k * 3 + 2] = cgrid_ratio_p[k] * Cnormal_p[2];

                    idx[12 + k * 3 + 0] = 6 * n_frags + cgrid_idx_p[k] * 3 + 0;
                    idx[12 + k * 3 + 1] = 6 * n_frags + cgrid_idx_p[k] * 3 + 1;
                    idx[12 + k * 3 + 2] = 6 * n_frags + cgrid_idx_p[k] * 3 + 2;
                }
                // Jacobian w.r.t. the 8 control-grid vertices embedding q.
                for (int k = 0; k < 8; ++k) {
                    J[36 + k * 3 + 0] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[0];
                    J[36 + k * 3 + 1] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[1];
                    J[36 + k * 3 + 2] = -cgrid_ratio_q[k] * RjTRi_Cnormal_p[2];

                    idx[36 + k * 3 + 0] = 6 * n_frags + cgrid_idx_q[k] * 3 + 0;
                    idx[36 + k * 3 + 1] = 6 * n_frags + cgrid_idx_q[k] * 3 + 1;
                    idx[36 + k * 3 + 2] = 6 * n_frags + cgrid_idx_q[k] * 3 + 2;
                }
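                // Unlike the rigid term, the 60x60 outer product is scattered
                // straight into the global AtA (row stride n_vars), since the
                // touched rows/columns differ per correspondence.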
#if defined(__CUDACC__)
                for (int ki = 0; ki < 60; ++ki) {
                    for (int kj = 0; kj < 60; ++kj) {
                        float AtA_ij = J[ki] * J[kj];
                        int ij = idx[ki] * n_vars + idx[kj];
                        atomicAdd(AtA_ptr + ij, AtA_ij);
                    }
                    float Atb_i = J[ki] * r;
                    atomicAdd(Atb_ptr + idx[ki], Atb_i);
                }
                atomicAdd(residual_ptr, r * r);
#else
#pragma omp critical(FillInSLACAlignmentTermCPU)
                {
                    for (int ki = 0; ki < 60; ++ki) {
                        for (int kj = 0; kj < 60; ++kj) {
                            AtA_ptr[idx[ki] * n_vars + idx[kj]] +=
                                    J[ki] * J[kj];
                        }
                        Atb_ptr[idx[ki]] += J[ki] * r;
                    }
                    *residual_ptr += r * r;
                }
#endif
            });
}
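// The regularizer term below keeps the control grid locally rigid: for each
// grid vertex it estimates the rotation R that best maps its initial
// neighborhood onto the current one, then penalizes the deviation of each
// neighbor edge from that rigid prediction.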
#if defined(__CUDACC__)
void FillInSLACRegularizerTermCUDA
#else
void FillInSLACRegularizerTermCPU
#endif
        (core::Tensor &AtA,
         core::Tensor &Atb,
         core::Tensor &residual,
         const core::Tensor &grid_idx,
         const core::Tensor &grid_nbs_idx,
         const core::Tensor &grid_nbs_mask,
         const core::Tensor &positions_init,
         const core::Tensor &positions_curr,
         float weight,
         int n_frags,
         int anchor_idx) {
    int64_t n = grid_idx.GetLength();
    int64_t n_vars = Atb.GetLength();
    float *AtA_ptr = static_cast<float *>(AtA.GetDataPtr());
    float *Atb_ptr = static_cast<float *>(Atb.GetDataPtr());
    float *residual_ptr = static_cast<float *>(residual.GetDataPtr());

    const int *grid_idx_ptr = static_cast<const int *>(grid_idx.GetDataPtr());
    const int *grid_nbs_idx_ptr =
            static_cast<const int *>(grid_nbs_idx.GetDataPtr());
    const bool *grid_nbs_mask_ptr =
            static_cast<const bool *>(grid_nbs_mask.GetDataPtr());

    const float *positions_init_ptr =
            static_cast<const float *>(positions_init.GetDataPtr());
    const float *positions_curr_ptr =
            static_cast<const float *>(positions_curr.GetDataPtr());
    core::ParallelFor(
            AtA.GetDevice(), n, [=] OPEN3D_DEVICE(int64_t workload_idx) {
                int idx_i = grid_idx_ptr[workload_idx];

                const int *idx_nbs = grid_nbs_idx_ptr + 6 * workload_idx;
                const bool *mask_nbs = grid_nbs_mask_ptr + 6 * workload_idx;
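                // Accumulate the 3x3 cross-covariance between initial and
                // current neighbor-edge vectors; its SVD yields the best-fit
                // rotation (a small Procrustes problem per vertex).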
                float cov[3][3] = {{0}};
                float U[3][3], V[3][3], S[3];

                for (int k = 0; k < 6; ++k) {
                    bool mask_k = mask_nbs[k];
                    if (!mask_k) continue;

                    int idx_k = idx_nbs[k];

                    float diff_ik_init[3] = {
                            positions_init_ptr[idx_i * 3 + 0] -
                                    positions_init_ptr[idx_k * 3 + 0],
                            positions_init_ptr[idx_i * 3 + 1] -
                                    positions_init_ptr[idx_k * 3 + 1],
                            positions_init_ptr[idx_i * 3 + 2] -
                                    positions_init_ptr[idx_k * 3 + 2]};
                    float diff_ik_curr[3] = {
                            positions_curr_ptr[idx_i * 3 + 0] -
                                    positions_curr_ptr[idx_k * 3 + 0],
                            positions_curr_ptr[idx_i * 3 + 1] -
                                    positions_curr_ptr[idx_k * 3 + 1],
                            positions_curr_ptr[idx_i * 3 + 2] -
                                    positions_curr_ptr[idx_k * 3 + 2]};

                    for (int i = 0; i < 3; ++i) {
                        for (int j = 0; j < 3; ++j) {
                            cov[i][j] += diff_ik_init[i] * diff_ik_curr[j];
                        }
                    }
                }
                float R[3][3];
                // ... (rotation estimation: R is recovered from the SVD of
                // cov via svd3x3 / transpose3x3_ / matmul3x3_3x3, with a
                // det3x3 sign check to rule out reflections) ...

                // The anchor vertex is pinned with the identity rotation.
                if (idx_i == anchor_idx) {
                    R[0][0] = R[1][1] = R[2][2] = 1;
                    R[0][1] = R[0][2] = R[1][0] = R[1][2] = R[2][0] =
                            R[2][1] = 0;
                }
                // Accumulate the residual and partials for each valid
                // neighbor edge.
                for (int k = 0; k < 6; ++k) {
                    bool mask_k = mask_nbs[k];
                    if (!mask_k) continue;

                    int idx_k = idx_nbs[k];

                    float diff_ik_init[3] = {
                            positions_init_ptr[idx_i * 3 + 0] -
                                    positions_init_ptr[idx_k * 3 + 0],
                            positions_init_ptr[idx_i * 3 + 1] -
                                    positions_init_ptr[idx_k * 3 + 1],
                            positions_init_ptr[idx_i * 3 + 2] -
                                    positions_init_ptr[idx_k * 3 + 2]};
                    float diff_ik_curr[3] = {
                            positions_curr_ptr[idx_i * 3 + 0] -
                                    positions_curr_ptr[idx_k * 3 + 0],
                            positions_curr_ptr[idx_i * 3 + 1] -
                                    positions_curr_ptr[idx_k * 3 + 1],
                            positions_curr_ptr[idx_i * 3 + 2] -
                                    positions_curr_ptr[idx_k * 3 + 2]};
                    float R_diff_ik_curr[3];
                    core::linalg::kernel::matmul3x3_3x1(*R, diff_ik_init,
                                                        R_diff_ik_curr);

                    // Residual: deviation of the current edge from the
                    // rigidly rotated initial edge.
                    float local_r[3];
                    local_r[0] = diff_ik_curr[0] - R_diff_ik_curr[0];
                    local_r[1] = diff_ik_curr[1] - R_diff_ik_curr[1];
                    local_r[2] = diff_ik_curr[2] - R_diff_ik_curr[2];

                    int offset_idx_i = 3 * idx_i + 6 * n_frags;
                    int offset_idx_k = 3 * idx_k + 6 * n_frags;
#if defined(__CUDACC__)
                    atomicAdd(residual_ptr,
                              weight * (local_r[0] * local_r[0] +
                                        local_r[1] * local_r[1] +
                                        local_r[2] * local_r[2]));

                    for (int axis = 0; axis < 3; ++axis) {
                        // AtA: per-axis 2x2 block weight * [[1, -1], [-1, 1]]
                        // over the (i, k) vertex pair.
                        atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars +
                                           offset_idx_i + axis],
                                  weight);
                        atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars +
                                           offset_idx_k + axis],
                                  weight);
                        atomicAdd(&AtA_ptr[(offset_idx_i + axis) * n_vars +
                                           offset_idx_k + axis],
                                  -weight);
                        atomicAdd(&AtA_ptr[(offset_idx_k + axis) * n_vars +
                                           offset_idx_i + axis],
                                  -weight);

                        atomicAdd(&Atb_ptr[offset_idx_i + axis],
                                  +weight * local_r[axis]);
                        atomicAdd(&Atb_ptr[offset_idx_k + axis],
                                  -weight * local_r[axis]);
                    }
#else
#pragma omp critical(FillInSLACRegularizerTermCPU)
                    {
                        *residual_ptr += weight * (local_r[0] * local_r[0] +
                                                   local_r[1] * local_r[1] +
                                                   local_r[2] * local_r[2]);

                        for (int axis = 0; axis < 3; ++axis) {
                            AtA_ptr[(offset_idx_i + axis) * n_vars +
                                    offset_idx_i + axis] += weight;
                            AtA_ptr[(offset_idx_k + axis) * n_vars +
                                    offset_idx_k + axis] += weight;
                            AtA_ptr[(offset_idx_i + axis) * n_vars +
                                    offset_idx_k + axis] -= weight;
                            AtA_ptr[(offset_idx_k + axis) * n_vars +
                                    offset_idx_i + axis] -= weight;

                            Atb_ptr[offset_idx_i + axis] +=
                                    weight * local_r[axis];
                            Atb_ptr[offset_idx_k + axis] -=
                                    weight * local_r[axis];
                        }
                    }
#endif
                }
            });
}
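// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original file): n_frags,
// n_grid, device, and the correspondence/grid tensors below are hypothetical
// placeholders. The kernels above accumulate into one dense system of size
// n_vars = 6 * n_frags + 3 * n_grid:
//
//     int n_vars = 6 * n_frags + 3 * n_grid;
//     core::Tensor AtA = core::Tensor::Zeros({n_vars, n_vars},
//                                            core::Float32, device);
//     core::Tensor Atb = core::Tensor::Zeros({n_vars, 1},
//                                            core::Float32, device);
//     core::Tensor residual = core::Tensor::Zeros({1}, core::Float32, device);
//
//     // One alignment term per fragment pair (i, j) with correspondences,
//     // plus one regularizer pass over all control-grid vertices:
//     FillInSLACAlignmentTermCPU(AtA, Atb, residual, Ti_Cps, Tj_Cqs,
//                                Cnormal_ps, Ri_Cnormal_ps, RjT_Ri_Cnormal_ps,
//                                cgrid_idx_ps, cgrid_idx_qs, cgrid_ratio_qs,
//                                cgrid_ratio_ps, i, j, n_frags, threshold);
//     FillInSLACRegularizerTermCPU(AtA, Atb, residual, grid_idx, grid_nbs_idx,
//                                  grid_nbs_mask, positions_init,
//                                  positions_curr, weight, n_frags,
//                                  anchor_idx);
//
//     // Then solve AtA * delta = -Atb with a dense linear solver and apply
//     // the 6-DoF pose updates and 3-DoF grid displacements from delta.
// ---------------------------------------------------------------------------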