ViennaCL - The Vienna Computing Library  1.7.0
Free open-source GPU-accelerated linear algebra and solver library.
ilut.hpp
#ifndef VIENNACL_LINALG_DETAIL_ILUT_HPP_
#define VIENNACL_LINALG_DETAIL_ILUT_HPP_

/* =========================================================================
   Copyright (c) 2010-2015, Institute for Microelectronics,
                            Institute for Analysis and Scientific Computing,
                            TU Wien.
   Portions of this software are copyright by UChicago Argonne, LLC.

                            -----------------
                  ViennaCL - The Vienna Computing Library
                            -----------------

   Project Head:    Karl Rupp                   rupp@iue.tuwien.ac.at

   (A list of authors and contributors can be found in the manual)

   License:         MIT (X11), see file LICENSE in the base directory
============================================================================= */

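/** @file viennacl/linalg/detail/ilu/ilut.hpp
    @brief Implementation of an incomplete LU factorization preconditioner with threshold (ILUT)
*/
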
#include <vector>
#include <cmath>
#include <iostream>
#include <algorithm>   // std::sort, std::fill (used by the ILUT routine below)
#include <cassert>
#include "viennacl/forwards.h"
#include "viennacl/tools/tools.hpp"
#include "viennacl/linalg/detail/ilu/common.hpp"
#include "viennacl/compressed_matrix.hpp"

#include "viennacl/linalg/host_based/common.hpp"

#include <map>
namespace viennacl
{
namespace linalg
{

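/** @brief A tag for incomplete LU factorization with threshold (ILUT) */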
class ilut_tag
{
public:
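  /** @brief The constructor.
    *
    * @param entries_per_row        Maximum number of nonzero entries kept per row of L and of U
    * @param drop_tolerance         Relative drop tolerance (scaled by the norm of the current row)
    * @param with_level_scheduling  If true, level scheduling is used for the triangular substitutions
    */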
  ilut_tag(unsigned int entries_per_row = 20,
           double drop_tolerance = 1e-4,
           bool with_level_scheduling = false)
    : entries_per_row_(entries_per_row),
      drop_tolerance_(drop_tolerance),
      use_level_scheduling_(with_level_scheduling) {}

  void set_drop_tolerance(double tol)
  {
    if (tol > 0)
      drop_tolerance_ = tol;
  }
  double get_drop_tolerance() const { return drop_tolerance_; }

  void set_entries_per_row(unsigned int e)
  {
    if (e > 0)
      entries_per_row_ = e;
  }

  unsigned int get_entries_per_row() const { return entries_per_row_; }

  bool use_level_scheduling() const { return use_level_scheduling_; }
  void use_level_scheduling(bool b) { use_level_scheduling_ = b; }

private:
  unsigned int entries_per_row_;
  double drop_tolerance_;
  bool use_level_scheduling_;
};


namespace detail
{
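  /** @brief Helper struct for holding a sparse vector in linear memory. For internal use only. */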
  template<typename NumericT>
  struct ilut_sparse_vector
  {
    ilut_sparse_vector(vcl_size_t alloc_size = 0) : size_(0), col_indices_(alloc_size), elements_(alloc_size) {}

    void resize_if_bigger(vcl_size_t s)
    {
      if (s > elements_.size())
      {
        col_indices_.resize(s);
        elements_.resize(s);
      }
      size_ = s;
    }

    vcl_size_t size_;
    std::vector<unsigned int> col_indices_;
    std::vector<NumericT> elements_;
  };

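  /** @brief Subtracts a scaled sparse vector u from a sparse vector w and writes the result to z: z = w - alpha * u.
    *
    * Entries are assumed to be sorted by column index; returns the number of entries written to z.
    */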
119  template<typename IndexT, typename NumericT>
120  IndexT merge_subtract_sparse_rows(IndexT const * w_coords, NumericT const * w_elements, IndexT w_size,
121  IndexT const * u_coords, NumericT const * u_elements, IndexT u_size, NumericT alpha,
122  IndexT * z_coords, NumericT * z_elements)
123  {
124  IndexT index_w = 0;
125  IndexT index_u = 0;
126  IndexT index_z = 0;
127 
128  while (1)
129  {
130  if (index_w < w_size && index_u < u_size)
131  {
132  if (w_coords[index_w] < u_coords[index_u])
133  {
134  z_coords[index_z] = w_coords[index_w];
135  z_elements[index_z++] = w_elements[index_w++];
136  }
137  else if (w_coords[index_w] == u_coords[index_u])
138  {
139  z_coords[index_z] = w_coords[index_w];
140  z_elements[index_z++] = w_elements[index_w++] - alpha * u_elements[index_u++];
141  }
142  else
143  {
144  z_coords[index_z] = u_coords[index_u];
145  z_elements[index_z++] = - alpha * u_elements[index_u++];
146  }
147  }
148  else if (index_w == w_size && index_u < u_size)
149  {
150  z_coords[index_z] = u_coords[index_u];
151  z_elements[index_z++] = - alpha * u_elements[index_u++];
152  }
153  else if (index_w < w_size && index_u == u_size)
154  {
155  z_coords[index_z] = w_coords[index_w];
156  z_elements[index_z++] = w_elements[index_w++];
157  }
158  else
159  return index_z;
160  }
161  }
162 
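  /** @brief Inserts the pair (index, value) into 'map' such that the entries stay sorted by decreasing absolute value.
    *
    * 'map' has a fixed size, so whatever is shifted past the end (i.e. the entry of smallest magnitude) is dropped.
    */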
  template<typename SizeT, typename NumericT>
  void insert_with_value_sort(std::vector<std::pair<SizeT, NumericT> > & map,
                              SizeT index, NumericT value)
  {
    NumericT abs_value = std::fabs(value);
    if (abs_value > 0)
    {
      // find first element with smaller absolute value:
      std::size_t first_smaller_index = 0;
      while (first_smaller_index < map.size() && std::fabs(map[first_smaller_index].second) > abs_value)
        ++first_smaller_index;

      std::pair<SizeT, NumericT> tmp(index, value);
      for (std::size_t j=first_smaller_index; j<map.size(); ++j)
        std::swap(map[j], tmp);
    }
  }

} // namespace detail

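/** @brief Implementation of the ILU factorization with threshold (ILUT) for compressed_matrix, writing the factors to L and U.
  *
  * The numbered 'line' comments in the body refer to the steps of the classical ILUT algorithm
  * (cf. Y. Saad, "Iterative Methods for Sparse Linear Systems").
  *
  * @param A    The system matrix in CSR format
  * @param L    The output matrix for the (unit) lower triangular factor L
  * @param U    The output matrix for the upper triangular factor U
  * @param tag  An ilut_tag holding the ILUT parameters
  */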
template<typename NumericT>
void precondition(viennacl::compressed_matrix<NumericT> const & A,
                  viennacl::compressed_matrix<NumericT>       & L,
                  viennacl::compressed_matrix<NumericT>       & U,
                  ilut_tag const & tag)
{
  assert(A.size1() == L.size1() && bool("Output matrix size mismatch") );
  assert(A.size1() == U.size1() && bool("Output matrix size mismatch") );

  L.reserve( tag.get_entries_per_row()      * A.size1());
  U.reserve((tag.get_entries_per_row() + 1) * A.size1());

  vcl_size_t avg_nnz_per_row = static_cast<vcl_size_t>(A.nnz() / A.size1());
  detail::ilut_sparse_vector<NumericT> w1(tag.get_entries_per_row() * (avg_nnz_per_row + 10));
  detail::ilut_sparse_vector<NumericT> w2(tag.get_entries_per_row() * (avg_nnz_per_row + 10));
  detail::ilut_sparse_vector<NumericT> * w_in  = &w1;
  detail::ilut_sparse_vector<NumericT> * w_out = &w2;
  std::vector<NumericT> diagonal_U(A.size1());

  NumericT     const * elements_A   = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericT>(A.handle());
  unsigned int const * row_buffer_A = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle1());
  unsigned int const * col_buffer_A = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(A.handle2());

  NumericT     * elements_L   = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericT>(L.handle());
  unsigned int * row_buffer_L = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(L.handle1()); row_buffer_L[0] = 0;
  unsigned int * col_buffer_L = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(L.handle2());

  NumericT     * elements_U   = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericT>(U.handle());
  unsigned int * row_buffer_U = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(U.handle1()); row_buffer_U[0] = 0;
  unsigned int * col_buffer_U = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(U.handle2());

  std::vector<std::pair<unsigned int, NumericT> > sorted_entries_L(tag.get_entries_per_row());
  std::vector<std::pair<unsigned int, NumericT> > sorted_entries_U(tag.get_entries_per_row());

  for (vcl_size_t i=0; i<viennacl::traits::size1(A); ++i) // Line 1
  {
    std::fill(sorted_entries_L.begin(), sorted_entries_L.end(), std::pair<unsigned int, NumericT>(0, NumericT(0)));
    std::fill(sorted_entries_U.begin(), sorted_entries_U.end(), std::pair<unsigned int, NumericT>(0, NumericT(0)));

    //line 2: set up w
    w_in->resize_if_bigger(row_buffer_A[i+1] - row_buffer_A[i]);
    NumericT row_norm = 0;
    unsigned int k = 0;
    for (unsigned int j = row_buffer_A[i]; j < row_buffer_A[i+1]; ++j, ++k)
    {
      w_in->col_indices_[k] = col_buffer_A[j];
      NumericT entry = elements_A[j];
      w_in->elements_[k] = entry;
      row_norm += entry * entry;
    }
    row_norm = std::sqrt(row_norm);
    NumericT tau_i = static_cast<NumericT>(tag.get_drop_tolerance()) * row_norm;

    //line 3: Iterate over lower diagonal parts of A:
    k = 0;
    unsigned int current_col = (row_buffer_A[i+1] > row_buffer_A[i]) ? w_in->col_indices_[k] : static_cast<unsigned int>(i); // mind empty rows here!
    while (current_col < i)
    {
      //line 4:
      NumericT a_kk = diagonal_U[current_col];

      NumericT w_k_entry = w_in->elements_[k] / a_kk;
      w_in->elements_[k] = w_k_entry;

      //lines 5,6: (dropping rule to w_k)
      if ( std::fabs(w_k_entry) > tau_i)
      {
        //line 7:
        unsigned int row_U_begin = row_buffer_U[current_col];
        unsigned int row_U_end   = row_buffer_U[current_col + 1];

        if (row_U_end > row_U_begin)
        {
          w_out->resize_if_bigger(w_in->size_ + (row_U_end - row_U_begin) - 1);
          w_out->size_ = detail::merge_subtract_sparse_rows(&(w_in->col_indices_[0]), &(w_in->elements_[0]), static_cast<unsigned int>(w_in->size_),
                                                            col_buffer_U + row_U_begin + 1, elements_U + row_U_begin + 1, (row_U_end - row_U_begin) - 1, w_k_entry,
                                                            &(w_out->col_indices_[0]), &(w_out->elements_[0])
                                                           );
          ++k;
        }
      }
      else // drop element
      {
        w_out->resize_if_bigger(w_in->size_ - 1);
        for (unsigned int r = 0; r < k; ++r)
        {
          w_out->col_indices_[r] = w_in->col_indices_[r];
          w_out->elements_[r]    = w_in->elements_[r];
        }
        for (unsigned int r = k+1; r < w_in->size_; ++r)
        {
          w_out->col_indices_[r-1] = w_in->col_indices_[r];
          w_out->elements_[r-1]    = w_in->elements_[r];
        }

        // Note: No increment to k here, element was dropped!
      }

      // swap pointers to w1 and w2
      std::swap(w_in, w_out);

      // process next entry:
      current_col = (k < w_in->size_) ? w_in->col_indices_[k] : static_cast<unsigned int>(i);
    } // while()

    // Line 10: Apply a dropping rule to w
    // To do so, we write values to a temporary array
    for (unsigned int r = 0; r < w_in->size_; ++r)
    {
      unsigned int col   = w_in->col_indices_[r];
      NumericT     value = w_in->elements_[r];

      if (col < i) // entry for L:
        detail::insert_with_value_sort(sorted_entries_L, col, value);
      else if (col == i) // do not drop diagonal element
      {
        diagonal_U[i] = value;
        if (value <= 0 && value >= 0) // value is exactly zero
        {
          std::cerr << "ViennaCL: FATAL ERROR in ILUT(): Diagonal entry computed to zero (" << value << ") in row " << i << "!" << std::endl;
          throw zero_on_diagonal_exception("ILUT zero diagonal!");
        }
      }
      else // entry for U:
        detail::insert_with_value_sort(sorted_entries_U, col, value);
    }

    //Lines 10-12: Apply a dropping rule to w, write the largest p values to L and U
    unsigned int offset_L = row_buffer_L[i];
    std::sort(sorted_entries_L.begin(), sorted_entries_L.end());
    for (unsigned int j=0; j<tag.get_entries_per_row(); ++j)
      if (std::fabs(sorted_entries_L[j].second) > 0)
      {
        col_buffer_L[offset_L] = sorted_entries_L[j].first;
        elements_L[offset_L]   = sorted_entries_L[j].second;
        ++offset_L;
      }
    row_buffer_L[i+1] = offset_L;

    unsigned int offset_U = row_buffer_U[i];
    col_buffer_U[offset_U] = static_cast<unsigned int>(i);
    elements_U[offset_U]   = diagonal_U[i];
    ++offset_U;
    std::sort(sorted_entries_U.begin(), sorted_entries_U.end());
    for (unsigned int j=0; j<tag.get_entries_per_row(); ++j)
      if (std::fabs(sorted_entries_U[j].second) > 0)
      {
        col_buffer_U[offset_U] = sorted_entries_U[j].first;
        elements_U[offset_U]   = sorted_entries_U[j].second;
        ++offset_U;
      }
    row_buffer_U[i+1] = offset_U;

  } //for i
}


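/** @brief ILUT preconditioner class, can be supplied to solve()-routines. */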
template<typename MatrixT>
class ilut_precond
{
  typedef typename MatrixT::value_type      NumericType;

public:
  ilut_precond(MatrixT const & mat, ilut_tag const & tag) : tag_(tag), L_(mat.size1(), mat.size2()), U_(mat.size1(), mat.size2())
  {
    //initialize preconditioner:
    //std::cout << "Start CPU precond" << std::endl;
    init(mat);
    //std::cout << "End CPU precond" << std::endl;
  }

  template<typename VectorT>
  void apply(VectorT & vec) const
  {
    //Note: Since vec can be a rather arbitrary vector type, we call the more generic version in the backend manually:
    {
      unsigned int const * row_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(L_.handle1());
      unsigned int const * col_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(L_.handle2());
      NumericType  const * elements   = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericType>(L_.handle());

      viennacl::linalg::host_based::detail::csr_inplace_solve<NumericType>(row_buffer, col_buffer, elements, vec, L_.size2(), unit_lower_tag());
    }
    {
      unsigned int const * row_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(U_.handle1());
      unsigned int const * col_buffer = viennacl::linalg::host_based::detail::extract_raw_pointer<unsigned int>(U_.handle2());
      NumericType  const * elements   = viennacl::linalg::host_based::detail::extract_raw_pointer<NumericType>(U_.handle());

      viennacl::linalg::host_based::detail::csr_inplace_solve<NumericType>(row_buffer, col_buffer, elements, vec, U_.size2(), upper_tag());
    }
  }

private:
  void init(MatrixT const & mat)
  {
    viennacl::context host_context(viennacl::MAIN_MEMORY);
    viennacl::compressed_matrix<NumericType> temp;
    viennacl::switch_memory_context(temp, host_context);
    viennacl::switch_memory_context(L_, host_context);
    viennacl::switch_memory_context(U_, host_context);

    viennacl::copy(mat, temp);

    viennacl::linalg::precondition(temp, L_, U_, tag_);
  }

  ilut_tag tag_;
  viennacl::compressed_matrix<NumericType> L_;
  viennacl::compressed_matrix<NumericType> U_;
};


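/** @brief ILUT preconditioner class, can be supplied to solve()-routines.
  *
  * Specialization for compressed_matrix.
  */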
template<typename NumericT, unsigned int AlignmentV>
class ilut_precond< viennacl::compressed_matrix<NumericT, AlignmentV> >
{
  typedef viennacl::compressed_matrix<NumericT, AlignmentV>   MatrixType;

public:
  ilut_precond(MatrixType const & mat, ilut_tag const & tag)
    : tag_(tag),
      L_(mat.size1(), mat.size2(), viennacl::traits::context(mat)),
      U_(mat.size1(), mat.size2(), viennacl::traits::context(mat))
  {
    //initialize preconditioner:
    //std::cout << "Start GPU precond" << std::endl;
    init(mat);
    //std::cout << "End GPU precond" << std::endl;
  }

  void apply(viennacl::vector<NumericT> & vec) const
  {
    if (vec.handle().get_active_handle_id() != viennacl::MAIN_MEMORY)
    {
      if (tag_.use_level_scheduling())
      {
        //std::cout << "Using multifrontal on GPU..." << std::endl;
        viennacl::linalg::detail::level_scheduling_substitute(vec,
                                                              multifrontal_L_row_index_arrays_,
                                                              multifrontal_L_row_buffers_,
                                                              multifrontal_L_col_buffers_,
                                                              multifrontal_L_element_buffers_,
                                                              multifrontal_L_row_elimination_num_list_);

        vec = viennacl::linalg::element_div(vec, multifrontal_U_diagonal_);

        viennacl::linalg::detail::level_scheduling_substitute(vec,
                                                              multifrontal_U_row_index_arrays_,
                                                              multifrontal_U_row_buffers_,
                                                              multifrontal_U_col_buffers_,
                                                              multifrontal_U_element_buffers_,
                                                              multifrontal_U_row_elimination_num_list_);

      }
      else
      {
        viennacl::context host_context(viennacl::MAIN_MEMORY);
        viennacl::context old_context = viennacl::traits::context(vec);
        viennacl::switch_memory_context(vec, host_context);
        viennacl::linalg::inplace_solve(L_, vec, unit_lower_tag());
        viennacl::linalg::inplace_solve(U_, vec, upper_tag());
        viennacl::switch_memory_context(vec, old_context);
      }
    }
    else //apply ILUT directly:
    {
      viennacl::linalg::inplace_solve(L_, vec, unit_lower_tag());
      viennacl::linalg::inplace_solve(U_, vec, upper_tag());
    }
  }

private:
  void init(MatrixType const & mat)
  {
    viennacl::context host_context(viennacl::MAIN_MEMORY);
    viennacl::switch_memory_context(L_, host_context);
    viennacl::switch_memory_context(U_, host_context);

    if (viennacl::traits::context(mat).memory_type() == viennacl::MAIN_MEMORY)
    {
      viennacl::linalg::precondition(mat, L_, U_, tag_);
    }
    else //we need to copy to CPU
    {
      viennacl::compressed_matrix<NumericT> cpu_mat(mat.size1(), mat.size2(), viennacl::traits::context(mat));
      viennacl::switch_memory_context(cpu_mat, host_context);

      cpu_mat = mat;

      viennacl::linalg::precondition(cpu_mat, L_, U_, tag_);
    }

    if (!tag_.use_level_scheduling())
      return;

    //
    // multifrontal part:
    //

    viennacl::switch_memory_context(multifrontal_U_diagonal_, host_context);
    multifrontal_U_diagonal_.resize(U_.size1(), false);
    viennacl::linalg::detail::row_info(U_, multifrontal_U_diagonal_, viennacl::linalg::detail::SPARSE_ROW_DIAGONAL); // extract the diagonal of U

    viennacl::linalg::detail::level_scheduling_setup_L(L_,
                                                       multifrontal_U_diagonal_, //dummy
                                                       multifrontal_L_row_index_arrays_,
                                                       multifrontal_L_row_buffers_,
                                                       multifrontal_L_col_buffers_,
                                                       multifrontal_L_element_buffers_,
                                                       multifrontal_L_row_elimination_num_list_);


    viennacl::linalg::detail::level_scheduling_setup_U(U_,
                                                       multifrontal_U_diagonal_,
                                                       multifrontal_U_row_index_arrays_,
                                                       multifrontal_U_row_buffers_,
                                                       multifrontal_U_col_buffers_,
                                                       multifrontal_U_element_buffers_,
                                                       multifrontal_U_row_elimination_num_list_);

    //
    // Bring to device if necessary:
    //

    // L:

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_L_row_index_arrays_.begin();
         it != multifrontal_L_row_index_arrays_.end();
         ++it)
      viennacl::backend::switch_memory_context<unsigned int>(*it, viennacl::traits::context(mat));

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_L_row_buffers_.begin();
         it != multifrontal_L_row_buffers_.end();
         ++it)
      viennacl::backend::switch_memory_context<unsigned int>(*it, viennacl::traits::context(mat));

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_L_col_buffers_.begin();
         it != multifrontal_L_col_buffers_.end();
         ++it)
      viennacl::backend::switch_memory_context<unsigned int>(*it, viennacl::traits::context(mat));

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_L_element_buffers_.begin();
         it != multifrontal_L_element_buffers_.end();
         ++it)
      viennacl::backend::switch_memory_context<NumericT>(*it, viennacl::traits::context(mat));


    // U:

    viennacl::switch_memory_context(multifrontal_U_diagonal_, viennacl::traits::context(mat));

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_U_row_index_arrays_.begin();
         it != multifrontal_U_row_index_arrays_.end();
         ++it)
      viennacl::backend::switch_memory_context<unsigned int>(*it, viennacl::traits::context(mat));

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_U_row_buffers_.begin();
         it != multifrontal_U_row_buffers_.end();
         ++it)
      viennacl::backend::switch_memory_context<unsigned int>(*it, viennacl::traits::context(mat));

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_U_col_buffers_.begin();
         it != multifrontal_U_col_buffers_.end();
         ++it)
      viennacl::backend::switch_memory_context<unsigned int>(*it, viennacl::traits::context(mat));

    for (typename std::list< viennacl::backend::mem_handle >::iterator it = multifrontal_U_element_buffers_.begin();
         it != multifrontal_U_element_buffers_.end();
         ++it)
      viennacl::backend::switch_memory_context<NumericT>(*it, viennacl::traits::context(mat));


  }

  ilut_tag tag_;
  viennacl::compressed_matrix<NumericT> L_;
  viennacl::compressed_matrix<NumericT> U_;

  std::list<viennacl::backend::mem_handle> multifrontal_L_row_index_arrays_;
  std::list<viennacl::backend::mem_handle> multifrontal_L_row_buffers_;
  std::list<viennacl::backend::mem_handle> multifrontal_L_col_buffers_;
  std::list<viennacl::backend::mem_handle> multifrontal_L_element_buffers_;
  std::list<vcl_size_t>                    multifrontal_L_row_elimination_num_list_;

  viennacl::vector<NumericT> multifrontal_U_diagonal_;
  std::list<viennacl::backend::mem_handle> multifrontal_U_row_index_arrays_;
  std::list<viennacl::backend::mem_handle> multifrontal_U_row_buffers_;
  std::list<viennacl::backend::mem_handle> multifrontal_U_col_buffers_;
  std::list<viennacl::backend::mem_handle> multifrontal_U_element_buffers_;
  std::list<vcl_size_t>                    multifrontal_U_row_elimination_num_list_;
};

} // namespace linalg
} // namespace viennacl




#endif
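
Usage sketch (not part of ilut.hpp): the snippet below illustrates roughly how the ILUT preconditioner defined above is combined with one of ViennaCL's iterative solvers, here BiCGStab. The function name solve_with_ilut and the choice of solver and parameters are illustrative only; the system matrix A and right-hand side rhs are assumed to be set up by the caller.

#include "viennacl/compressed_matrix.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/ilu.hpp"        // convenience header that includes ilut.hpp
#include "viennacl/linalg/bicgstab.hpp"

viennacl::vector<double> solve_with_ilut(viennacl::compressed_matrix<double> const & A,
                                         viennacl::vector<double> const & rhs)
{
  // keep at most 20 entries per row of L and U, relative drop tolerance 1e-4
  viennacl::linalg::ilut_tag ilut_config(20, 1e-4);

  // set up the preconditioner (runs the ILUT factorization in init())
  viennacl::linalg::ilut_precond< viennacl::compressed_matrix<double> > precond(A, ilut_config);

  // solve A * x = rhs with ILUT-preconditioned BiCGStab
  return viennacl::linalg::solve(A, rhs, viennacl::linalg::bicgstab_tag(), precond);
}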