ViennaCL - The Vienna Computing Library  1.7.0
Free open-source GPU-accelerated linear algebra and solver library.
ell_matrix.hpp
Go to the documentation of this file.
1 #ifndef VIENNACL_ELL_MATRIX_HPP_
2 #define VIENNACL_ELL_MATRIX_HPP_
3 
4 /* =========================================================================
5  Copyright (c) 2010-2015, Institute for Microelectronics,
6  Institute for Analysis and Scientific Computing,
7  TU Wien.
8  Portions of this software are copyright by UChicago Argonne, LLC.
9 
10  -----------------
11  ViennaCL - The Vienna Computing Library
12  -----------------
13 
14  Project Head: Karl Rupp rupp@iue.tuwien.ac.at
15 
16  (A list of authors and contributors can be found in the manual)
17 
18  License: MIT (X11), see file LICENSE in the base directory
19 ============================================================================= */
20 
28 #include "viennacl/forwards.h"
29 #include "viennacl/vector.hpp"
30 
31 #include "viennacl/tools/tools.hpp"
32 
34 
35 namespace viennacl
36 {
52 template<typename NumericT, unsigned int AlignmentV /* see forwards.h for default argument */>
54 {
55 public:
59 
60  ell_matrix() : rows_(0), cols_(0), maxnnz_(0) {}
61 
62  ell_matrix(viennacl::context ctx) : rows_(0), cols_(0), maxnnz_(0)
63  {
64  coords_.switch_active_handle_id(ctx.memory_type());
65  elements_.switch_active_handle_id(ctx.memory_type());
66 
67 #ifdef VIENNACL_WITH_OPENCL
68  if (ctx.memory_type() == OPENCL_MEMORY)
69  {
70  coords_.opencl_handle().context(ctx.opencl_context());
71  elements_.opencl_handle().context(ctx.opencl_context());
72  }
73 #endif
74  }
75 
77  void clear()
78  {
79  maxnnz_ = 0;
80 
82  std::vector<NumericT> host_elements(internal_size1());
83 
84  viennacl::backend::memory_create(coords_, host_coords_buffer.element_size() * internal_size1(), viennacl::traits::context(coords_), host_coords_buffer.get());
85  viennacl::backend::memory_create(elements_, sizeof(NumericT) * internal_size1(), viennacl::traits::context(elements_), &(host_elements[0]));
86  }
87 
88  vcl_size_t internal_size1() const { return viennacl::tools::align_to_multiple<vcl_size_t>(rows_, AlignmentV); }
89  vcl_size_t internal_size2() const { return viennacl::tools::align_to_multiple<vcl_size_t>(cols_, AlignmentV); }
90 
91  vcl_size_t size1() const { return rows_; }
92  vcl_size_t size2() const { return cols_; }
93 
94  vcl_size_t internal_maxnnz() const {return viennacl::tools::align_to_multiple<vcl_size_t>(maxnnz_, AlignmentV); }
95  vcl_size_t maxnnz() const { return maxnnz_; }
96 
97  vcl_size_t nnz() const { return rows_ * maxnnz_; }
99 
100  handle_type & handle() { return elements_; }
101  const handle_type & handle() const { return elements_; }
102 
103  handle_type & handle2() { return coords_; }
104  const handle_type & handle2() const { return coords_; }
105 
106 #if defined(_MSC_VER) && _MSC_VER < 1500 //Visual Studio 2005 needs special treatment
107  template<typename CPUMatrixT>
108  friend void copy(const CPUMatrixT & cpu_matrix, ell_matrix & gpu_matrix );
109 #else
110  template<typename CPUMatrixT, typename T, unsigned int ALIGN>
111  friend void copy(const CPUMatrixT & cpu_matrix, ell_matrix<T, ALIGN> & gpu_matrix );
112 #endif
113 
114 private:
115  vcl_size_t rows_;
116  vcl_size_t cols_;
117  vcl_size_t maxnnz_;
118 
119  handle_type coords_;
120  handle_type elements_;
121 };
122 
123 template<typename CPUMatrixT, typename NumericT, unsigned int AlignmentV>
124 void copy(const CPUMatrixT& cpu_matrix, ell_matrix<NumericT, AlignmentV>& gpu_matrix )
125 {
126  assert( (gpu_matrix.size1() == 0 || viennacl::traits::size1(cpu_matrix) == gpu_matrix.size1()) && bool("Size mismatch") );
127  assert( (gpu_matrix.size2() == 0 || viennacl::traits::size2(cpu_matrix) == gpu_matrix.size2()) && bool("Size mismatch") );
128 
129  if (cpu_matrix.size1() > 0 && cpu_matrix.size2() > 0)
130  {
131  //determine max capacity for row
132  vcl_size_t max_entries_per_row = 0;
133  for (typename CPUMatrixT::const_iterator1 row_it = cpu_matrix.begin1(); row_it != cpu_matrix.end1(); ++row_it)
134  {
135  vcl_size_t num_entries = 0;
136  for (typename CPUMatrixT::const_iterator2 col_it = row_it.begin(); col_it != row_it.end(); ++col_it)
137  ++num_entries;
138 
139  max_entries_per_row = std::max(max_entries_per_row, num_entries);
140  }
141 
142  //setup GPU matrix
143  gpu_matrix.maxnnz_ = max_entries_per_row;
144  gpu_matrix.rows_ = cpu_matrix.size1();
145  gpu_matrix.cols_ = cpu_matrix.size2();
146 
147  vcl_size_t nnz = gpu_matrix.internal_nnz();
148 
150  std::vector<NumericT> elements(nnz, 0);
151 
152  // std::cout << "ELL_MATRIX copy " << gpu_matrix.maxnnz_ << " " << gpu_matrix.rows_ << " " << gpu_matrix.cols_ << " "
153  // << gpu_matrix.internal_maxnnz() << "\n";
154 
155  for (typename CPUMatrixT::const_iterator1 row_it = cpu_matrix.begin1(); row_it != cpu_matrix.end1(); ++row_it)
156  {
157  vcl_size_t data_index = 0;
158 
159  for (typename CPUMatrixT::const_iterator2 col_it = row_it.begin(); col_it != row_it.end(); ++col_it)
160  {
161  coords.set(gpu_matrix.internal_size1() * data_index + col_it.index1(), col_it.index2());
162  elements[gpu_matrix.internal_size1() * data_index + col_it.index1()] = *col_it;
163  //std::cout << *col_it << "\n";
164  data_index++;
165  }
166  }
167 
168  viennacl::backend::memory_create(gpu_matrix.handle2(), coords.raw_size(), traits::context(gpu_matrix.handle2()), coords.get());
169  viennacl::backend::memory_create(gpu_matrix.handle(), sizeof(NumericT) * elements.size(), traits::context(gpu_matrix.handle()), &(elements[0]));
170  }
171 }
172 
173 
174 
180 template<typename IndexT, typename NumericT, unsigned int AlignmentV>
181 void copy(std::vector< std::map<IndexT, NumericT> > const & cpu_matrix,
183 {
184  tools::const_sparse_matrix_adapter<NumericT, IndexT> temp(cpu_matrix, cpu_matrix.size(), cpu_matrix.size());
185  viennacl::copy(temp, gpu_matrix);
186 }
187 
188 
189 
190 
191 
192 
193 template<typename CPUMatrixT, typename NumericT, unsigned int AlignmentV>
194 void copy(const ell_matrix<NumericT, AlignmentV>& gpu_matrix, CPUMatrixT& cpu_matrix)
195 {
196  assert( (viennacl::traits::size1(cpu_matrix) == gpu_matrix.size1()) && bool("Size mismatch") );
197  assert( (viennacl::traits::size2(cpu_matrix) == gpu_matrix.size2()) && bool("Size mismatch") );
198 
199  if (gpu_matrix.size1() > 0 && gpu_matrix.size2() > 0)
200  {
201  std::vector<NumericT> elements(gpu_matrix.internal_nnz());
203 
204  viennacl::backend::memory_read(gpu_matrix.handle(), 0, sizeof(NumericT) * elements.size(), &(elements[0]));
205  viennacl::backend::memory_read(gpu_matrix.handle2(), 0, coords.raw_size(), coords.get());
206 
207  for (vcl_size_t row = 0; row < gpu_matrix.size1(); row++)
208  {
209  for (vcl_size_t ind = 0; ind < gpu_matrix.internal_maxnnz(); ind++)
210  {
211  vcl_size_t offset = gpu_matrix.internal_size1() * ind + row;
212 
213  NumericT val = elements[offset];
214  if (val <= 0 && val >= 0) // val == 0 without compiler warnings
215  continue;
216 
217  if (coords[offset] >= gpu_matrix.size2())
218  {
219  std::cerr << "ViennaCL encountered invalid data " << offset << " " << ind << " " << row << " " << coords[offset] << " " << gpu_matrix.size2() << std::endl;
220  return;
221  }
222 
223  cpu_matrix(row, coords[offset]) = val;
224  }
225  }
226  }
227 }
228 
229 
235 template<typename NumericT, unsigned int AlignmentV, typename IndexT>
236 void copy(const ell_matrix<NumericT, AlignmentV> & gpu_matrix,
237  std::vector< std::map<IndexT, NumericT> > & cpu_matrix)
238 {
239  tools::sparse_matrix_adapter<NumericT, IndexT> temp(cpu_matrix, cpu_matrix.size(), cpu_matrix.size());
240  viennacl::copy(gpu_matrix, temp);
241 }
242 
243 //
244 // Specify available operations:
245 //
246 
249 namespace linalg
250 {
251 namespace detail
252 {
253  // x = A * y
254  template<typename T, unsigned int A>
255  struct op_executor<vector_base<T>, op_assign, vector_expression<const ell_matrix<T, A>, const vector_base<T>, op_prod> >
256  {
257  static void apply(vector_base<T> & lhs, vector_expression<const ell_matrix<T, A>, const vector_base<T>, op_prod> const & rhs)
258  {
259  // check for the special case x = A * x
260  if (viennacl::traits::handle(lhs) == viennacl::traits::handle(rhs.rhs()))
261  {
262  viennacl::vector<T> temp(lhs);
263  viennacl::linalg::prod_impl(rhs.lhs(), rhs.rhs(), temp);
264  lhs = temp;
265  }
266  else
267  viennacl::linalg::prod_impl(rhs.lhs(), rhs.rhs(), lhs);
268  }
269  };
270 
271  template<typename T, unsigned int A>
272  struct op_executor<vector_base<T>, op_inplace_add, vector_expression<const ell_matrix<T, A>, const vector_base<T>, op_prod> >
273  {
274  static void apply(vector_base<T> & lhs, vector_expression<const ell_matrix<T, A>, const vector_base<T>, op_prod> const & rhs)
275  {
276  viennacl::vector<T> temp(lhs);
277  viennacl::linalg::prod_impl(rhs.lhs(), rhs.rhs(), temp);
278  lhs += temp;
279  }
280  };
281 
282  template<typename T, unsigned int A>
283  struct op_executor<vector_base<T>, op_inplace_sub, vector_expression<const ell_matrix<T, A>, const vector_base<T>, op_prod> >
284  {
285  static void apply(vector_base<T> & lhs, vector_expression<const ell_matrix<T, A>, const vector_base<T>, op_prod> const & rhs)
286  {
287  viennacl::vector<T> temp(lhs);
288  viennacl::linalg::prod_impl(rhs.lhs(), rhs.rhs(), temp);
289  lhs -= temp;
290  }
291  };
292 
293 
294  // x = A * vec_op
295  template<typename T, unsigned int A, typename LHS, typename RHS, typename OP>
296  struct op_executor<vector_base<T>, op_assign, vector_expression<const ell_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
297  {
298  static void apply(vector_base<T> & lhs, vector_expression<const ell_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
299  {
300  viennacl::vector<T> temp(rhs.rhs(), viennacl::traits::context(rhs));
301  viennacl::linalg::prod_impl(rhs.lhs(), temp, lhs);
302  }
303  };
304 
305  // x = A * vec_op
306  template<typename T, unsigned int A, typename LHS, typename RHS, typename OP>
307  struct op_executor<vector_base<T>, op_inplace_add, vector_expression<const ell_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
308  {
309  static void apply(vector_base<T> & lhs, vector_expression<const ell_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
310  {
311  viennacl::vector<T> temp(rhs.rhs(), viennacl::traits::context(rhs));
312  viennacl::vector<T> temp_result(lhs);
313  viennacl::linalg::prod_impl(rhs.lhs(), temp, temp_result);
314  lhs += temp_result;
315  }
316  };
317 
318  // x = A * vec_op
319  template<typename T, unsigned int A, typename LHS, typename RHS, typename OP>
320  struct op_executor<vector_base<T>, op_inplace_sub, vector_expression<const ell_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> >
321  {
322  static void apply(vector_base<T> & lhs, vector_expression<const ell_matrix<T, A>, const vector_expression<const LHS, const RHS, OP>, op_prod> const & rhs)
323  {
324  viennacl::vector<T> temp(rhs.rhs(), viennacl::traits::context(rhs));
325  viennacl::vector<T> temp_result(lhs);
326  viennacl::linalg::prod_impl(rhs.lhs(), temp, temp_result);
327  lhs -= temp_result;
328  }
329  };
330 
331 } // namespace detail
332 } // namespace linalg
333 
335 }
336 
337 #endif
338 
339 
scalar< typename viennacl::tools::CHECK_SCALAR_TEMPLATE_ARGUMENT< NumericT >::ResultType > value_type
Definition: ell_matrix.hpp:57
vcl_size_t size1() const
Definition: ell_matrix.hpp:91
Helper class implementing an array on the host. Default case: No conversion necessary.
Definition: util.hpp:92
vcl_size_t element_size() const
Definition: util.hpp:112
handle_type & handle2()
Definition: ell_matrix.hpp:103
This class represents a single scalar value on the GPU and behaves mostly like a built-in scalar type...
Definition: forwards.h:227
void clear()
Resets all entries in the matrix back to zero without changing the matrix size. Resets the sparsity p...
Definition: ell_matrix.hpp:77
const handle_type & handle2() const
Definition: ell_matrix.hpp:104
Various little tools used here and there in ViennaCL.
vcl_size_t size1(MatrixType const &mat)
Generic routine for obtaining the number of rows of a matrix (ViennaCL, uBLAS, etc.)
Definition: size.hpp:163
vcl_size_t nnz() const
Definition: ell_matrix.hpp:97
This file provides the forward declarations for the main types used within ViennaCL.
vcl_size_t size2() const
Definition: ell_matrix.hpp:92
void memory_read(mem_handle const &src_buffer, vcl_size_t src_offset, vcl_size_t bytes_to_read, void *ptr, bool async=false)
Reads data from a buffer back to main RAM.
Definition: memory.hpp:261
T max(const T &lhs, const T &rhs)
Maximum.
Definition: util.hpp:59
viennacl::backend::mem_handle handle_type
Definition: ell_matrix.hpp:56
result_of::size_type< MatrixType >::type size2(MatrixType const &mat)
Generic routine for obtaining the number of columns of a matrix (ViennaCL, uBLAS, etc...
Definition: size.hpp:201
vcl_size_t internal_size1() const
Definition: ell_matrix.hpp:88
float NumericT
Definition: bisect.cpp:40
Represents a generic 'context' similar to an OpenCL context, but is backend-agnostic and thus also su...
Definition: context.hpp:39
Main namespace in ViennaCL. Holds all the basic types such as vector, matrix, etc. and defines operations upon them.
Definition: cpu_ram.hpp:34
const handle_type & handle() const
Definition: ell_matrix.hpp:101
vcl_size_t internal_nnz() const
Definition: ell_matrix.hpp:98
Definition: blas3.hpp:36
Sparse matrix class using the ELLPACK format for storing the nonzeros.
Definition: ell_matrix.hpp:53
friend void copy(const CPUMatrixT &cpu_matrix, ell_matrix< T, ALIGN > &gpu_matrix)
Implementations of operations using sparse matrices.
Adapts a constant sparse matrix type made up from std::vector< std::map<SizeT, NumericT> > to a basic uBLAS-compatible read-only interface.
Definition: adapter.hpp:183
std::size_t vcl_size_t
Definition: forwards.h:75
vcl_size_t maxnnz() const
Definition: ell_matrix.hpp:95
vector_expression< const matrix_base< NumericT, F >, const unsigned int, op_row > row(const matrix_base< NumericT, F > &A, unsigned int i)
Definition: matrix.hpp:900
viennacl::memory_types memory_type() const
Definition: context.hpp:76
handle_type & handle()
Definition: ell_matrix.hpp:100
void switch_active_handle_id(memory_types new_id)
Switches the currently active handle. If no support for that backend is provided, an exception is thr...
Definition: mem_handle.hpp:121
viennacl::context context(T const &t)
Returns an ID for the currently active memory domain of an object.
Definition: context.hpp:40
The vector type with operator-overloads and proxy classes is defined here. Linear algebra operations ...
void copy(std::vector< NumericT > &cpu_vec, circulant_matrix< NumericT, AlignmentV > &gpu_mat)
Copies a circulant matrix from the std::vector to the OpenCL device (either GPU or multi-core CPU) ...
Main abstraction class for multiple memory domains. Represents a buffer in either main RAM...
Definition: mem_handle.hpp:89
vcl_size_t raw_size() const
Returns the number of bytes of the currently active buffer.
Definition: mem_handle.hpp:230
Adapts a non-const sparse matrix type made up from std::vector< std::map<SizeT, NumericT> > to a basic uBLAS-compatible writable interface.
Definition: adapter.hpp:357
void memory_create(mem_handle &handle, vcl_size_t size_in_bytes, viennacl::context const &ctx, const void *host_ptr=NULL)
Creates an array of the specified size. If the second argument is provided, the buffer is initialized...
Definition: memory.hpp:87
vcl_size_t internal_size2() const
Definition: ell_matrix.hpp:89
void prod_impl(const matrix_base< NumericT > &mat, const vector_base< NumericT > &vec, vector_base< NumericT > &result)
Carries out matrix-vector multiplication.
viennacl::backend::mem_handle & handle(T &obj)
Returns the generic memory handle of an object. Non-const version.
Definition: handle.hpp:41
vcl_size_t size_type
Definition: ell_matrix.hpp:58
vcl_size_t internal_maxnnz() const
Definition: ell_matrix.hpp:94
ell_matrix(viennacl::context ctx)
Definition: ell_matrix.hpp:62