Turi Create  4.0
ndarray.hpp
/* Copyright © 2017 Apple Inc. All rights reserved.
 *
 * Use of this source code is governed by a BSD-3-clause license that can
 * be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
 */
#ifndef TURI_FLEXIBLE_TYPE_NDARRAY
#define TURI_FLEXIBLE_TYPE_NDARRAY
#include <algorithm>
#include <cmath>
#include <iostream>
#include <memory>
#include <tuple>
#include <type_traits>
#include <vector>
#include <core/logging/assertions.hpp>
#include <core/storage/serialization/serialization_includes.hpp>
namespace turi {
namespace flexible_type_impl {

template <typename T>
static void mod_helper(T& a, const T& b,
                       typename std::enable_if<std::is_floating_point<T>::value>::type* = 0) {
  a = std::fmod(a, b);
}
template <typename T>
static void mod_helper(T& a, const T& b,
                       typename std::enable_if<!std::is_floating_point<T>::value>::type* = 0) {
  a %= b;
}
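
// Illustrative sketch (not part of the original header): the two mod_helper
// overloads above dispatch via std::enable_if on whether T is a floating-point
// type. The values below are hypothetical.
//
//   double x = 7.5; mod_helper(x, 2.0);  // floating-point path: x == 1.5 (std::fmod)
//   int    y = 7;   mod_helper(y, 2);    // integral path:       y == 1   (a %= b)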
/**
 * A generic dense multidimensional array.
 *
 * This class implements a very minimal generic dense multidimensional
 * array type.
 *
 * The basic layout is simple.
 * - elems: a flattened array of all the elements.
 * - start: the offset of the 0th element in elems.
 * - shape: the dimensions of the ndarray. The product of all the values in
 *   shape should equal elems.length().
 * - stride: used to convert between N-D indices and element indices.
 *
 * Array indexing is computed as follows:
 *
 *   ndarray[i,j,k] = elements[start + i * stride[0] + j * stride[1] + k * stride[2]]
 *
 * Note that the strides are not based on the element size, i.e. for a simple
 * 1-D array, stride[0] is always 1 (as opposed to sizeof(T)).
 *
 * The NDArray's default construction layout is "C" ordering: the stride
 * array is non-increasing. That is, for a simple 3D ndarray[i,j,k], the elements
 * ndarray[i,j,k] and ndarray[i,j,k+1] are adjacent.
 *
 * However, due to the explicit representation of offset, shape and stride,
 * almost arbitrary layouts and slices can be represented by the ndarray.
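 *
 * For illustration (a sketch that is not part of the original header; the
 * variable names are hypothetical), a 2x3 C-ordered array built from six
 * elements has shape {2,3} and stride {3,1}, so ndarray[i,j] maps to
 * elements[start + 3*i + j]:
 *
 * \code
 * ndarray<double> a({1,2,3,4,5,6}, {2,3});              // stride defaults to {3,1}
 * size_t linear = a.index(std::vector<size_t>{1, 2});   // 1*3 + 2 == 5
 * double v = a[linear];                                 // v == 6
 * \endcode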
 *
 * The current implementation of the ndarray enforces that all mutation
 * operations require elems to be unique (elems.use_count() == 1). Hence views
 * are not modifiable; modifying a view automatically incurs a data copy. This
 * might be relaxed in the future (perhaps with an inherited view<T> type), but
 * this assumption allows for simpler memory management right now.
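 *
 * A hedged sketch of the copy-on-write behaviour this implies (illustrative
 * only; the variable names are hypothetical):
 *
 * \code
 * ndarray<double> a({1,2,3,4,5,6}, {2,3});
 * ndarray<double> b = a;   // b shares a's element buffer
 * b[0] = 42;               // the buffer is copied first (ensure_unique()),
 *                          // so a[0] is still 1
 * \endcode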
 *
 * Note
 * ----
 * The performance of the ndarray operators is probably not particularly
 * optimized. Really, the \ref increment_index() system should be replaced
 * with an iterator.
 **/
template <typename T>
class ndarray {
 public:
  typedef size_t index_type;
  typedef T value_type;
  typedef std::vector<index_type> index_range_type;
  typedef std::vector<T> container_type;

 private:
  std::shared_ptr<container_type> m_elem;
  index_range_type m_shape;
  index_range_type m_stride;
  index_type m_start = 0;

 public:

  /// construct with custom stride ordering
  ndarray(const container_type& elements = container_type(),
          const index_range_type& shape = index_range_type(),
          const index_range_type& stride = index_range_type(),
          const index_type start = 0):
      ndarray(std::make_shared<container_type>(elements), shape, stride, start) {}


  /// construct with custom stride ordering
  ndarray(const std::shared_ptr<container_type>& elements,
          const index_range_type& shape = index_range_type(),
          const index_range_type& stride = index_range_type(),
          const index_type start = 0):
      m_elem(elements), m_shape(shape), m_stride(stride), m_start(start) {

    // construct m_shape if not given
    if (m_shape.size() == 0 && elements->size() - m_start > 0) {
      m_shape.push_back(elements->size() - m_start);
    }
    // construct m_stride if not given
    if (m_stride.size() == 0 && m_shape.size() > 0) {
      m_stride.resize(m_shape.size());
      int64_t i = m_shape.size() - 1;
      m_stride[i] = 1;
      --i;
      for (; i >= 0; --i) {
        m_stride[i] = m_stride[i + 1] * m_shape[i + 1];
      }
    }

    // if any shape axis is empty
    bool empty = m_shape.size() == 0;
    for (size_t i = 0; i < m_shape.size(); ++i) {
      if (m_shape[i] == 0) {
        empty = true;
      }
    }

    if (empty) {
      m_elem->clear();
      m_shape.clear();
      m_stride.clear();
      m_start = 0;
    }

    for (size_t i = 0; i < m_shape.size(); ++i) {
      ASSERT_TRUE(m_shape[i] > 0);
    }
  }

  /// Construct a new ndarray filled with a default value
  ndarray(const index_range_type& shape, const index_range_type& stride,
          const value_type& default_value) {

    if (shape.empty()) {
      return;
    }

    index_type total_size = 1;

    for (const index_type& t : shape) {
      total_size *= t;
    }

    *this = ndarray(std::make_shared<container_type>(total_size, default_value),
                    shape, stride, 0);
  }

  /// Construct a new ndarray filled with a default value
  ndarray(const index_range_type& shape,
          const value_type& default_value)
      : ndarray(shape, {}, default_value)
  {}

  ndarray(const ndarray<T>&) = default;
  ndarray(ndarray<T>&&) = default;
  ndarray& operator=(const ndarray<T>&) = default;
  ndarray& operator=(ndarray<T>&&) = default;

  /**
   * Ensures that m_elem is a unique copy.
   */
  void ensure_unique() {
    if (m_elem.use_count() > 1) {
      m_elem = std::make_shared<container_type>(*m_elem);
    }
  }

  /**
   * Returns true if the ndarray is empty / has no elements.
   */
  bool empty() const {
    if (m_elem->empty()) {
      return true;
    } else {
      return num_elem() == 0;
    }
  }

  /**
   * Returns the linear index given an N-D index, performing bounds checking
   * on the index ranges.
   *
   * \code
   * std::vector<size_t> indices = {1,5,2};
   * arr.at(arr.index(indices)) = 10;  // also bounds checks the linear index
   * arr[arr.index(indices)] = 10;     // does not bounds check the linear index
   * \endcode
   */
  template <typename U>
  index_type index(const std::vector<U>& index) const {
    ASSERT_EQ(m_stride.size(), index.size());

    size_t idx = 0;
    for (size_t i = 0; i < index.size(); ++i) {
      index_type v = index[i];
      ASSERT_LT(v, m_shape[i]);
      idx += v * m_stride[i];
    }
    return idx;
  }

  /**
   * Returns the linear index given an N-D index, without performing bounds
   * checking on the index ranges.
   *
   * \code
   * std::vector<size_t> indices = {1,5,2};
   * arr[arr.fast_index(indices)] = 10;     // does not bounds check the linear index
   * arr.at(arr.fast_index(indices)) = 10;  // bounds checks the linear index
   * \endcode
   */
  template <typename U>
  index_type fast_index(const std::vector<U>& index) const {
    size_t idx = 0;
    for (size_t i = 0; i < index.size(); ++i) {
      index_type v = index[i];
      idx += v * m_stride[i];
    }
    return idx;
  }

  /**
   * Returns a reference to an element given the linear index; no bounds
   * checking is performed.
   *
   * Note that if the memory used by this array is shared, this may have
   * unintentional side effects (changing other arrays).
   */
  value_type& operator[](size_t elem_index) {
    ensure_unique();
    return (*m_elem)[m_start + elem_index];
  }

  /**
   * Returns a const reference to an element given the linear index; no bounds
   * checking is performed.
   */
  const value_type& operator[](size_t elem_index) const {
    return (*m_elem)[m_start + elem_index];
  }

  /**
   * Returns a reference to an element given the linear index, performing bounds
   * checking on the index range.
   *
   * Note that if the memory used by this array is shared, this may have
   * unintentional side effects (changing other arrays).
   */
  value_type& at(size_t elem_index) {
    ensure_unique();
    ASSERT_LT(m_start + elem_index, m_elem->size());
    return (*m_elem)[m_start + elem_index];
  }

  /**
   * Returns a const reference to an element given the linear index, performing
   * bounds checking on the index range.
   */
  const value_type& at(size_t elem_index) const {
    ASSERT_LT(m_start + elem_index, m_elem->size());
    return (*m_elem)[m_start + elem_index];
  }

  /**
   * Returns a reference to all the elements in a linear layout; if is_full()
   * is false, this will include unindexable elements.
   *
   * Note that if the memory used by this array is shared, this may have
   * unintentional side effects (changing other arrays).
   */
  container_type& raw_elements() {
    return *m_elem;
  }

  /**
   * Returns a const reference to all the elements in a linear layout; if
   * is_full() is false, this will include unindexable elements.
   */
  const container_type& raw_elements() const {
    return *m_elem;
  }

  /**
   * Returns a reference to all the elements in a linear layout; is_full()
   * must be true.
   *
   * Note that if the memory used by this array is shared, this may have
   * unintentional side effects (changing other arrays).
   */
  container_type& elements() {
    ensure_unique();
    ASSERT_TRUE(is_full());
    return *m_elem;
  }
  /**
   * Returns a const reference to all the elements in a linear layout; is_full()
   * must be true.
   */
  const container_type& elements() const {
    ASSERT_TRUE(is_full());
    return *m_elem;
  }

  /**
   * Returns a const reference to the shape.
   */
  const index_range_type& shape() const {
    return m_shape;
  }

  /**
   * Returns a const reference to the stride.
   */
  const index_range_type& stride() const {
    return m_stride;
  }

  /**
   * Returns the start offset.
   */
  index_type start() const {
    return m_start;
  }

  /**
   * Returns the number of elements in the array.
   *
   * This is equivalent to the product of the values in the shape array.
   * Note that this may not be the same as elements().size().
   */
  size_t num_elem() const {
    if (m_shape.size() == 0) return 0;
    if (m_elem == nullptr) return 0;
    size_t p = 1;
    for (size_t s: m_shape) {
      p = p * s;
    }
    return p;
  }

  /**
   * Returns true if every element in elements() is reachable by an
   * N-D index.
   */
  bool is_full() const {
    return m_start == 0 &&
           num_elem() == m_elem->size() &&
           last_index() == m_elem->size();
  }

  /**
   * Returns true if the shape and stride of the array are laid out
   * correctly such that all array indices are within elements().size().
   *
   * An ndarray can be invalid, for instance, if the stride is too large
   * or if the shape is larger than the total number of elements.
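   *
   * As an illustrative sketch (not from the original header; the variable
   * name is hypothetical), a strided view can be valid without being full:
   *
   * \code
   * // Every other element of a 6-element buffer: shape {3}, stride {2}.
   * ndarray<int> view({10,11,12,13,14,15}, {3}, {2});
   * // view.is_valid() == true   (indices 0, 2 and 4 all fall inside the buffer)
   * // view.is_full()  == false  (num_elem() == 3, but the buffer holds 6)
   * \endcode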
   */
  bool is_valid() const {
    return m_shape.size() == m_stride.size() &&       // shape and stride line up
           num_elem() + m_start <= m_elem->size() &&  // num_elements (as computed by shape) is in m_elem
           last_index() + m_start <= m_elem->size();  // max index (as computed by stride) is in m_elem
  }

  /**
   * Returns true if the stride is ordered canonically.
   * The strides must be non-increasing and non-zero.
   */
  bool has_canonical_stride() const {
    if (m_stride.size() == 0) return true;
    if (m_stride[0] == 0) return false;
    for (size_t i = 1; i < m_stride.size(); ++i) {
      if (m_stride[i] == 0 || m_stride[i - 1] < m_stride[i]) {
        return false;
      }
    }
    return true;
  }

  /// Returns true if the nd-array is in canonical ordering
  bool is_canonical() const {
    return is_full() && has_canonical_stride();
  }

  /**
   * Increments a vector representing an N-D index.
   *
   * Assumes that the index is valid to begin with.
   * Returns 1 + [the index position we incremented] while we have not reached
   * the end of the array. Returns 0 once we increment past the end of the
   * array.
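   *
   * A worked sketch (hypothetical variable names): with shape {2,3} the index
   * advances like an odometer, and the return value is one past the position
   * that was incremented.
   *
   * \code
   * std::vector<size_t> idx{0, 0}, shape{2, 3};
   * ndarray<int>::increment_index(idx, shape);  // idx == {0,1}, returns 2
   * ndarray<int>::increment_index(idx, shape);  // idx == {0,2}, returns 2
   * ndarray<int>::increment_index(idx, shape);  // idx == {1,0}, returns 1
   * \endcode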
   */
  template <typename U, typename V>
  static size_t inline increment_index(std::vector<U>& idx, const std::vector<V>& _shape) {
    DASSERT_TRUE(idx.size() == _shape.size());
    int64_t i = idx.size() - 1;
    for (; i >= 0; --i) {
      ++idx[i];
      if (idx[i] < _shape[i]) break;
      // we hit this counter's limit; advance the next counter
      idx[i] = 0;
    }
    if (i < 0) return 0;
    else return i + 1;
  }

  /**
   * Increments a vector representing an N-D index.
   *
   * Assumes that the index is valid to begin with.
   * Returns 1 + [the index position we incremented] while we have not reached
   * the end of the array. Returns 0 once we increment past the end of the
   * array.
   */
  template <typename U>
  size_t inline increment_index(std::vector<U>& idx) const {
    return ndarray::increment_index(idx, m_shape);
  }

  /**
   * Returns an ndarray ordered canonically.
   *
   * The canonical ordering is full (\ref is_full()) and the stride array
   * is non-increasing.
   *
   * Raises an exception if the array is not valid.
   *
   * \note The performance of this algorithm can probably be improved.
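   *
   * An illustrative sketch (hypothetical variable names): canonicalizing a
   * transposed (column-major) view copies the data into C order.
   *
   * \code
   * // A 3x2 view of {1,2,3,4,5,6} with stride {1,3}, i.e. t[i,j] == 1 + i + 3*j.
   * ndarray<int> t({1,2,3,4,5,6}, {3,2}, {1,3});
   * ndarray<int> c = t.canonicalize();
   * // c.stride()   == {2,1}
   * // c.elements() == {1,4,2,5,3,6}
   * \endcode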
   */
  ndarray<T> canonicalize() const {
    if (is_canonical()) return (*this);
    ASSERT_TRUE(is_valid());

    ndarray<T> ret;
    ret.m_start = 0;
    ret.m_shape = m_shape;
    ret.m_elem->resize(num_elem());
    ret.m_stride.resize(m_shape.size());

    // empty array
    if (ret.m_shape.size() == 0 || ret.m_elem->size() == 0) {
      return ret;
    }

    // compute the stride
    int64_t i = ret.m_shape.size() - 1;
    ret.m_stride[i] = 1;
    --i;
    for (; i >= 0; --i) {
      ret.m_stride[i] = ret.m_stride[i + 1] * ret.m_shape[i + 1];
    }

    std::vector<size_t> idx(m_shape.size(), 0);
    size_t ctr = 0;
    do {
      // directly referencing ret.m_elem is ok here because ret.m_start is 0
      (*ret.m_elem)[ctr] = (*this)[fast_index(idx)];
      ++ctr;
    } while(increment_index(idx));

    return ret;
  }

  /**
   * Forces this ndarray to be full by compacting if necessary.
   */
  void ensure_full() {
    if (!is_full()) {
      (*this) = compact();
    }
  }

  /**
   * Returns a compacted ndarray.
   *
   * A compacted NDArray has the same stride ordering as the original array,
   * but enforces that the array is full. This essentially means that the
   * elements array has the same order of elements, but skipped elements are
   * removed.
   *
   * Raises an exception if the array is not valid.
   *
   * \note The performance of this algorithm can probably be improved.
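   *
   * An illustrative sketch (hypothetical variable names), compacting the
   * strided view from the is_valid() example above:
   *
   * \code
   * ndarray<int> view({10,11,12,13,14,15}, {3}, {2});  // elements 10, 12, 14
   * ndarray<int> c = view.compact();
   * // c.shape()    == {3}, c.stride() == {1}
   * // c.elements() == {10,12,14} and c.is_full() == true
   * \endcode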
   */
  ndarray<T> compact() const {
    ASSERT_TRUE(is_valid());
    if (is_full()) return (*this);

    ndarray<T> ret;
    ret.m_start = 0;
    ret.m_shape = m_shape;
    ret.m_elem->resize(num_elem());
    ret.m_stride.resize(m_shape.size());

    // empty array
    if (ret.m_shape.size() == 0 || ret.m_elem->size() == 0) {
      return ret;
    }

    std::vector<std::pair<size_t, size_t>> stride_ordering(m_stride.size());
    for (size_t i = 0; i < m_stride.size(); ++i) stride_ordering[i] = {m_stride[i], i};
    std::sort(stride_ordering.rbegin(), stride_ordering.rend());

    // compute the stride
    ret.m_stride[stride_ordering[0].second] = 1;
    for (size_t i = 1; i < m_stride.size(); ++i) {
      ret.m_stride[stride_ordering[i].second] =
          ret.m_stride[stride_ordering[i - 1].second] * ret.m_shape[stride_ordering[i - 1].second];
    }

    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      // directly referencing ret.m_elem is ok here because ret.m_start is 0
      (*ret.m_elem)[ret.fast_index(idx)] = (*this)[fast_index(idx)];
    } while(increment_index(idx));

    return ret;
  }

  /// serializer
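  ///
  /// A hedged round-trip sketch (illustrative only; it assumes, as in the
  /// serialization headers included above, that turi::oarchive / turi::iarchive
  /// can be bound to standard streams, and the variable names are hypothetical):
  ///
  /// \code
  /// std::stringstream strm;
  /// turi::oarchive oarc(strm);
  /// arr.save(oarc);                    // arr is some ndarray<double>
  ///
  /// turi::iarchive iarc(strm);
  /// ndarray<double> restored;
  /// restored.load(iarc);               // restored is element-wise equal to arr
  /// \endcode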
  void save(oarchive& oarc) const {
    oarc << char(0);
    if (is_full()) {
      oarc << m_shape;
      oarc << m_stride;
      oarc << *m_elem;
    } else {
      ndarray<T> c = compact();
      ASSERT_TRUE(c.is_full());
      oarc << c.m_shape;
      oarc << c.m_stride;
      oarc << *(c.m_elem);
    }
  }

  /// deserializer
  void load(iarchive& iarc) {
    char c;
    iarc >> c;
    ASSERT_TRUE(c == 0);
    m_start = 0;
    iarc >> m_shape;
    iarc >> m_stride;
    m_elem = std::make_shared<container_type>();
    iarc >> *m_elem;
  }

  /**
   * Returns true if this ndarray has the same shape as another ndarray.
   */
  bool same_shape(const ndarray<T>& other) const {
    if (m_shape.size() != other.m_shape.size()) return false;
    for (size_t i = 0; i < m_shape.size(); ++i) {
      if (m_shape[i] != other.m_shape[i]) return false;
    }
    return true;
  }

  /// element-wise addition. The other array must have the same shape.
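  ///
  /// A hedged usage sketch (hypothetical names; both arrays share shape {2,2}):
  ///
  /// \code
  /// ndarray<double> a({1,2,3,4},     {2,2});
  /// ndarray<double> b({10,20,30,40}, {2,2});
  /// a += b;    // a.elements() == {11,22,33,44}
  /// a *= 2.0;  // a.elements() == {22,44,66,88}
  /// \endcode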
  ndarray<T>& operator+=(const ndarray<T>& other) {
    ASSERT_TRUE(same_shape(other));
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] += other[other.fast_index(idx)];
    } while(increment_index(idx));
    return *this;
  }

  /// scalar addition.
  ndarray<T>& operator+=(T other) {
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] += other;
    } while(increment_index(idx));
    return *this;
  }

  /// element-wise subtraction. The other array must have the same shape.
  ndarray<T>& operator-=(const ndarray<T>& other) {
    ASSERT_TRUE(same_shape(other));
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] -= other[other.fast_index(idx)];
    } while(increment_index(idx));
    return *this;
  }

  /// scalar subtraction.
  ndarray<T>& operator-=(T other) {
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] -= other;
    } while(increment_index(idx));
    return *this;
  }

  /// element-wise multiplication. The other array must have the same shape.
  ndarray<T>& operator*=(const ndarray<T>& other) {
    ASSERT_TRUE(same_shape(other));
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] *= other[other.fast_index(idx)];
    } while(increment_index(idx));
    return *this;
  }

  /// scalar multiplication
  ndarray<T>& operator*=(T other) {
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] *= other;
    } while(increment_index(idx));
    return *this;
  }


  /// element-wise division. The other array must have the same shape.
  ndarray<T>& operator/=(const ndarray<T>& other) {
    ASSERT_TRUE(same_shape(other));
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] /= other[other.fast_index(idx)];
    } while(increment_index(idx));
    return *this;
  }

  /// scalar division
  ndarray<T>& operator/=(T other) {
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      (*this)[fast_index(idx)] /= other;
    } while(increment_index(idx));
    return *this;
  }

  /// element-wise modulo. The other array must have the same shape.
  ndarray<T>& operator%=(const ndarray<T>& other) {
    ASSERT_TRUE(same_shape(other));
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      T& left = (*this)[fast_index(idx)];
      mod_helper(left, other[other.fast_index(idx)]);
    } while(increment_index(idx));
    return *this;
  }

  /// scalar modulo.
  ndarray<T>& operator%=(T other) {
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      T& left = (*this)[fast_index(idx)];
      mod_helper(left, other);
    } while(increment_index(idx));
    return *this;
  }

  /// negation
  ndarray<T>& negate() {
    if (num_elem() == 0) return *this;
    ensure_unique();
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      T& v = (*this)[fast_index(idx)];
      v = -v;
    } while(increment_index(idx));
    return *this;
  }

  /// element-wise equality. The other array must have the same shape.
  bool operator==(const ndarray<T>& other) const {
    if (&other == this) return true;
    if (!same_shape(other)) return false;
    if (num_elem() == 0) return true;
    std::vector<size_t> idx(m_shape.size(), 0);
    do {
      if ((*this)[fast_index(idx)] != other[other.fast_index(idx)]) return false;
    } while(increment_index(idx));
    return true;
  }

  /// inverse of ==
  bool operator!=(const ndarray<T>& other) const {
    return !((*this) == other);
  }

  void print(std::ostream& os) const {
    std::vector<size_t> idx(m_shape.size(), 0);
    if (num_elem() == 0) {
      os << "[]";
      return;
    }

    // print all the open square brackets
    for (size_t i = 0; i < idx.size(); ++i) os << "[";
    size_t next_bracket_depth;
    bool is_first_element = true;
    do {
      if (is_first_element == false) os << ",";
      // go through operator[] so that m_start is respected
      os << (*this)[fast_index(idx)];
      is_first_element = false;
      next_bracket_depth = increment_index(idx);
      if (next_bracket_depth == 0) break;
      for (size_t i = next_bracket_depth; i < idx.size(); ++i) os << "]";
      if (next_bracket_depth < idx.size()) os << ",";
      for (size_t i = next_bracket_depth; i < idx.size(); ++i) os << "[";
      if (next_bracket_depth < idx.size()) is_first_element = true;
    } while(1);
    for (size_t i = 0; i < idx.size(); ++i) os << "]";
  }

 private:
  /**
   * Returns one past the last valid linear index of the array according to the
   * shape and stride information.
   */
  size_t last_index() const {
    if (m_shape.size() == 0) return 0;
    size_t last_idx = 0;
    for (size_t i = 0; i < m_shape.size(); ++i) {
      last_idx += (m_shape[i] - 1) * m_stride[i];
    }
    return last_idx + 1;
  }

};

// A pointer to an ndarray must be the same size as size_t so that it
// always fits in a flexible_type.
static_assert(sizeof(ndarray<int>*) == sizeof(size_t), "Pointer to ndarray must be the same size as size_t");

template <typename T>
std::ostream& operator<<(std::ostream& os, const ndarray<T>& n) {
  n.print(os);
  return os;
}

} // flexible_type_impl
} // namespace turi
#endif