// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
// 2006, 2007, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

_GLIBCXX_BEGIN_NAMESPACE(std)

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way: from operator new.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp* __restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp* __restrict__>
        (std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system.
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
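  // Illustrative sketch (editorial addition, not part of the original
  // header): how these raw-memory helpers are meant to be paired.  The
  // construct/destroy helpers named below are defined later in this file;
  // the sequence assumes _Tp is default-constructible.
  //
  //   _Tp* __restrict__ __p = std::__valarray_get_storage<_Tp>(__n);
  //   std::__valarray_default_construct(__p, __p + __n);
  //   // ... use __p[0 .. __n - 1] ...
  //   std::__valarray_destroy_elements(__p, __p + __n);
  //   std::__valarray_release_memory(__p);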
  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required in 'valarray<T> v(n);'.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      {
        while (__b != __e)
          new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }

  // Turn raw memory into an array of _Tp filled with __t.
  // This is required in 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      {
        while (__b != __e)
          new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      {
        while (__b != __e)
          *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e,
                              const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_pod(_Tp)>::_S_do_it(__b, __e, __t);
    }
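  // Illustrative sketch (editorial addition, not part of the original
  // header): the bool template parameter selects, at compile time, between
  // the generic placement-new loop and the trivial bulk-memory
  // specialization.  For example:
  //
  //   int* __p = std::__valarray_get_storage<int>(4);
  //   std::__valarray_fill_construct(__p, __p + 4, 7); // assignment loop
  //   // For a non-trivial class type _Tp, the same call instead
  //   // placement-new-constructs each element as _Tp(__t).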
  //
  // Copy-construct raw array [__o, *) from plain array [__b, __e).
  // We can't just say 'memcpy()'.
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      {
        while (__b != __e)
          new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      { __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __b,
                              const _Tp* __restrict__ __e,
                              _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_pod(_Tp)>::_S_do_it(__b, __e, __o);
    }

  // Copy-construct raw array [__o, *) from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a, size_t __n,
                              size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_pod(_Tp))
        while (__n--)
          {
            *__o++ = *__a;
            __a += __s;
          }
      else
        while (__n--)
          {
            new(__o++) _Tp(*__a);
            __a += __s;
          }
    }

  // Copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a,
                              const size_t* __restrict__ __i,
                              _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_pod(_Tp))
        while (__n--)
          *__o++ = __a[*__i++];
      else
        while (__n--)
          new(__o++) _Tp(__a[*__i++]);
    }

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
    {
      if (!__is_pod(_Tp))
        while (__b != __e)
          {
            __b->~_Tp();
            ++__b;
          }
    }

  // Fill a plain array __a[<__n>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
        *__a++ = __t;
    }

  // Fill strided array __a[<__n : __s>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
        *__a = __t;
    }

  // Fill indexed array __a[__i[<__n>]] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
        __a[*__i] = __t;
    }

  // Copy plain array __a[<__n>] into __b[<__n>].
  // For non-fundamental types, it is wrong to say 'memcpy()'.
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        while (__n--)
          *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { __builtin_memcpy(__b, __a, __n * sizeof(_Tp)); }
    };
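  // Illustrative note (editorial addition, not part of the original
  // header): in the comments throughout this file, __a[<__n : __s>]
  // denotes __n elements taken from __a with stride __s, and
  // __a[__i[<__n>]] denotes the gather __a[__i[0]], ..., __a[__i[__n - 1]].
  // For example, with __a = {0, 1, 2, 3, 4, 5}:
  //
  //   __a[<3 : 2>]         -> {__a[0], __a[2], __a[4]} == {0, 2, 4}
  //   __a[{5, 0, 3}[<3>]]  -> {__a[5], __a[0], __a[3]} == {5, 0, 3}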
  // Copy a plain array __a[<__n>] into a plain array __b[<>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_pod(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy strided array __a[<__n : __s>] into plain __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
        *__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
        *__b = *__a;
    }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
        __dst[__i * __s2] = __src[__i * __s1];
    }

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
        *__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
        __b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
        __dst[*__j++] = __src[*__i++];
    }

  //
  // Compute the sum of elements in range [__f, __l).
  // This is a naive algorithm.  It suffers from cancellation.
  // In the future, try to specialize it for _Tp = float, double,
  // long double using a more accurate algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp();
      while (__f != __l)
        __r += *__f++;
      return __r;
    }

  // Compute the product of all elements in range [__f, __l).
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __restrict__ __f,
                       const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
        __r = __r * *__f++;
      return __r;
    }
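  // Illustrative sketch (editorial addition, not part of the original
  // header): one candidate for the "more accurate algorithm" mentioned
  // above is Kahan compensated summation, which tracks the rounding error
  // of each addition.  The helper name is hypothetical:
  //
  //   template<typename _Tp>
  //     _Tp
  //     __kahan_sum(const _Tp* __f, const _Tp* __l)
  //     {
  //       _Tp __s = _Tp(), __c = _Tp();
  //       while (__f != __l)
  //         {
  //           _Tp __y = *__f++ - __c;   // compensated addend
  //           _Tp __t = __s + __y;      // low-order bits of __y may be lost
  //           __c = (__t - __s) - __y;  // recover the lost bits
  //           __s = __t;
  //         }
  //       return __s;
  //     }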
  // Compute the min/max of an array-expression.
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t < __r)
            __r = __t;
        }
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t > __r)
            __r = __t;
        }
      return __r;
    }

  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //
  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };

  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
                              _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
                                     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
                              _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
                    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy strided array __a[<__n : __s>] into plain __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }
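  // Illustrative sketch (editorial addition, not part of the original
  // header): _Array is a one-word wrapper around a raw pointer, so it is
  // passed by value and copying it does not copy elements.  Roughly:
  //
  //   double* __restrict__ __p = std::__valarray_get_storage<double>(3);
  //   std::_Array<double> __a(__p);       // non-owning view of __p
  //   std::__valarray_fill(__a, 3, 1.0);  // forwards to the raw-pointer fill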
  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
                    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
                    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
                           __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data(__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data(__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __b + __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin() const
    { return _M_data; }
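  // Illustrative sketch (editorial addition, not part of the original
  // header): the macro below stamps out one family of '_Array_augmented_*'
  // helpers per operator.  For example, _DEFINE_ARRAY_FUNCTION(+, __plus)
  // generates, among others,
  //
  //   template<typename _Tp>
  //     inline void
  //     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, const _Tp& __t)
  //     {
  //       for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)
  //         *__p += __t;
  //     }
  //
  // because the token paste '_Op##=' turns the bare operator '+' into '+='.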
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k < __n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (!*__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (!*__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (!*__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }

  _DEFINE_ARRAY_FUNCTION(+, __plus)
  _DEFINE_ARRAY_FUNCTION(-, __minus)
  _DEFINE_ARRAY_FUNCTION(*, __multiplies)
  _DEFINE_ARRAY_FUNCTION(/, __divides)
  _DEFINE_ARRAY_FUNCTION(%, __modulus)
  _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
  _DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
  _DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
  _DEFINE_ARRAY_FUNCTION(<<, __shift_left)
  _DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION

_GLIBCXX_END_NAMESPACE

#ifndef _GLIBCXX_EXPORT_TEMPLATE
# include <bits/valarray_array.tcc>
#endif

#endif /* _VALARRAY_ARRAY_H */
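A minimal usage sketch (editorial addition, illustrative only): client code never calls these internal helpers directly, but ordinary std::valarray operations compile down to them through <valarray>, which includes this header.

// example.cpp -- illustrative; the comments map each statement to the
// internal helper it is ultimately forwarded to in valarray_array.h.
#include <valarray>
#include <iostream>

int main()
{
  std::valarray<double> v(1.5, 4);  // fill-construct: __valarray_fill_construct
  std::valarray<double> w = v;      // copy-construct: __valarray_copy_construct
  w += 2.0;                         // macro-generated _Array_augmented___plus
  std::cout << w.sum() << '\n';     // accumulate: __valarray_sum, prints 14
  return 0;
}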