valarray_array.h

Go to the documentation of this file.
00001 // The template and inlines for the -*- C++ -*- internal _Array helper class. 00002 00003 // Copyright (C) 1997, 1998, 1999, 2000 Free Software Foundation, Inc. 00004 // 00005 // This file is part of the GNU ISO C++ Library. This library is free 00006 // software; you can redistribute it and/or modify it under the 00007 // terms of the GNU General Public License as published by the 00008 // Free Software Foundation; either version 2, or (at your option) 00009 // any later version. 00010 00011 // This library is distributed in the hope that it will be useful, 00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of 00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 00014 // GNU General Public License for more details. 00015 00016 // You should have received a copy of the GNU General Public License along 00017 // with this library; see the file COPYING. If not, write to the Free 00018 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, 00019 // USA. 00020 00021 // As a special exception, you may use this file as part of a free software 00022 // library without restriction. Specifically, if other files instantiate 00023 // templates or use macros or inline functions from this file, or you compile 00024 // this file and link it with other files to produce an executable, this 00025 // file does not by itself cause the resulting executable to be covered by 00026 // the GNU General Public License. This exception does not however 00027 // invalidate any other reasons why the executable file might be covered by 00028 // the GNU General Public License. 00029 00030 // Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr> 00031 00032 /** @file valarray_array.h 00033 * This is an internal header file, included by other library headers. 00034 * You should not attempt to use it directly. 
00035 */ 00036 00037 #ifndef _CPP_BITS_ARRAY_H 00038 #define _CPP_BITS_ARRAY_H 1 00039 00040 #pragma GCC system_header 00041 00042 #include <bits/c++config.h> 00043 #include <bits/cpp_type_traits.h> 00044 #include <cstdlib> 00045 #include <cstring> 00046 #include <new> 00047 00048 namespace std 00049 { 00050 // 00051 // Helper functions on raw pointers 00052 // 00053 00054 // We get memory by the old fashion way 00055 inline void* 00056 __valarray_get_memory(size_t __n) 00057 { return operator new(__n); } 00058 00059 template<typename _Tp> 00060 inline _Tp*__restrict__ 00061 __valarray_get_storage(size_t __n) 00062 { 00063 return static_cast<_Tp*__restrict__> 00064 (__valarray_get_memory(__n * sizeof(_Tp))); 00065 } 00066 00067 // Return memory to the system 00068 inline void 00069 __valarray_release_memory(void* __p) 00070 { operator delete(__p); } 00071 00072 // Turn a raw-memory into an array of _Tp filled with _Tp() 00073 // This is required in 'valarray<T> v(n);' 00074 template<typename _Tp, bool> 00075 struct _Array_default_ctor 00076 { 00077 // Please note that this isn't exception safe. But 00078 // valarrays aren't required to be exception safe. 00079 inline static void 00080 _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e) 00081 { while (__b != __e) new(__b++) _Tp(); } 00082 }; 00083 00084 template<typename _Tp> 00085 struct _Array_default_ctor<_Tp, true> 00086 { 00087 // For fundamental types, it suffices to say 'memset()' 00088 inline static void 00089 _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e) 00090 { memset(__b, 0, (__e - __b)*sizeof(_Tp)); } 00091 }; 00092 00093 template<typename _Tp> 00094 inline void 00095 __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e) 00096 { 00097 _Array_default_ctor<_Tp, __is_fundamental<_Tp>::_M_type>:: 00098 _S_do_it(__b, __e); 00099 } 00100 00101 // Turn a raw-memory into an array of _Tp filled with __t 00102 // This is the required in valarray<T> v(n, t). 
Also 00103 // used in valarray<>::resize(). 00104 template<typename _Tp, bool> 00105 struct _Array_init_ctor 00106 { 00107 // Please note that this isn't exception safe. But 00108 // valarrays aren't required to be exception safe. 00109 inline static void 00110 _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t) 00111 { while (__b != __e) new(__b++) _Tp(__t); } 00112 }; 00113 00114 template<typename _Tp> 00115 struct _Array_init_ctor<_Tp, true> 00116 { 00117 inline static void 00118 _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t) 00119 { while (__b != __e) *__b++ = __t; } 00120 }; 00121 00122 template<typename _Tp> 00123 inline void 00124 __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e, 00125 const _Tp __t) 00126 { 00127 _Array_init_ctor<_Tp, __is_fundamental<_Tp>::_M_type>:: 00128 _S_do_it(__b, __e, __t); 00129 } 00130 00131 // 00132 // copy-construct raw array [__o, *) from plain array [__b, __e) 00133 // We can't just say 'memcpy()' 00134 // 00135 template<typename _Tp, bool> 00136 struct _Array_copy_ctor 00137 { 00138 // Please note that this isn't exception safe. But 00139 // valarrays aren't required to be exception safe. 
00140 inline static void 00141 _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e, 00142 _Tp* __restrict__ __o) 00143 { while (__b != __e) new(__o++) _Tp(*__b++); } 00144 }; 00145 00146 template<typename _Tp> 00147 struct _Array_copy_ctor<_Tp, true> 00148 { 00149 inline static void 00150 _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e, 00151 _Tp* __restrict__ __o) 00152 { memcpy(__o, __b, (__e - __b)*sizeof(_Tp)); } 00153 }; 00154 00155 template<typename _Tp> 00156 inline void 00157 __valarray_copy_construct(const _Tp* __restrict__ __b, 00158 const _Tp* __restrict__ __e, 00159 _Tp* __restrict__ __o) 00160 { 00161 _Array_copy_ctor<_Tp, __is_fundamental<_Tp>::_M_type>:: 00162 _S_do_it(__b, __e, __o); 00163 } 00164 00165 // copy-construct raw array [__o, *) from strided array __a[<__n : __s>] 00166 template<typename _Tp> 00167 inline void 00168 __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n, 00169 size_t __s, _Tp* __restrict__ __o) 00170 { 00171 if (__is_fundamental<_Tp>::_M_type) 00172 while (__n--) { *__o++ = *__a; __a += __s; } 00173 else 00174 while (__n--) { new(__o++) _Tp(*__a); __a += __s; } 00175 } 00176 00177 // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]] 00178 template<typename _Tp> 00179 inline void 00180 __valarray_copy_construct (const _Tp* __restrict__ __a, 00181 const size_t* __restrict__ __i, 00182 _Tp* __restrict__ __o, size_t __n) 00183 { 00184 if (__is_fundamental<_Tp>::_M_type) 00185 while (__n--) *__o++ = __a[*__i++]; 00186 else 00187 while (__n--) new (__o++) _Tp(__a[*__i++]); 00188 } 00189 00190 // Do the necessary cleanup when we're done with arrays. 
00191 template<typename _Tp> 00192 inline void 00193 __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e) 00194 { 00195 if (!__is_fundamental<_Tp>::_M_type) 00196 while (__b != __e) { __b->~_Tp(); ++__b; } 00197 } 00198 00199 // Fill a plain array __a[<__n>] with __t 00200 template<typename _Tp> 00201 inline void 00202 __valarray_fill (_Tp* __restrict__ __a, size_t __n, const _Tp& __t) 00203 { while (__n--) *__a++ = __t; } 00204 00205 // fill strided array __a[<__n-1 : __s>] with __t 00206 template<typename _Tp> 00207 inline void 00208 __valarray_fill (_Tp* __restrict__ __a, size_t __n, 00209 size_t __s, const _Tp& __t) 00210 { for (size_t __i=0; __i<__n; ++__i, __a+=__s) *__a = __t; } 00211 00212 // fill indir ect array __a[__i[<__n>]] with __i 00213 template<typename _Tp> 00214 inline void 00215 __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i, 00216 size_t __n, const _Tp& __t) 00217 { for (size_t __j=0; __j<__n; ++__j, ++__i) __a[*__i] = __t; } 00218 00219 // copy plain array __a[<__n>] in __b[<__n>] 00220 // For non-fundamental types, it is wrong to say 'memcpy()' 00221 template<typename _Tp, bool> 00222 struct _Array_copier 00223 { 00224 inline static void 00225 _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b) 00226 { while (__n--) *__b++ = *__a++; } 00227 }; 00228 00229 template<typename _Tp> 00230 struct _Array_copier<_Tp, true> 00231 { 00232 inline static void 00233 _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b) 00234 { memcpy (__b, __a, __n * sizeof (_Tp)); } 00235 }; 00236 00237 // Copy a plain array __a[<__n>] into a play array __b[<>] 00238 template<typename _Tp> 00239 inline void 00240 __valarray_copy(const _Tp* __restrict__ __a, size_t __n, 00241 _Tp* __restrict__ __b) 00242 { 00243 _Array_copier<_Tp, __is_fundamental<_Tp>::_M_type>:: 00244 _S_do_it(__a, __n, __b); 00245 } 00246 00247 // Copy strided array __a[<__n : __s>] in plain __b[<__n>] 00248 
template<typename _Tp>
inline void
__valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                _Tp* __restrict__ __b)
{
  for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
    *__b = *__a;
}

// Copy the plain array __a[<__n>] into the strided array __b[<__n : __s>].
template<typename _Tp>
inline void
__valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                size_t __n, size_t __s)
{
  for (const _Tp* __end = __a + __n; __a != __end; ++__a, __b += __s)
    *__b = *__a;
}

// Copy the strided array __src[<__n : __s1>] into another
// strided array __dst[< : __s2>].  Their sizes must match.
template<typename _Tp>
inline void
__valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                _Tp* __restrict__ __dst, size_t __s2)
{
  for (size_t __i = 0; __i < __n; ++__i)
    __dst[__i * __s2] = __src[__i * __s1];
}

// Copy the indexed array __a[__i[<__n>]] into the plain array __b[<__n>].
template<typename _Tp>
inline void
__valarray_copy(const _Tp* __restrict__ __a,
                const size_t* __restrict__ __i,
                _Tp* __restrict__ __b, size_t __n)
{
  for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
    *__b = __a[*__i];
}

// Copy the plain array __a[<__n>] into the indexed array __b[__i[<__n>]].
template<typename _Tp>
inline void
__valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                _Tp* __restrict__ __b, const size_t* __restrict__ __i)
{
  for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
    __b[*__i] = *__a;
}

// Copy the __n first elements of an indexed array __src[<__i>] into
// another indexed array __dst[<__j>].
00290 template<typename _Tp> 00291 inline void 00292 __valarray_copy(const _Tp* __restrict__ __src, size_t __n, 00293 const size_t* __restrict__ __i, 00294 _Tp* __restrict__ __dst, const size_t* __restrict__ __j) 00295 { 00296 for (size_t __k = 0; __k < __n; ++__k) 00297 __dst[*__j++] = __src[*__i++]; 00298 } 00299 00300 // 00301 // Compute the sum of elements in range [__f, __l) 00302 // This is a naive algorithm. It suffers from cancelling. 00303 // In the future try to specialize 00304 // for _Tp = float, double, long double using a more accurate 00305 // algorithm. 00306 // 00307 template<typename _Tp> 00308 inline _Tp 00309 __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l) 00310 { 00311 _Tp __r = _Tp(); 00312 while (__f != __l) __r += *__f++; 00313 return __r; 00314 } 00315 00316 // Compute the product of all elements in range [__f, __l) 00317 template<typename _Tp> 00318 inline _Tp 00319 __valarray_product(const _Tp* __restrict__ __f, 00320 const _Tp* __restrict__ __l) 00321 { 00322 _Tp __r = _Tp(1); 00323 while (__f != __l) __r = __r * *__f++; 00324 return __r; 00325 } 00326 00327 // Compute the min/max of an array-expression 00328 template<typename _Ta> 00329 inline typename _Ta::value_type 00330 __valarray_min(const _Ta& __a) 00331 { 00332 size_t __s = __a.size(); 00333 typedef typename _Ta::value_type _Value_type; 00334 _Value_type __r = __s == 0 ? _Value_type() : __a[0]; 00335 for (size_t __i = 1; __i < __s; ++__i) 00336 { 00337 _Value_type __t = __a[__i]; 00338 if (__t < __r) 00339 __r = __t; 00340 } 00341 return __r; 00342 } 00343 00344 template<typename _Ta> 00345 inline typename _Ta::value_type 00346 __valarray_max(const _Ta& __a) 00347 { 00348 size_t __s = __a.size(); 00349 typedef typename _Ta::value_type _Value_type; 00350 _Value_type __r = __s == 0 ? 
_Value_type() : __a[0]; 00351 for (size_t __i = 1; __i < __s; ++__i) 00352 { 00353 _Value_type __t = __a[__i]; 00354 if (__t > __r) 00355 __r = __t; 00356 } 00357 return __r; 00358 } 00359 00360 // 00361 // Helper class _Array, first layer of valarray abstraction. 00362 // All operations on valarray should be forwarded to this class 00363 // whenever possible. -- gdr 00364 // 00365 00366 template<typename _Tp> 00367 struct _Array 00368 { 00369 explicit _Array (size_t); 00370 explicit _Array (_Tp* const __restrict__); 00371 explicit _Array (const valarray<_Tp>&); 00372 _Array (const _Tp* __restrict__, size_t); 00373 00374 _Tp* begin () const; 00375 00376 _Tp* const __restrict__ _M_data; 00377 }; 00378 00379 template<typename _Tp> 00380 inline void 00381 __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t) 00382 { __valarray_fill (__a._M_data, __n, __t); } 00383 00384 template<typename _Tp> 00385 inline void 00386 __valarray_fill (_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t) 00387 { __valarray_fill (__a._M_data, __n, __s, __t); } 00388 00389 template<typename _Tp> 00390 inline void 00391 __valarray_fill (_Array<_Tp> __a, _Array<size_t> __i, 00392 size_t __n, const _Tp& __t) 00393 { __valarray_fill (__a._M_data, __i._M_data, __n, __t); } 00394 00395 // Copy a plain array __a[<__n>] into a play array __b[<>] 00396 template<typename _Tp> 00397 inline void 00398 __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) 00399 { __valarray_copy(__a._M_data, __n, __b._M_data); } 00400 00401 // Copy strided array __a[<__n : __s>] in plain __b[<__n>] 00402 template<typename _Tp> 00403 inline void 00404 __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b) 00405 { __valarray_copy(__a._M_data, __n, __s, __b._M_data); } 00406 00407 // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>] 00408 template<typename _Tp> 00409 inline void 00410 __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s) 00411 { 
__valarray_copy(__a._M_data, __b._M_data, __n, __s); } 00412 00413 // Copy strided array __src[<__n : __s1>] into another 00414 // strided array __dst[< : __s2>]. Their sizes must match. 00415 template<typename _Tp> 00416 inline void 00417 __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1, 00418 _Array<_Tp> __b, size_t __s2) 00419 { __valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); } 00420 00421 00422 // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>] 00423 template<typename _Tp> 00424 inline void 00425 __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i, 00426 _Array<_Tp> __b, size_t __n) 00427 { __valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); } 00428 00429 // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]] 00430 template<typename _Tp> 00431 inline void 00432 __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b, 00433 _Array<size_t> __i) 00434 { __valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); } 00435 00436 // Copy the __n first elements of an indexed array __src[<__i>] into 00437 // another indexed array __dst[<__j>]. 
00438 template<typename _Tp> 00439 inline void 00440 __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i, 00441 _Array<_Tp> __dst, _Array<size_t> __j) 00442 { 00443 __valarray_copy(__src._M_data, __n, __i._M_data, 00444 __dst._M_data, __j._M_data); 00445 } 00446 00447 template<typename _Tp> 00448 inline 00449 _Array<_Tp>::_Array (size_t __n) 00450 : _M_data(__valarray_get_storage<_Tp>(__n)) 00451 { __valarray_default_construct(_M_data, _M_data + __n); } 00452 00453 template<typename _Tp> 00454 inline 00455 _Array<_Tp>::_Array (_Tp* const __restrict__ __p) : _M_data (__p) {} 00456 00457 template<typename _Tp> 00458 inline _Array<_Tp>::_Array (const valarray<_Tp>& __v) 00459 : _M_data (__v._M_data) {} 00460 00461 template<typename _Tp> 00462 inline 00463 _Array<_Tp>::_Array (const _Tp* __restrict__ __b, size_t __s) 00464 : _M_data(__valarray_get_storage<_Tp>(__s)) 00465 { __valarray_copy_construct(__b, __s, _M_data); } 00466 00467 template<typename _Tp> 00468 inline _Tp* 00469 _Array<_Tp>::begin () const 00470 { return _M_data; } 00471 00472 #define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \ 00473 template<typename _Tp> \ 00474 inline void \ 00475 _Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, const _Tp& __t) \ 00476 { \ 00477 for (_Tp* __p=__a._M_data; __p<__a._M_data+__n; ++__p) \ 00478 *__p _Op##= __t; \ 00479 } \ 00480 \ 00481 template<typename _Tp> \ 00482 inline void \ 00483 _Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \ 00484 { \ 00485 _Tp* __p = __a._M_data; \ 00486 for (_Tp* __q=__b._M_data; __q<__b._M_data+__n; ++__p, ++__q) \ 00487 *__p _Op##= *__q; \ 00488 } \ 00489 \ 00490 template<typename _Tp, class _Dom> \ 00491 void \ 00492 _Array_augmented_##_Name (_Array<_Tp> __a, \ 00493 const _Expr<_Dom,_Tp>& __e, size_t __n) \ 00494 { \ 00495 _Tp* __p (__a._M_data); \ 00496 for (size_t __i=0; __i<__n; ++__i, ++__p) *__p _Op##= __e[__i]; \ 00497 } \ 00498 \ 00499 template<typename _Tp> \ 00500 inline void \ 00501 
_Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, size_t __s, \ 00502 _Array<_Tp> __b) \ 00503 { \ 00504 _Tp* __q (__b._M_data); \ 00505 for (_Tp* __p=__a._M_data; __p<__a._M_data+__s*__n; __p+=__s, ++__q) \ 00506 *__p _Op##= *__q; \ 00507 } \ 00508 \ 00509 template<typename _Tp> \ 00510 inline void \ 00511 _Array_augmented_##_Name (_Array<_Tp> __a, _Array<_Tp> __b, \ 00512 size_t __n, size_t __s) \ 00513 { \ 00514 _Tp* __q (__b._M_data); \ 00515 for (_Tp* __p=__a._M_data; __p<__a._M_data+__n; ++__p, __q+=__s) \ 00516 *__p _Op##= *__q; \ 00517 } \ 00518 \ 00519 template<typename _Tp, class _Dom> \ 00520 void \ 00521 _Array_augmented_##_Name (_Array<_Tp> __a, size_t __s, \ 00522 const _Expr<_Dom,_Tp>& __e, size_t __n) \ 00523 { \ 00524 _Tp* __p (__a._M_data); \ 00525 for (size_t __i=0; __i<__n; ++__i, __p+=__s) *__p _Op##= __e[__i]; \ 00526 } \ 00527 \ 00528 template<typename _Tp> \ 00529 inline void \ 00530 _Array_augmented_##_Name (_Array<_Tp> __a, _Array<size_t> __i, \ 00531 _Array<_Tp> __b, size_t __n) \ 00532 { \ 00533 _Tp* __q (__b._M_data); \ 00534 for (size_t* __j=__i._M_data; __j<__i._M_data+__n; ++__j, ++__q) \ 00535 __a._M_data[*__j] _Op##= *__q; \ 00536 } \ 00537 \ 00538 template<typename _Tp> \ 00539 inline void \ 00540 _Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, \ 00541 _Array<_Tp> __b, _Array<size_t> __i) \ 00542 { \ 00543 _Tp* __p (__a._M_data); \ 00544 for (size_t* __j=__i._M_data; __j<__i._M_data+__n; ++__j, ++__p) \ 00545 *__p _Op##= __b._M_data[*__j]; \ 00546 } \ 00547 \ 00548 template<typename _Tp, class _Dom> \ 00549 void \ 00550 _Array_augmented_##_Name (_Array<_Tp> __a, _Array<size_t> __i, \ 00551 const _Expr<_Dom, _Tp>& __e, size_t __n) \ 00552 { \ 00553 size_t* __j (__i._M_data); \ 00554 for (size_t __k=0; __k<__n; ++__k, ++__j) \ 00555 __a._M_data[*__j] _Op##= __e[__k]; \ 00556 } \ 00557 \ 00558 template<typename _Tp> \ 00559 void \ 00560 _Array_augmented_##_Name (_Array<_Tp> __a, _Array<bool> __m, \ 00561 _Array<_Tp> __b, 
size_t __n) \ 00562 { \ 00563 bool* ok (__m._M_data); \ 00564 _Tp* __p (__a._M_data); \ 00565 for (_Tp* __q=__b._M_data; __q<__b._M_data+__n; ++__q, ++ok, ++__p) { \ 00566 while (! *ok) { \ 00567 ++ok; \ 00568 ++__p; \ 00569 } \ 00570 *__p _Op##= *__q; \ 00571 } \ 00572 } \ 00573 \ 00574 template<typename _Tp> \ 00575 void \ 00576 _Array_augmented_##_Name (_Array<_Tp> __a, size_t __n, \ 00577 _Array<_Tp> __b, _Array<bool> __m) \ 00578 { \ 00579 bool* ok (__m._M_data); \ 00580 _Tp* __q (__b._M_data); \ 00581 for (_Tp* __p=__a._M_data; __p<__a._M_data+__n; ++__p, ++ok, ++__q) { \ 00582 while (! *ok) { \ 00583 ++ok; \ 00584 ++__q; \ 00585 } \ 00586 *__p _Op##= *__q; \ 00587 } \ 00588 } \ 00589 \ 00590 template<typename _Tp, class _Dom> \ 00591 void \ 00592 _Array_augmented_##_Name (_Array<_Tp> __a, _Array<bool> __m, \ 00593 const _Expr<_Dom, _Tp>& __e, size_t __n) \ 00594 { \ 00595 bool* ok(__m._M_data); \ 00596 _Tp* __p (__a._M_data); \ 00597 for (size_t __i=0; __i<__n; ++__i, ++ok, ++__p) { \ 00598 while (! *ok) { \ 00599 ++ok; \ 00600 ++__p; \ 00601 } \ 00602 *__p _Op##= __e[__i]; \ 00603 } \ 00604 } 00605 00606 _DEFINE_ARRAY_FUNCTION(+, __plus) 00607 _DEFINE_ARRAY_FUNCTION(-, __minus) 00608 _DEFINE_ARRAY_FUNCTION(*, __multiplies) 00609 _DEFINE_ARRAY_FUNCTION(/, __divides) 00610 _DEFINE_ARRAY_FUNCTION(%, __modulus) 00611 _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor) 00612 _DEFINE_ARRAY_FUNCTION(|, __bitwise_or) 00613 _DEFINE_ARRAY_FUNCTION(&, __bitwise_and) 00614 _DEFINE_ARRAY_FUNCTION(<<, __shift_left) 00615 _DEFINE_ARRAY_FUNCTION(>>, __shift_right) 00616 00617 #undef _DEFINE_VALARRAY_FUNCTION 00618 00619 } // std:: 00620 00621 #ifdef _GLIBCPP_NO_TEMPLATE_EXPORT 00622 # define export 00623 # include <bits/valarray_array.tcc> 00624 #endif 00625 00626 #endif /* _CPP_BITS_ARRAY_H */ 00627 00628 // Local Variables: 00629 // mode:c++ 00630 // End:

Generated on Sun Sep 19 16:34:04 2004 for libstdc++-v3 Source by doxygen 1.3.8