/************************************************************************/
/*                                                                      */
/*               Copyright 1998-2002 by Ullrich Koethe                  */
/*                                                                      */
/*    This file is part of the VIGRA computer vision library.           */
/*    The VIGRA Website is                                              */
/*        http://hci.iwr.uni-heidelberg.de/vigra/                       */
/*    Please direct questions, bug reports, and contributions to        */
/*        ullrich.koethe@iwr.uni-heidelberg.de    or                    */
/*        vigra@informatik.uni-hamburg.de                               */
/*                                                                      */
/*    Permission is hereby granted, free of charge, to any person       */
/*    obtaining a copy of this software and associated documentation    */
/*    files (the "Software"), to deal in the Software without           */
/*    restriction, including without limitation the rights to use,      */
/*    copy, modify, merge, publish, distribute, sublicense, and/or      */
/*    sell copies of the Software, and to permit persons to whom the    */
/*    Software is furnished to do so, subject to the following          */
/*    conditions:                                                       */
/*                                                                      */
/*    The above copyright notice and this permission notice shall be    */
/*    included in all copies or substantial portions of the             */
/*    Software.                                                         */
/*                                                                      */
/*    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND    */
/*    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES   */
/*    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND          */
/*    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT       */
/*    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,      */
/*    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING      */
/*    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR     */
/*    OTHER DEALINGS IN THE SOFTWARE.                                   */
/*                                                                      */
/************************************************************************/


#ifndef VIGRA_STDCONVOLUTION_HXX
#define VIGRA_STDCONVOLUTION_HXX

#include <cmath>
#include "stdimage.hxx"
#include "bordertreatment.hxx"
#include "separableconvolution.hxx"
#include "utilities.hxx"
#include "sized_int.hxx"

namespace vigra {

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor,
          class KSumType>
void internalPixelEvaluationByClip(int x, int y, int w, int h, SrcIterator xs,
                                   SrcAccessor src_acc, DestIterator xd, DestAccessor dest_acc,
                                   KernelIterator ki, Diff2D kul, Diff2D klr, KernelAccessor ak,
                                   KSumType norm)
{
    typedef typename
        PromoteTraits<typename SrcAccessor::value_type,
                      typename KernelAccessor::value_type>::Promote SumType;
    typedef typename DestAccessor::value_type DestType;

    // calculate width and height of the kernel
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    SumType sum = NumericTraits<SumType>::zero();
    int xx, yy;
    int x0, y0, x1, y1;

    y0 = (y<klr.y) ? -y : -klr.y;
    y1 = (h-y-1<-kul.y) ? h-y-1 : -kul.y;

    x0 = (x<klr.x) ? -x : -klr.x;
    x1 = (w-x-1<-kul.x) ? w-x-1 : -kul.x;

    SrcIterator yys = xs + Diff2D(x0, y0);
    KernelIterator yk = ki - Diff2D(x0, y0);

    KSumType ksum = NumericTraits<KSumType>::zero();
    kernel_width = x1 - x0 + 1;
    kernel_height = y1 - y0 + 1;

    // the kernel is clipped first and then mirrored!
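    // The loop below accumulates the weighted sum over the part of the kernel
    // that overlaps the image ([x0..x1] x [y0..y1]) together with the sum of
    // the corresponding kernel values (ksum); the result is then rescaled by
    // norm/ksum so that the clipped kernel keeps its original norm.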

    for(yy=0; yy<kernel_height; ++yy, ++yys.y, --yk.y)
    {
        SrcIterator xxs = yys;
        KernelIterator xk = yk;

        for(xx=0; xx<kernel_width; ++xx, ++xxs.x, --xk.x)
        {
            sum += ak(xk) * src_acc(xxs);
            ksum += ak(xk);
        }
    }

    // store average in destination pixel
    dest_acc.set(detail::RequiresExplicitCast<DestType>::cast((norm / ksum) * sum), xd);
}


#if 0

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor>
void internalPixelEvaluationByWrapReflectRepeat(int x, int y, int src_width, int src_height, SrcIterator xs,
                                                SrcAccessor src_acc, DestIterator xd, DestAccessor dest_acc,
                                                KernelIterator ki, Diff2D kul, Diff2D klr, KernelAccessor ak,
                                                BorderTreatmentMode border)
{

    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType;
    typedef
        NumericTraits<typename DestAccessor::value_type> DestTraits;

    SumType sum = NumericTraits<SumType>::zero();

    SrcIterator src_ul = xs - Diff2D(x, y);
    SrcIterator src_lr = src_ul + Diff2D(src_width, src_height);

    SrcIterator yys = xs;
    KernelIterator yk = ki;

    // calculate width and height of the kernel
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    // where the kernel is beyond the borders:
    bool top_to_much = (y<klr.y) ? true : false;
    bool down_to_much = (src_height-y-1<-kul.y) ? true : false;
    bool left_to_much = (x<klr.x) ? true : false;
    bool right_to_much = (src_width-x-1<-kul.x) ? true : false;

    // direction of iteration,
    // e.g. (-1, +1) for ll->ur or (-1, -1) for lr->ul
    Diff2D way_increment;

    /* Iteration is always done from valid to invalid range.
       The following tuple is composed as such:
       - If an invalid range is reached while iterating in X,
         a jump of border_increment.first is performed and
         border_increment.third is used for further iterating.
       - If an invalid range is reached while iterating in Y,
         a jump of border_increment.second is performed and
         border_increment.fourth is used for further iterating.
    */
    tuple4<int, int, int, int> border_increment;
    if (border == BORDER_TREATMENT_REPEAT){
        border_increment = tuple4<int, int, int, int>(1, 1, 0, 0);
    }else if (border == BORDER_TREATMENT_REFLECT){
        border_increment = tuple4<int, int, int, int>(2, 2, -1, -1);
    }else{ // BORDER_TREATMENT_WRAP
        border_increment = tuple4<int, int, int, int>(src_width, src_height, 1, 1);
    }

    pair<int, int> valid_step_count;

    if(left_to_much && !top_to_much && !down_to_much)
    {
        yys += klr;
        yk += kul;
        way_increment = Diff2D(-1, -1);
        border_increment.third = -border_increment.third;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair((yys - src_ul).x + 1, kernel_height);
    }
    else if(top_to_much && !left_to_much && !right_to_much)
    {
        yys += klr;
        yk += kul;
        way_increment = Diff2D(-1, -1);
        border_increment.third = -border_increment.third;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair(kernel_width, (yys - src_ul).y + 1);
    }
    else if(right_to_much && !top_to_much && !down_to_much)
    {
        yys += kul;
        yk += klr;
        way_increment = Diff2D(1, 1);
        border_increment.first = -border_increment.first;
        border_increment.second = -border_increment.second;
        valid_step_count = std::make_pair((src_lr - yys).x, kernel_height);
    }
    else if(down_to_much && !left_to_much && !right_to_much)
    {
        yys += kul;
        yk += klr;
        way_increment = Diff2D(1, 1);
        border_increment.first = -border_increment.first;
        border_increment.second = -border_increment.second;
        valid_step_count = std::make_pair(kernel_width, (src_lr - yys).y);
    }
    else if(down_to_much && left_to_much)
    {
        yys += kul + Diff2D(kernel_width - 1, 0);
        yk += kul + Diff2D(0, kernel_height - 1);
        way_increment = Diff2D(-1, 1);
        border_increment.second = -border_increment.second;
        border_increment.third = -border_increment.third;
        valid_step_count = std::make_pair((yys - src_ul).x + 1, (src_lr - yys).y);
    }
    else if(down_to_much && right_to_much)
    {
        yys += kul;
        yk += klr;
        way_increment = Diff2D(1, 1);
        border_increment.first = -border_increment.first;
        border_increment.second = -border_increment.second;
        valid_step_count = std::make_pair((src_lr - yys).x, (src_lr - yys).y);
    }
    else if(top_to_much && left_to_much)
    {
        yys += klr;
        yk += kul;
        way_increment = Diff2D(-1, -1);
        border_increment.third = -border_increment.third;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair((yys - src_ul).x + 1, (yys - src_ul).y + 1);
    }
    else
    {   // top_to_much && right_to_much
        yys += kul + Diff2D(0, kernel_height - 1);
        yk += kul + Diff2D(kernel_width - 1, 0);
        way_increment = Diff2D(1, -1);
        border_increment.first = -border_increment.first;
        border_increment.fourth = -border_increment.fourth;
        valid_step_count = std::make_pair((src_lr - yys).x, (yys - src_ul).y + 1);
    }

    int yy = 0, xx;

    // iterate over the valid range in y direction
    for(; yy < valid_step_count.second; ++yy, yys.y += way_increment.y, yk.y -= way_increment.y )
    {
        SrcIterator xxs = yys;
        KernelIterator xk = yk;

        // iterate over the valid range in x direction
        for(xx = 0; xx < valid_step_count.first; ++xx, xxs.x += way_increment.x, xk.x -= way_increment.x)
        {
            sum += ak(xk) * src_acc(xxs);
        }

        // the next ++xxs.x would enter the invalid range
        // => jump back into the valid range
        xxs.x += border_increment.first;

        for( ; xx < kernel_width; ++xx, xxs.x += border_increment.third, xk.x -= way_increment.x )
        {
            sum += ak(xk) * src_acc(xxs);
        }
    }

    // the next ++yys.y would enter the invalid range
    // => jump back into the valid range
    yys.y += border_increment.second;

    for( ; yy < kernel_height; ++yy, yys.y += border_increment.third, yk.y -= way_increment.y)
    {
        SrcIterator xxs = yys;
        KernelIterator xk = yk;

        for(xx=0; xx < valid_step_count.first; ++xx, xxs.x += way_increment.x, xk.x -= way_increment.x)
        {
            sum += ak(xk) * src_acc(xxs);
        }

        // jump back into the valid range
        xxs.x += border_increment.first;

        for( ; xx < kernel_width; ++xx, xxs.x += border_increment.third, xk.x -= way_increment.x )
        {
            sum += ak(xk) * src_acc(xxs);
        }
    }

    // store average in destination pixel
    dest_acc.set(DestTraits::fromRealPromote(sum), xd);

} // end of internalPixelEvaluationByWrapReflectRepeat
#endif /* #if 0 */


template <class SrcIterator, class SrcAccessor,
          class KernelIterator, class KernelAccessor,
          class SumType>
void
internalPixelEvaluationByWrapReflectRepeat(SrcIterator xs, SrcAccessor src_acc,
                                           KernelIterator xk, KernelAccessor ak,
                                           int left, int right, int kleft, int kright,
                                           int borderskipx, int borderinc, SumType & sum)
{
    SrcIterator xxs = xs + left;
    KernelIterator xxk = xk - left;

    for(int xx = left; xx <= right; ++xx, ++xxs, --xxk)
    {
        sum += ak(xxk) * src_acc(xxs);
    }

    xxs = xs + left - borderskipx;
    xxk = xk - left + 1;
    for(int xx = left - 1; xx >= -kright; --xx, xxs -= borderinc, ++xxk)
    {
        sum += ak(xxk) * src_acc(xxs);
    }

    xxs = xs + right + borderskipx;
    xxk = xk - right - 1;
    for(int xx = right + 1; xx <= -kleft; ++xx, xxs += borderinc, --xxk)
    {
        sum += ak(xxk) * src_acc(xxs);
    }
}


/** \addtogroup StandardConvolution Two-dimensional convolution functions

    Perform 2D non-separable convolution, with and without ROI mask.

    These generic convolution functions implement
    the standard 2D convolution operation for images that fit
    into the required interface. Arbitrary ROIs are supported
    by the mask version of the algorithm.
    The functions need a suitable 2D kernel to operate.
*/
//@{

/** \brief Performs a 2 dimensional convolution of the source image using the given
    kernel.

    The KernelIterator must point to the center of the kernel, and
    the kernel's size is given by its upper left (x and y of distance <= 0) and
    lower right (distance >= 0) corners. The image must always be larger than the
    kernel. At those positions where the kernel does not completely fit
    into the image, the specified \ref BorderTreatmentMode is
    applied.
    You can choose among the following border treatment modes:
    <ul>
    <li>BORDER_TREATMENT_CLIP</li>
    <li>BORDER_TREATMENT_AVOID</li>
    <li>BORDER_TREATMENT_WRAP</li>
    <li>BORDER_TREATMENT_REFLECT</li>
    <li>BORDER_TREATMENT_REPEAT</li>
    </ul><br>
    The image's pixel type (SrcAccessor::value_type) must be a
    linear space over the kernel's value_type (KernelAccessor::value_type),
    i.e. addition of source values, multiplication with kernel values,
    and NumericTraits must be defined.
    The kernel's value_type must be an algebraic field,
    i.e. the arithmetic operations (+, -, *, /) and NumericTraits must
    be defined.

    <b> Declarations:</b>

    pass arguments explicitly:
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void convolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                           DestIterator dest_ul, DestAccessor dest_acc,
                           KernelIterator ki, KernelAccessor ak,
                           Diff2D kul, Diff2D klr, BorderTreatmentMode border);
    }
    \endcode

    use argument objects in conjunction with \ref ArgumentObjectFactories :
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void convolveImage(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                           pair<DestIterator, DestAccessor> dest,
                           tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
                                  BorderTreatmentMode> kernel);
    }
    \endcode

    <b> Usage:</b>

    <b>\#include</b> \<vigra/stdconvolution.hxx\><br>
    Namespace: vigra

    \code
    vigra::FImage src(w,h), dest(w,h);
    ...

    // define horizontal Sobel filter
    vigra::Kernel2D<float> sobel;

    sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =  // upper left and lower right
                         0.125, 0.0, -0.125,
                         0.25,  0.0, -0.25,
                         0.125, 0.0, -0.125;
    sobel.setBorderTreatment(vigra::BORDER_TREATMENT_REFLECT);

    vigra::convolveImage(srcImageRange(src), destImage(dest), kernel2d(sobel));
    \endcode

    <b> Required Interface:</b>

    \code
    ImageIterator src_ul, src_lr;
    ImageIterator dest_ul;
    ImageIterator ik;

    SrcAccessor src_accessor;
    DestAccessor dest_accessor;
    KernelAccessor kernel_accessor;

    NumericTraits<SrcAccessor::value_type>::RealPromote s = src_accessor(src_ul);

    s = s + s;
    s = kernel_accessor(ik) * s;
    s -= s;

    dest_accessor.set(
        NumericTraits<DestAccessor::value_type>::fromRealPromote(s), dest_ul);

    NumericTraits<KernelAccessor::value_type>::RealPromote k = kernel_accessor(ik);

    k += k;
    k -= k;
    k = k / k;
    \endcode

    <b> Preconditions:</b>

    \code
    kul.x <= 0
    kul.y <= 0
    klr.x >= 0
    klr.y >= 0
    src_lr.x - src_ul.x >= klr.x - kul.x + 1
    src_lr.y - src_ul.y >= klr.y - kul.y + 1
    \endcode

    If border == BORDER_TREATMENT_CLIP: the sum of the kernel elements must be != 0.
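
    The kernel can also be passed explicitly via the five-argument kernel2d()
    factory defined at the end of this file. A sketch (assuming the kernel
    values are stored in a vigra::FImage with its center at (1,1)):

    \code
    vigra::FImage kernelimage(3,3);   // fill with the kernel values
    ...

    vigra::convolveImage(srcImageRange(src), destImage(dest),
                         vigra::kernel2d(kernelimage.upperLeft() + vigra::Diff2D(1,1),
                                         kernelimage.accessor(),
                                         vigra::Diff2D(-1,-1), vigra::Diff2D(1,1),
                                         vigra::BORDER_TREATMENT_REFLECT));
    \endcode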

*/
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor>
void convolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                   DestIterator dest_ul, DestAccessor dest_acc,
                   KernelIterator ki, KernelAccessor ak,
                   Diff2D kul, Diff2D klr, BorderTreatmentMode border)
{
    vigra_precondition((border == BORDER_TREATMENT_CLIP    ||
                        border == BORDER_TREATMENT_AVOID   ||
                        border == BORDER_TREATMENT_REFLECT ||
                        border == BORDER_TREATMENT_REPEAT  ||
                        border == BORDER_TREATMENT_WRAP),
                       "convolveImage():\n"
                       "  Border treatment must be one of the following:\n"
                       "  - BORDER_TREATMENT_CLIP\n"
                       "  - BORDER_TREATMENT_AVOID\n"
                       "  - BORDER_TREATMENT_REFLECT\n"
                       "  - BORDER_TREATMENT_REPEAT\n"
                       "  - BORDER_TREATMENT_WRAP\n");

    vigra_precondition(kul.x <= 0 && kul.y <= 0,
                       "convolveImage(): coordinates of "
                       "kernel's upper left must be <= 0.");
    vigra_precondition(klr.x >= 0 && klr.y >= 0,
                       "convolveImage(): coordinates of "
                       "kernel's lower right must be >= 0.");

    // use traits to determine SumType so as to prevent possible overflow
    typedef typename
        PromoteTraits<typename SrcAccessor::value_type,
                      typename KernelAccessor::value_type>::Promote SumType;
    typedef typename
        NumericTraits<typename KernelAccessor::value_type>::RealPromote KernelSumType;
    typedef typename DestAccessor::value_type DestType;

    // calculate width and height of the image
    int w = src_lr.x - src_ul.x;
    int h = src_lr.y - src_ul.y;

    // calculate width and height of the kernel
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    vigra_precondition(w >= kernel_width && h >= kernel_height,
                       "convolveImage(): kernel larger than image.");

    KernelSumType norm = NumericTraits<KernelSumType>::zero();
    if(border == BORDER_TREATMENT_CLIP)
    {
        // calculate the sum of the kernel elements for renormalization
        KernelIterator yk = ki + klr;

        // determine sum within kernel (= norm)
        for(int y = 0; y < kernel_height; ++y, --yk.y)
        {
            KernelIterator xk = yk;
            for(int x = 0; x < kernel_width; ++x, --xk.x)
            {
                norm += ak(xk);
            }
        }
        vigra_precondition(norm != NumericTraits<KernelSumType>::zero(),
                           "convolveImage(): Cannot use BORDER_TREATMENT_CLIP with a DC-free kernel");
    }

    // create iterators for the interior part of the image (where the kernel always fits into the image)
    DestIterator yd = dest_ul + Diff2D(klr.x, klr.y);
    SrcIterator ys = src_ul + Diff2D(klr.x, klr.y);
    SrcIterator send = src_lr + Diff2D(kul.x, kul.y);

    // iterate over the interior part
    for(; ys.y < send.y; ++ys.y, ++yd.y)
    {
        // create x iterators
        DestIterator xd(yd);
        SrcIterator xs(ys);

        for(; xs.x < send.x; ++xs.x, ++xd.x)
        {
            // init the sum
            SumType sum = NumericTraits<SumType>::zero();

            SrcIterator yys = xs - klr;
            SrcIterator yyend = xs - kul;
            KernelIterator yk = ki + klr;

            for(; yys.y <= yyend.y; ++yys.y, --yk.y)
            {
                typename SrcIterator::row_iterator xxs = yys.rowIterator();
                typename SrcIterator::row_iterator xxe = xxs + kernel_width;
                typename KernelIterator::row_iterator xk = yk.rowIterator();

                for(; xxs < xxe; ++xxs, --xk)
                {
                    sum += ak(xk) * src_acc(xxs);
                }
            }

            // store convolution result in destination pixel
            dest_acc.set(detail::RequiresExplicitCast<DestType>::cast(sum), xd);
        }
    }

    if(border == BORDER_TREATMENT_AVOID)
        return;   // skip processing near the border

    int interiorskip = w + kul.x - klr.x - 1;
    int borderskipx = 0;
    int borderskipy = 0;
    int borderinc = 0;
    if(border == BORDER_TREATMENT_REPEAT)
    {
        borderskipx = 0;
        borderskipy = 0;
        borderinc = 0;
    }
    else if(border == BORDER_TREATMENT_REFLECT)
    {
        borderskipx = -1;
        borderskipy = -1;
        borderinc = -1;
    }
    else if(border == BORDER_TREATMENT_WRAP)
    {
        borderskipx = -w+1;
        borderskipy = -h+1;
        borderinc = 1;
    }

    // create iterators for the entire image
    yd = dest_ul;
    ys = src_ul;

    // work on entire image (but skip the already computed points in the loop)
    for(int y = 0; y < h; ++y, ++ys.y, ++yd.y)
    {
        int top    = int(std::max(static_cast<IntBiggest>(-klr.y),
                                  static_cast<IntBiggest>(src_ul.y - ys.y)));
        int bottom = int(std::min(static_cast<IntBiggest>(-kul.y),
                                  static_cast<IntBiggest>(src_lr.y - ys.y - 1)));

        // create x iterators
        DestIterator xd(yd);
        SrcIterator xs(ys);

        for(int x = 0; x < w; ++x, ++xs.x, ++xd.x)
        {
            // check if we are away from the border
            if(y >= klr.y && y < h+kul.y && x == klr.x)
            {
                // yes => skip the already computed points
                x += interiorskip;
                xs.x += interiorskip;
                xd.x += interiorskip;
                continue;
            }
            if (border == BORDER_TREATMENT_CLIP)
            {
                internalPixelEvaluationByClip(x, y, w, h, xs, src_acc, xd, dest_acc, ki, kul, klr, ak, norm);
            }
            else
            {
                int left  = std::max(-klr.x, src_ul.x - xs.x);
                int right = std::min(-kul.x, src_lr.x - xs.x - 1);

                // init the sum
                SumType sum = NumericTraits<SumType>::zero();

                // create iterators for the part of the kernel that fits into the image
                SrcIterator yys = xs + Size2D(0, top);
                KernelIterator yk = ki - Size2D(0, top);

                int yy;
                for(yy = top; yy <= bottom; ++yy, ++yys.y, --yk.y)
                {
                    internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak,
                                                               left, right, kul.x, klr.x, borderskipx, borderinc, sum);
                }
                yys = xs + Size2D(0, top - borderskipy);
                yk = ki - Size2D(0, top - 1);
                for(yy = top - 1; yy >= -klr.y; --yy, yys.y -= borderinc, ++yk.y)
                {
                    internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak,
                                                               left, right, kul.x, klr.x, borderskipx, borderinc, sum);
                }
                yys = xs + Size2D(0, bottom + borderskipy);
                yk = ki - Size2D(0, bottom + 1);
                for(yy = bottom + 1; yy <= -kul.y; ++yy, yys.y += borderinc, --yk.y)
                {
                    internalPixelEvaluationByWrapReflectRepeat(yys.rowIterator(), src_acc, yk.rowIterator(), ak,
                                                               left, right, kul.x, klr.x, borderskipx, borderinc, sum);
                }

                // store convolution result in destination pixel
                dest_acc.set(detail::RequiresExplicitCast<DestType>::cast(sum), xd);

                // internalPixelEvaluationByWrapReflectRepeat(x, y, w, h, xs, src_acc, xd, dest_acc, ki, kul, klr, ak, border);
            }
        }
    }
}

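// Argument-object overload: unpacks the objects created by srcImageRange(),
// destImage(), and kernel2d() and forwards to the explicit-argument version
// of convolveImage() above.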
template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class KernelIterator, class KernelAccessor>
inline
void convolveImage(
    triple<SrcIterator, SrcIterator, SrcAccessor> src,
    pair<DestIterator, DestAccessor> dest,
    tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
           BorderTreatmentMode> kernel)
{
    convolveImage(src.first, src.second, src.third,
                  dest.first, dest.second,
                  kernel.first, kernel.second, kernel.third,
                  kernel.fourth, kernel.fifth);
}


/** \brief Performs a 2-dimensional normalized convolution, i.e. convolution with a mask image.

    This function computes
    <a href="http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/PIRODDI1/NormConv/NormConv.html">normalized
    convolution</a> as defined in
    Knutsson, H. and Westin, C-F.: <i>Normalized and differential convolution:
    Methods for Interpolation and Filtering of incomplete and uncertain data</i>.
    Proc. of the IEEE Conf. on Computer Vision and Pattern Recognition, 1993, 515-523.

    The mask image must be binary and encodes which pixels of the original image
    are valid. It is used as follows:
    Only pixels under the mask are used in the calculations. Whenever a part of the
    kernel lies outside the mask, it is ignored, and the kernel is renormalized to its
    original norm (analogous to the CLIP \ref BorderTreatmentMode). Thus, a useful convolution
    result is computed whenever <i>at least one valid pixel is within the current window</i>.
    In particular, destination pixels not under the mask still receive a value if they are <i>near</i>
    the mask. Therefore, this algorithm is useful as an interpolator of sparse input data.
    If you are only interested in the destination values under the mask, you can perform
    a subsequent \ref copyImageIf().

    The KernelIterator must point to the center of the kernel, and
    the kernel's size is given by its upper left (x and y of distance <= 0) and
    lower right (distance >= 0) corners. The image must always be larger than the
    kernel. At those positions where the kernel does not completely fit
    into the image, the specified \ref BorderTreatmentMode is
    applied. Only BORDER_TREATMENT_CLIP and BORDER_TREATMENT_AVOID are currently
    supported.

    The image's pixel type (SrcAccessor::value_type) must be a
    linear space over the kernel's value_type (KernelAccessor::value_type),
    i.e. addition of source values, multiplication with kernel values,
    and NumericTraits must be defined.
    The kernel's value_type must be an algebraic field,
    i.e. the arithmetic operations (+, -, *, /) and NumericTraits must
    be defined.
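
    For intuition, at each destination pixel the algorithm effectively evaluates
    the following relation (a sketch, not the literal implementation):

    \code
    dest = (norm / ksum) * sum
    // sum  : weighted sum over the kernel positions that lie inside the image
    //        and whose source pixel is under the mask
    // ksum : sum of the kernel values actually used at this pixel
    // norm : sum of all kernel values
    \endcode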

    <b> Declarations:</b>

    pass arguments explicitly:
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void
        normalizedConvolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                                MaskIterator mul, MaskAccessor am,
                                DestIterator dest_ul, DestAccessor dest_acc,
                                KernelIterator ki, KernelAccessor ak,
                                Diff2D kul, Diff2D klr, BorderTreatmentMode border);
    }
    \endcode

    use argument objects in conjunction with \ref ArgumentObjectFactories :
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void normalizedConvolveImage(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                                     pair<MaskIterator, MaskAccessor> mask,
                                     pair<DestIterator, DestAccessor> dest,
                                     tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
                                            BorderTreatmentMode> kernel);
    }
    \endcode

    <b> Usage:</b>

    <b>\#include</b> \<vigra/stdconvolution.hxx\><br>
    Namespace: vigra

    \code
    vigra::FImage src(w,h), dest(w,h);
    vigra::CImage mask(w,h);
    ...

    // define 3x3 binomial filter
    vigra::Kernel2D<float> binom;

    binom.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =  // upper left and lower right
                         0.0625, 0.125, 0.0625,
                         0.125,  0.25,  0.125,
                         0.0625, 0.125, 0.0625;

    vigra::normalizedConvolveImage(srcImageRange(src), maskImage(mask), destImage(dest), kernel2d(binom));
    \endcode

    <b> Required Interface:</b>

    \code
    ImageIterator src_ul, src_lr;
    ImageIterator mul;
    ImageIterator dest_ul;
    ImageIterator ik;

    SrcAccessor src_accessor;
    MaskAccessor mask_accessor;
    DestAccessor dest_accessor;
    KernelAccessor kernel_accessor;

    NumericTraits<SrcAccessor::value_type>::RealPromote s = src_accessor(src_ul);

    s = s + s;
    s = kernel_accessor(ik) * s;
    s -= s;

    if(mask_accessor(mul)) ...;

    dest_accessor.set(
        NumericTraits<DestAccessor::value_type>::fromRealPromote(s), dest_ul);

    NumericTraits<KernelAccessor::value_type>::RealPromote k = kernel_accessor(ik);

    k += k;
    k -= k;
    k = k / k;
    \endcode

    <b> Preconditions:</b>

    \code
    kul.x <= 0
    kul.y <= 0
    klr.x >= 0
    klr.y >= 0
    src_lr.x - src_ul.x >= klr.x - kul.x + 1
    src_lr.y - src_ul.y >= klr.y - kul.y + 1
    border == BORDER_TREATMENT_CLIP || border == BORDER_TREATMENT_AVOID
    \endcode

    The sum of the kernel elements must be != 0.

*/
doxygen_overloaded_function(template <...> void normalizedConvolveImage)

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
void
normalizedConvolveImage(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                        MaskIterator mul, MaskAccessor am,
                        DestIterator dest_ul, DestAccessor dest_acc,
                        KernelIterator ki, KernelAccessor ak,
                        Diff2D kul, Diff2D klr, BorderTreatmentMode border)
{
    vigra_precondition((border == BORDER_TREATMENT_CLIP ||
                        border == BORDER_TREATMENT_AVOID),
                       "normalizedConvolveImage(): "
                       "Border treatment must be BORDER_TREATMENT_CLIP or BORDER_TREATMENT_AVOID.");

    vigra_precondition(kul.x <= 0 && kul.y <= 0,
                       "normalizedConvolveImage(): left borders must be <= 0.");
    vigra_precondition(klr.x >= 0 && klr.y >= 0,
                       "normalizedConvolveImage(): right borders must be >= 0.");

    // use traits to determine SumType so as to prevent possible overflow
    typedef typename
        NumericTraits<typename SrcAccessor::value_type>::RealPromote SumType;
    typedef typename
        NumericTraits<typename KernelAccessor::value_type>::RealPromote KSumType;
    typedef
        NumericTraits<typename DestAccessor::value_type> DestTraits;

    // calculate width and height of the image
    int w = src_lr.x - src_ul.x;
    int h = src_lr.y - src_ul.y;
    int kernel_width = klr.x - kul.x + 1;
    int kernel_height = klr.y - kul.y + 1;

    int x,y;
    int ystart = (border == BORDER_TREATMENT_AVOID) ?  klr.y : 0;
    int yend   = (border == BORDER_TREATMENT_AVOID) ? h+kul.y : h;
    int xstart = (border == BORDER_TREATMENT_AVOID) ?  klr.x : 0;
    int xend   = (border == BORDER_TREATMENT_AVOID) ? w+kul.x : w;

    // create y iterators
    DestIterator yd = dest_ul + Diff2D(xstart, ystart);
    SrcIterator ys = src_ul + Diff2D(xstart, ystart);
    MaskIterator ym = mul + Diff2D(xstart, ystart);

    KSumType norm = ak(ki);
    int xx, yy;
    KernelIterator yk = ki + klr;
    for(yy=0; yy<kernel_height; ++yy, --yk.y)
    {
        KernelIterator xk = yk;

        for(xx=0; xx<kernel_width; ++xx, --xk.x)
        {
            norm += ak(xk);
        }
    }
    norm -= ak(ki);


    for(y=ystart; y < yend; ++y, ++ys.y, ++yd.y, ++ym.y)
    {
        // create x iterators
        DestIterator xd(yd);
        SrcIterator xs(ys);
        MaskIterator xm(ym);

        for(x=xstart; x < xend; ++x, ++xs.x, ++xd.x, ++xm.x)
        {
            // how much of the kernel fits into the image ?
            int x0, y0, x1, y1;

            y0 = (y<klr.y) ? -y : -klr.y;
            y1 = (h-y-1<-kul.y) ? h-y-1 : -kul.y;
            x0 = (x<klr.x) ? -x : -klr.x;
            x1 = (w-x-1<-kul.x) ? w-x-1 : -kul.x;

            bool first = true;
            // init the sum
            SumType sum;
            KSumType ksum;

            SrcIterator yys = xs + Diff2D(x0, y0);
            MaskIterator yym = xm + Diff2D(x0, y0);
            KernelIterator yk = ki - Diff2D(x0, y0);

            int xx, kernel_width, kernel_height;
            kernel_width = x1 - x0 + 1;
            kernel_height = y1 - y0 + 1;
            for(yy=0; yy<kernel_height; ++yy, ++yys.y, --yk.y, ++yym.y)
            {
                typename SrcIterator::row_iterator xxs = yys.rowIterator();
                typename SrcIterator::row_iterator xxend = xxs + kernel_width;
                typename MaskIterator::row_iterator xxm = yym.rowIterator();
                typename KernelIterator::row_iterator xk = yk.rowIterator();

                for(xx=0; xxs < xxend; ++xxs, --xk, ++xxm)
                {
                    if(!am(xxm)) continue;

                    if(first)
                    {
                        sum = detail::RequiresExplicitCast<SumType>::cast(ak(xk) * src_acc(xxs));
                        ksum = ak(xk);
                        first = false;
                    }
                    else
                    {
                        sum = detail::RequiresExplicitCast<SumType>::cast(sum + ak(xk) * src_acc(xxs));
                        ksum += ak(xk);
                    }
                }
            }
            // store average in destination pixel
            if(!first &&
               ksum != NumericTraits<KSumType>::zero())
            {
                dest_acc.set(DestTraits::fromRealPromote(
                                 detail::RequiresExplicitCast<SumType>::cast((norm / ksum) * sum)), xd);
            }
        }
    }
}


template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
inline
void normalizedConvolveImage(
    triple<SrcIterator, SrcIterator, SrcAccessor> src,
    pair<MaskIterator, MaskAccessor> mask,
    pair<DestIterator, DestAccessor> dest,
    tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
           BorderTreatmentMode> kernel)
{
    normalizedConvolveImage(src.first, src.second, src.third,
                            mask.first, mask.second,
                            dest.first, dest.second,
                            kernel.first, kernel.second, kernel.third,
                            kernel.fourth, kernel.fifth);
}

/** \brief Deprecated name of 2-dimensional normalized convolution, i.e. convolution with a mask image.

    See \ref normalizedConvolveImage() for documentation.
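
    This name is kept for backwards compatibility only; it forwards all arguments
    unchanged, so a call such as the following sketch

    \code
    vigra::convolveImageWithMask(srcImageRange(src), maskImage(mask), destImage(dest), kernel2d(binom));
    \endcode

    is equivalent to the corresponding \ref normalizedConvolveImage() call.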

    <b> Declarations:</b>

    pass arguments explicitly:
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void
        convolveImageWithMask(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                              MaskIterator mul, MaskAccessor am,
                              DestIterator dest_ul, DestAccessor dest_acc,
                              KernelIterator ki, KernelAccessor ak,
                              Diff2D kul, Diff2D klr, BorderTreatmentMode border);
    }
    \endcode

    use argument objects in conjunction with \ref ArgumentObjectFactories :
    \code
    namespace vigra {
        template <class SrcIterator, class SrcAccessor,
                  class MaskIterator, class MaskAccessor,
                  class DestIterator, class DestAccessor,
                  class KernelIterator, class KernelAccessor>
        void convolveImageWithMask(triple<SrcIterator, SrcIterator, SrcAccessor> src,
                                   pair<MaskIterator, MaskAccessor> mask,
                                   pair<DestIterator, DestAccessor> dest,
                                   tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
                                          BorderTreatmentMode> kernel);
    }
    \endcode
*/
doxygen_overloaded_function(template <...> void convolveImageWithMask)

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
inline void
convolveImageWithMask(SrcIterator src_ul, SrcIterator src_lr, SrcAccessor src_acc,
                      MaskIterator mul, MaskAccessor am,
                      DestIterator dest_ul, DestAccessor dest_acc,
                      KernelIterator ki, KernelAccessor ak,
                      Diff2D kul, Diff2D klr, BorderTreatmentMode border)
{
    normalizedConvolveImage(src_ul, src_lr, src_acc,
                            mul, am,
                            dest_ul, dest_acc,
                            ki, ak, kul, klr, border);
}

template <class SrcIterator, class SrcAccessor,
          class DestIterator, class DestAccessor,
          class MaskIterator, class MaskAccessor,
          class KernelIterator, class KernelAccessor>
inline
void convolveImageWithMask(
    triple<SrcIterator, SrcIterator, SrcAccessor> src,
    pair<MaskIterator, MaskAccessor> mask,
    pair<DestIterator, DestAccessor> dest,
    tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D,
           BorderTreatmentMode> kernel)
{
    normalizedConvolveImage(src.first, src.second, src.third,
                            mask.first, mask.second,
                            dest.first, dest.second,
                            kernel.first, kernel.second, kernel.third,
                            kernel.fourth, kernel.fifth);
}

//@}

/********************************************************/
/*                                                      */
/*                      Kernel2D                        */
/*                                                      */
/********************************************************/

/** \brief Generic 2 dimensional convolution kernel.

    This kernel may be used for convolution of 2 dimensional signals.

    Convolution functions access the kernel via an ImageIterator
    which they get by calling \ref center(). This iterator
    points to the center of the kernel. The kernel's size is given by its upperLeft()
    (upperLeft().x <= 0, upperLeft().y <= 0)
    and lowerRight() (lowerRight().x >= 0, lowerRight().y >= 0) methods.
    The desired border treatment mode is returned by borderTreatment().
    (Note that the \ref StandardConvolution "2D convolution functions" don't currently
    support all modes.)

    The different init functions create a kernel with the specified
    properties. The requirements for the kernel's value_type depend
    on the init function used. At least NumericTraits must be defined.

    The kernel defines a factory function kernel2d() to create an argument object
    (see \ref KernelArgumentObjectFactories).

    <b> Usage:</b>

    <b>\#include</b> \<vigra/stdconvolution.hxx\><br>
    Namespace: vigra

    \code
    vigra::FImage src(w,h), dest(w,h);
    ...

    // define horizontal Sobel filter
    vigra::Kernel2D<float> sobel;

    sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =  // upper left and lower right
                         0.125, 0.0, -0.125,
                         0.25,  0.0, -0.25,
                         0.125, 0.0, -0.125;

    vigra::convolveImage(srcImageRange(src), destImage(dest), kernel2d(sobel));
    \endcode

    <b> Required Interface:</b>

    \code
    value_type v = NumericTraits<value_type>::one();
    \endcode

    See also the init functions.
*/
template <class ARITHTYPE>
class Kernel2D
{
  public:
    /** the kernel's value type
    */
    typedef ARITHTYPE value_type;

    /** 2D random access iterator over the kernel's values
    */
    typedef typename BasicImage<value_type>::traverser Iterator;

    /** const 2D random access iterator over the kernel's values
    */
    typedef typename BasicImage<value_type>::const_traverser ConstIterator;

    /** the kernel's accessor
    */
    typedef typename BasicImage<value_type>::Accessor Accessor;

    /** the kernel's const accessor
    */
    typedef typename BasicImage<value_type>::ConstAccessor ConstAccessor;

    struct InitProxy
    {
        typedef typename
            BasicImage<value_type>::ScanOrderIterator Iterator;

        InitProxy(Iterator i, int count, value_type & norm)
            : iter_(i), base_(i),
              count_(count), sum_(count),
              norm_(norm)
        {}

        ~InitProxy()
        {
            vigra_precondition(count_ == 1 || count_ == sum_,
                               "Kernel2D::initExplicitly(): "
                               "Too few init values.");
        }

        InitProxy & operator,(value_type const & v)
        {
            if(count_ == sum_)  norm_ = *iter_;

            --count_;
            vigra_precondition(count_ > 0,
                               "Kernel2D::initExplicitly(): "
                               "Too many init values.");

            norm_ += v;

            ++iter_;
            *iter_ = v;

            return *this;
        }

        Iterator iter_, base_;
        int count_, sum_;
        value_type & norm_;
    };

    static value_type one() { return NumericTraits<value_type>::one(); }

    /** Default constructor.
        Creates a kernel of size 1x1 which would copy the signal
        unchanged.
    */
    Kernel2D()
        : kernel_(1, 1, one()),
          left_(0, 0),
          right_(0, 0),
          norm_(one()),
          border_treatment_(BORDER_TREATMENT_CLIP)
    {}

    /** Copy constructor.
    */
    Kernel2D(Kernel2D const & k)
        : kernel_(k.kernel_),
          left_(k.left_),
          right_(k.right_),
          norm_(k.norm_),
          border_treatment_(k.border_treatment_)
    {}

    /** Copy assignment.
    */
    Kernel2D & operator=(Kernel2D const & k)
    {
        if(this != &k)
        {
            kernel_ = k.kernel_;
            left_ = k.left_;
            right_ = k.right_;
            norm_ = k.norm_;
            border_treatment_ = k.border_treatment_;
        }
        return *this;
    }

    /** Initialization.
        This initializes the kernel with the given constant. The norm becomes
        v*width()*height().

        Instead of a single value an initializer list of length width()*height()
        can be used like this:

        \code
        vigra::Kernel2D<float> binom;

        binom.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =
                             0.0625, 0.125, 0.0625,
                             0.125,  0.25,  0.125,
                             0.0625, 0.125, 0.0625;
        \endcode

        In this case, the norm will be set to the sum of the init values.
        An initializer list of wrong length will result in a run-time error.
    */
    InitProxy operator=(value_type const & v)
    {
        int size = (right_.x - left_.x + 1) *
                   (right_.y - left_.y + 1);
        kernel_ = v;
        norm_ = (double)size*v;

        return InitProxy(kernel_.begin(), size, norm_);
    }

    /** Destructor.
    */
    ~Kernel2D()
    {}

    /** Init the 2D kernel as the cartesian product of two 1D kernels
        of type \ref Kernel1D. The norm becomes the product of the two original
        norms.

        <b> Required Interface:</b>

        The kernel's value_type must be a linear algebra.

        \code
        vigra::Kernel2D<...>::value_type v;
        v = v * v;
        \endcode
    */
    void initSeparable(Kernel1D<value_type> const & kx,
                       Kernel1D<value_type> const & ky)
    {
        left_ = Diff2D(kx.left(), ky.left());
        right_ = Diff2D(kx.right(), ky.right());
        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);

        norm_ = kx.norm() * ky.norm();

        typedef typename Kernel1D<value_type>::const_iterator KIter;
        typename Kernel1D<value_type>::Accessor ka;

        KIter kiy = ky.center() + left_.y;
        Iterator iy = center() + left_;

        for(int y=left_.y; y<=right_.y; ++y, ++kiy, ++iy.y)
        {
            KIter kix = kx.center() + left_.x;
            Iterator ix = iy;
            for(int x=left_.x; x<=right_.x; ++x, ++kix, ++ix.x)
            {
                *ix = ka(kix) * ka(kiy);
            }
        }
    }

    /** Init the 2D kernel as the cartesian product of two 1D kernels
        given explicitly by iterators and sizes. The norm becomes the
        sum of the resulting kernel values.

        <b> Required Interface:</b>

        The kernel's value_type must be a linear algebra.

        \code
        vigra::Kernel2D<...>::value_type v;
        v = v * v;
        v += v;
        \endcode

        <b> Preconditions:</b>

        \code
        xleft <= 0;
        xright >= 0;
        yleft <= 0;
        yright >= 0;
        \endcode
    */
    template <class KernelIterator>
    void initSeparable(KernelIterator kxcenter, int xleft, int xright,
                       KernelIterator kycenter, int yleft, int yright)
    {
        vigra_precondition(xleft <= 0 && yleft <= 0,
                           "Kernel2D::initSeparable(): left borders must be <= 0.");
        vigra_precondition(xright >= 0 && yright >= 0,
                           "Kernel2D::initSeparable(): right borders must be >= 0.");

        left_ = Point2D(xleft, yleft);
        right_ = Point2D(xright, yright);

        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);

        KernelIterator kiy = kycenter + left_.y;
        Iterator iy = center() + left_;

        for(int y=left_.y; y<=right_.y; ++y, ++kiy, ++iy.y)
        {
            KernelIterator kix = kxcenter + left_.x;
            Iterator ix = iy;
            for(int x=left_.x; x<=right_.x; ++x, ++kix, ++ix.x)
            {
                *ix = *kix * *kiy;
            }
        }

        typename BasicImage<value_type>::iterator i = kernel_.begin();
        typename BasicImage<value_type>::iterator iend = kernel_.end();
        norm_ = *i;
        ++i;

        for(; i!= iend; ++i)
        {
            norm_ += *i;
        }
    }

    /** Init as a 2D Gaussian function with given standard deviation and norm.
    */
    void initGaussian(double std_dev, value_type norm)
    {
        Kernel1D<value_type> gauss;
        gauss.initGaussian(std_dev, norm);
        initSeparable(gauss, gauss);
    }

    /** Init as a 2D Gaussian function with given standard deviation and unit norm.
    */
    void initGaussian(double std_dev)
    {
        initGaussian(std_dev, NumericTraits<value_type>::one());
    }

    /** Init the 2D kernel as a circular averaging filter. The norm will be
        calculated as
        <TT>NumericTraits<value_type>::one() / (number of non-zero kernel values)</TT>.
        The kernel's value_type must be a linear space.

        <b> Required Interface:</b>

        \code
        value_type v = vigra::NumericTraits<value_type>::one();

        double d;
        v = d * v;
        \endcode

        <b> Precondition:</b>

        \code
        radius > 0;
        \endcode
    */
    void initDisk(int radius)
    {
        vigra_precondition(radius > 0,
                           "Kernel2D::initDisk(): radius must be > 0.");

        left_ = Point2D(-radius, -radius);
        right_ = Point2D(radius, radius);
        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);
        norm_ = NumericTraits<value_type>::one();

        kernel_ = NumericTraits<value_type>::zero();
        double count = 0.0;

        Iterator k = center();
        double r2 = (double)radius*radius;

        int i;
        for(i=0; i<= radius; ++i)
        {
            double r = (double) i - 0.5;
            int w = (int)(VIGRA_CSTD::sqrt(r2 - r*r) + 0.5);
            for(int j=-w; j<=w; ++j)
            {
                k(j, i) = NumericTraits<value_type>::one();
                k(j, -i) = NumericTraits<value_type>::one();
                count += (i != 0) ? 2.0 : 1.0;
            }
        }

        count = 1.0 / count;

        for(int y=-radius; y<=radius; ++y)
        {
            for(int x=-radius; x<=radius; ++x)
            {
                k(x,y) = count * k(x,y);
            }
        }
    }

    /** Init the kernel by an explicit initializer list.
        The upper left and lower right corners of the kernel must be passed.
        A comma-separated initializer list is given after the assignment operator.
        This function is used like this:

        \code
        // define horizontal Sobel filter
        vigra::Kernel2D<float> sobel;

        sobel.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) =
                             0.125, 0.0, -0.125,
                             0.25,  0.0, -0.25,
                             0.125, 0.0, -0.125;
        \endcode

        The norm is set to the sum of the initializer values. If the wrong number of
        values is given, a run-time error results. It is, however, possible to give
        just one initializer. This creates an averaging filter with the given constant:

        \code
        vigra::Kernel2D<float> average3x3;

        average3x3.initExplicitly(Diff2D(-1,-1), Diff2D(1,1)) = 1.0/9.0;
        \endcode

        Here, the norm is set to value*width()*height().

        <b> Preconditions:</b>

        \code
        1. upperleft.x <= 0;
        2. upperleft.y <= 0;
        3. lowerright.x >= 0;
        4. lowerright.y >= 0;
        5. the number of values in the initializer list
           is 1 or equals the size of the kernel.
        \endcode
    */
    Kernel2D & initExplicitly(Diff2D upperleft, Diff2D lowerright)
    {
        vigra_precondition(upperleft.x <= 0 && upperleft.y <= 0,
                           "Kernel2D::initExplicitly(): left borders must be <= 0.");
        vigra_precondition(lowerright.x >= 0 && lowerright.y >= 0,
                           "Kernel2D::initExplicitly(): right borders must be >= 0.");

        left_ = Point2D(upperleft);
        right_ = Point2D(lowerright);

        int w = right_.x - left_.x + 1;
        int h = right_.y - left_.y + 1;
        kernel_.resize(w, h);

        return *this;
    }

    /** Coordinates of the upper left corner of the kernel.
    */
    Point2D upperLeft() const { return left_; }

    /** Coordinates of the lower right corner of the kernel.
    */
    Point2D lowerRight() const { return right_; }

    /** Width of the kernel.
    */
    int width() const { return right_.x - left_.x + 1; }

    /** Height of the kernel.
    */
    int height() const { return right_.y - left_.y + 1; }

    /** ImageIterator that points to the center of the kernel (coordinate (0,0)).
    */
    Iterator center() { return kernel_.upperLeft() - left_; }

    /** ImageIterator that points to the center of the kernel (coordinate (0,0)).
    */
    ConstIterator center() const { return kernel_.upperLeft() - left_; }

    /** Access kernel entry at given position.
    */
    value_type & operator()(int x, int y)
    { return kernel_[Diff2D(x,y) - left_]; }

    /** Read kernel entry at given position.
    */
    value_type operator()(int x, int y) const
    { return kernel_[Diff2D(x,y) - left_]; }

    /** Access kernel entry at given position.
    */
    value_type & operator[](Diff2D const & d)
    { return kernel_[d - left_]; }

    /** Read kernel entry at given position.
    */
    value_type operator[](Diff2D const & d) const
    { return kernel_[d - left_]; }

    /** Norm of the kernel (i.e. sum of its elements).
    */
    value_type norm() const { return norm_; }

    /** The kernel's default accessor.
    */
    Accessor accessor() { return Accessor(); }

    /** The kernel's default const accessor.
    */
    ConstAccessor accessor() const { return ConstAccessor(); }

    /** Normalize the kernel to the given value. (The norm is the sum of all kernel
        elements.) The kernel's value_type must be a division algebra or
        algebraic field.

        <b> Required Interface:</b>

        \code
        value_type v = vigra::NumericTraits<value_type>::one(); // if norm is not
                                                                // given explicitly

        v += v;
        v = v * v;
        v = v / v;
        \endcode
    */
    void normalize(value_type norm)
    {
        typename BasicImage<value_type>::iterator i = kernel_.begin();
        typename BasicImage<value_type>::iterator iend = kernel_.end();
        typename NumericTraits<value_type>::RealPromote sum = *i;
        ++i;

        for(; i!= iend; ++i)
        {
            sum += *i;
        }

        sum = norm / sum;
        i = kernel_.begin();
        for(; i != iend; ++i)
        {
            *i = *i * sum;
        }

        norm_ = norm;
    }

    /** Normalize the kernel to norm 1.
    */
    void normalize()
    {
        normalize(one());
    }

    /** current border treatment mode
    */
    BorderTreatmentMode borderTreatment() const
    { return border_treatment_; }

    /** Set border treatment mode.
        <TT>BORDER_TREATMENT_CLIP</TT>, <TT>BORDER_TREATMENT_AVOID</TT>,
        <TT>BORDER_TREATMENT_REFLECT</TT>, <TT>BORDER_TREATMENT_REPEAT</TT>, and
        <TT>BORDER_TREATMENT_WRAP</TT> are currently allowed.
    */
    void setBorderTreatment( BorderTreatmentMode new_mode)
    {
        vigra_precondition((new_mode == BORDER_TREATMENT_CLIP    ||
                            new_mode == BORDER_TREATMENT_AVOID   ||
                            new_mode == BORDER_TREATMENT_REFLECT ||
                            new_mode == BORDER_TREATMENT_REPEAT  ||
                            new_mode == BORDER_TREATMENT_WRAP),
                           "Kernel2D::setBorderTreatment():\n"
                           "  Border treatment must be one of the following:\n"
                           "  - BORDER_TREATMENT_CLIP\n"
                           "  - BORDER_TREATMENT_AVOID\n"
                           "  - BORDER_TREATMENT_REFLECT\n"
                           "  - BORDER_TREATMENT_REPEAT\n"
                           "  - BORDER_TREATMENT_WRAP\n");

        border_treatment_ = new_mode;
    }


  private:
    BasicImage<value_type> kernel_;
    Point2D left_, right_;
    value_type norm_;
    BorderTreatmentMode border_treatment_;
};

/**************************************************************/
/*                                                            */
/*         Argument object factories for Kernel2D             */
/*                                                            */
/*     (documentation: see vigra/convolution.hxx)             */
/*                                                            */
/**************************************************************/

template <class KernelIterator, class KernelAccessor>
inline
tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, BorderTreatmentMode>
kernel2d(KernelIterator ik, KernelAccessor ak, Diff2D kul, Diff2D klr,
         BorderTreatmentMode border)
{
    return
        tuple5<KernelIterator, KernelAccessor, Diff2D, Diff2D, BorderTreatmentMode> (
            ik, ak, kul, klr, border);
}

template <class T>
inline
tuple5<typename Kernel2D<T>::ConstIterator,
       typename Kernel2D<T>::ConstAccessor,
       Diff2D, Diff2D, BorderTreatmentMode>
kernel2d(Kernel2D<T> const & k)
{
    return
        tuple5<typename Kernel2D<T>::ConstIterator,
               typename Kernel2D<T>::ConstAccessor,
               Diff2D, Diff2D, BorderTreatmentMode>(
            k.center(),
            k.accessor(),
            k.upperLeft(), k.lowerRight(),
            k.borderTreatment());
}

template <class T>
inline
tuple5<typename Kernel2D<T>::ConstIterator,
       typename Kernel2D<T>::ConstAccessor,
       Diff2D, Diff2D, BorderTreatmentMode>
kernel2d(Kernel2D<T> const & k, BorderTreatmentMode border)
{
    return
        tuple5<typename Kernel2D<T>::ConstIterator,
               typename Kernel2D<T>::ConstAccessor,
               Diff2D, Diff2D, BorderTreatmentMode>(
            k.center(),
            k.accessor(),
            k.upperLeft(), k.lowerRight(),
            border);
}


} // namespace vigra

#endif // VIGRA_STDCONVOLUTION_HXX