pyramid2.h
1 /*
2  * Copyright (C) 2004-2007 Andrew Mihal
3  *
4  * This file is part of Enblend.
5  *
6  * Stripped down and adjusted for use in hugin by Pablo d'Angelo
7  *
8  * Enblend is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * Enblend is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with Enblend; if not, see
20  * <http://www.gnu.org/licenses/>.
21  */
22 #ifndef __PYRAMID_H__
23 #define __PYRAMID_H__
24 
25 #ifdef HAVE_CONFIG_H
26 #include <config.h>
27 #endif
28 
29 #include <functional>
30 #include <vector>
31 
32 #include <vigra/convolution.hxx>
33 #include <vigra/error.hxx>
34 #include <vigra/inspectimage.hxx>
35 #include <vigra/numerictraits.hxx>
36 #include <vigra/rgbvalue.hxx>
37 #include <vigra/sized_int.hxx>
38 #include <vigra/transformimage.hxx>
39 
40 //#include "fixmath.h"
41 
42 namespace enblend {
43 
44 using std::cout;
45 using std::vector;
46 using std::pair;
47 
48 using vigra::linearRangeMapping;
49 using vigra::NumericTraits;
51 using vigra::triple;
52 using vigra::UInt16;
53 using vigra::UInt16Image;
54 using vigra::UInt16RGBImage;
55 using vigra::Diff2D;
56 
57 
58 #define IMUL6(A) (A * SKIPSMImagePixelType(6))
59 #define IMUL5(A) (A * SKIPSMImagePixelType(5))
60 #define IMUL11(A) (A * SKIPSMImagePixelType(11))
61 #define AMUL6(A) (A * SKIPSMAlphaPixelType(6))
62 
63 /** Calculate the half-width of an n-level filter.
67  */
68 template <typename ImagePixelComponentType>
69 unsigned int filterHalfWidth(const unsigned int levels) {
70  // For levels >= 30, the full width will just barely fit in int32.
71  // When this range is added to a bounding box it will certainly
72  // overflow the Diff2D.
73  vigra_precondition((levels >= 1 && levels <= 29),
74  "filterHalfWidth: levels outside of range [1,29]");
75 
76  // This is the arithmetic half width.
77  int halfWidth = (levels == 1) ? 0 : ((1 << (levels+1)) - 4);
78 
79  return halfWidth;
80 }
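// Worked example (added for illustration; the values follow directly from the
// formula above, and the template argument is only a placeholder here):
//   filterHalfWidth<vigra::UInt16>(2)  == (1 << 3)  - 4 == 4
//   filterHalfWidth<vigra::UInt16>(10) == (1 << 11) - 4 == 2044
//   filterHalfWidth<vigra::UInt16>(29) == (1 << 30) - 4 == 1073741820
// The last value still fits in a signed 32-bit int, which is why the precondition
// stops at 29 levels.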
81 
82 /** The Burt & Adelson Reduce operation.
172  */
173 template <typename SKIPSMImagePixelType, typename SKIPSMAlphaPixelType,
174  typename SrcImageIterator, typename SrcAccessor,
175  typename AlphaIterator, typename AlphaAccessor,
176  typename DestImageIterator, typename DestAccessor,
177  typename DestAlphaIterator, typename DestAlphaAccessor>
178 inline void reduce(bool wraparound,
179  SrcImageIterator src_upperleft,
180  SrcImageIterator src_lowerright,
181  SrcAccessor sa,
182  AlphaIterator alpha_upperleft,
183  AlphaAccessor aa,
184  DestImageIterator dest_upperleft,
185  DestImageIterator dest_lowerright,
186  DestAccessor da,
187  DestAlphaIterator dest_alpha_upperleft,
188  DestAlphaIterator dest_alpha_lowerright,
189  DestAlphaAccessor daa) {
190 
191  typedef typename DestAccessor::value_type DestPixelType;
192  typedef typename DestAlphaAccessor::value_type DestAlphaPixelType;
193 
194  int src_w = src_lowerright.x - src_upperleft.x;
195  int src_h = src_lowerright.y - src_upperleft.y;
196  int dst_w = dest_lowerright.x - dest_upperleft.x;
197  //int dst_h = dest_lowerright.y - dest_upperleft.y;
198 
199  vigra_precondition(src_w > 1 && src_h > 1,
200  "src image too small in reduce");
201 
202  // State variables for source image pixel values
203  SKIPSMImagePixelType isr0, isr1, isrp;
204  SKIPSMImagePixelType *isc0 = new SKIPSMImagePixelType[dst_w + 1];
205  SKIPSMImagePixelType *isc1 = new SKIPSMImagePixelType[dst_w + 1];
206  SKIPSMImagePixelType *iscp = new SKIPSMImagePixelType[dst_w + 1];
207 
208  // State variables for source mask pixel values
209  SKIPSMAlphaPixelType asr0, asr1, asrp;
210  SKIPSMAlphaPixelType *asc0 = new SKIPSMAlphaPixelType[dst_w + 1];
211  SKIPSMAlphaPixelType *asc1 = new SKIPSMAlphaPixelType[dst_w + 1];
212  SKIPSMAlphaPixelType *ascp = new SKIPSMAlphaPixelType[dst_w + 1];
213 
214  // Convenient constants
215  const SKIPSMImagePixelType SKIPSMImageZero(NumericTraits<SKIPSMImagePixelType>::zero());
216  const SKIPSMAlphaPixelType SKIPSMAlphaZero(NumericTraits<SKIPSMAlphaPixelType>::zero());
217  const SKIPSMAlphaPixelType SKIPSMAlphaOne(NumericTraits<SKIPSMAlphaPixelType>::one());
218  const DestPixelType DestImageZero(NumericTraits<DestPixelType>::zero());
219  const DestAlphaPixelType DestAlphaZero(NumericTraits<DestAlphaPixelType>::zero());
220  const DestAlphaPixelType DestAlphaMax(NumericTraits<DestAlphaPixelType>::max());
221 
222  DestImageIterator dy = dest_upperleft;
223  DestImageIterator dx = dy;
224  SrcImageIterator sy = src_upperleft;
225  SrcImageIterator sx = sy;
226  AlphaIterator ay = alpha_upperleft;
227  AlphaIterator ax = ay;
228  DestAlphaIterator day = dest_alpha_upperleft;
229  DestAlphaIterator dax = day;
230 
231  bool evenY = true;
232  bool evenX = true;
233  int srcy = 0;
234  int srcx = 0;
235  //int dsty = 0;
236  int dstx = 0;
237 
238  // First row
239  {
240  if (wraparound) {
241  asr0 = aa(ay, Diff2D(src_w-2, 0)) ? SKIPSMAlphaOne : SKIPSMAlphaZero;
242  asr1 = SKIPSMAlphaZero;
243  asrp = aa(ay, Diff2D(src_w-1, 0)) ? (SKIPSMAlphaOne * 4) : SKIPSMAlphaZero;
244  isr0 = aa(ay, Diff2D(src_w-2, 0)) ? SKIPSMImagePixelType(sa(sy, Diff2D(src_w-2, 0))) : SKIPSMImageZero;
245  isr1 = SKIPSMImageZero;
246  isrp = aa(ay, Diff2D(src_w-1, 0)) ? (SKIPSMImagePixelType(sa(sy, Diff2D(src_w-1, 0))) * 4) : SKIPSMImageZero;
247  } else {
248  asr0 = SKIPSMAlphaZero;
249  asr1 = SKIPSMAlphaZero;
250  asrp = SKIPSMAlphaZero;
251  isr0 = SKIPSMImageZero;
252  isr1 = SKIPSMImageZero;
253  isrp = SKIPSMImageZero;
254  }
255 
256  for (sx = sy, ax = ay, evenX = true, srcx = 0, dstx = 0; srcx < src_w; ++srcx, ++sx.x, ++ax.x) {
257  SKIPSMAlphaPixelType mcurrent(aa(ax) ? SKIPSMAlphaOne : SKIPSMAlphaZero);
258  SKIPSMImagePixelType icurrent(aa(ax) ? SKIPSMImagePixelType(sa(sx)) : SKIPSMImageZero);
259  if (evenX) {
260  asc1[dstx] = SKIPSMAlphaZero;
261  asc0[dstx] = asr1 + AMUL6(asr0) + asrp + mcurrent;
262  asr1 = asr0 + asrp;
263  asr0 = mcurrent;
264  isc1[dstx] = SKIPSMImageZero;
265  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + icurrent;
266  isr1 = isr0 + isrp;
267  isr0 = icurrent;
268  }
269  else {
270  asrp = mcurrent * 4;
271  isrp = icurrent * 4;
272  ++dstx;
273  }
274  evenX = !evenX;
275  }
276 
277  // Last entries in first row
278  if (!evenX) {
279  // previous srcx was even
280  ++dstx;
281  if (wraparound) {
282  asc1[dstx] = SKIPSMAlphaZero;
283  asc0[dstx] = asr1 + AMUL6(asr0) + (aa(ay) ? (SKIPSMAlphaOne * 4) : SKIPSMAlphaZero) + (aa(ay, Diff2D(1,0)) ? SKIPSMAlphaOne : SKIPSMAlphaZero);
284  isc1[dstx] = SKIPSMImageZero;
285  isc0[dstx] = isr1 + IMUL6(isr0) + (aa(ay) ? (SKIPSMImagePixelType(sa(sy)) * 4) : SKIPSMImageZero)
286  + (aa(ay, Diff2D(1,0)) ? SKIPSMImagePixelType(sa(sy, Diff2D(1,0))) : SKIPSMImageZero);
287  } else {
288  asc1[dstx] = SKIPSMAlphaZero;
289  asc0[dstx] = asr1 + AMUL6(asr0);
290  isc1[dstx] = SKIPSMImageZero;
291  isc0[dstx] = isr1 + IMUL6(isr0);
292  }
293  }
294  else {
295  // previous srcx was odd
296  if (wraparound) {
297  asc1[dstx] = SKIPSMAlphaZero;
298  asc0[dstx] = asr1 + AMUL6(asr0) + asrp + (aa(ay) ? SKIPSMAlphaOne : SKIPSMAlphaZero);
299  isc1[dstx] = SKIPSMImageZero;
300  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + (aa(ay) ? SKIPSMImagePixelType(sa(sy)) : SKIPSMImageZero);
301  } else {
302  asc1[dstx] = SKIPSMAlphaZero;
303  asc0[dstx] = asr1 + AMUL6(asr0) + asrp;
304  isc1[dstx] = SKIPSMImageZero;
305  isc0[dstx] = isr1 + IMUL6(isr0) + isrp;
306  }
307  }
308  }
309  ++sy.y;
310  ++ay.y;
311 
312  // Main Rows
313  {
314  for (evenY = false, srcy = 1; srcy < src_h; ++srcy, ++sy.y, ++ay.y) {
315 
316  if (wraparound) {
317  asr0 = aa(ay, Diff2D(src_w-2, 0)) ? SKIPSMAlphaOne : SKIPSMAlphaZero;
318  asr1 = SKIPSMAlphaZero;
319  asrp = aa(ay, Diff2D(src_w-1, 0)) ? (SKIPSMAlphaOne * 4) : SKIPSMAlphaZero;
320  isr0 = aa(ay, Diff2D(src_w-2, 0)) ? SKIPSMImagePixelType(sa(sy, Diff2D(src_w-2,0))) : SKIPSMImageZero;
321  isr1 = SKIPSMImageZero;
322  isrp = aa(ay, Diff2D(src_w-1, 0)) ? (SKIPSMImagePixelType(sa(sy, Diff2D(src_w-1,0))) * 4) : SKIPSMImageZero;
323  } else {
324  asr0 = SKIPSMAlphaZero;
325  asr1 = SKIPSMAlphaZero;
326  asrp = SKIPSMAlphaZero;
327  isr0 = SKIPSMImageZero;
328  isr1 = SKIPSMImageZero;
329  isrp = SKIPSMImageZero;
330  }
331 
332  if (evenY) {
333  // Even-numbered row
334 
335  // First entry in row
336  sx = sy;
337  ax = ay;
338  if (wraparound) {
339  asr1 = asr0 + asrp;
340  isr1 = isr0 + isrp;
341  }
342  asr0 = aa(ax) ? SKIPSMAlphaOne : SKIPSMAlphaZero;
343  isr0 = aa(ax) ? SKIPSMImagePixelType(sa(sx)) : SKIPSMImageZero;
344  // isc*[0] are never used
345  ++sx.x;
346  ++ax.x;
347  dx = dy;
348  dax = day;
349 
350  // Main entries in row
351  for (evenX = false, srcx = 1, dstx = 0; srcx < src_w; ++srcx, ++sx.x, ++ax.x) {
352  SKIPSMAlphaPixelType mcurrent(aa(ax) ? SKIPSMAlphaOne : SKIPSMAlphaZero);
353  SKIPSMImagePixelType icurrent(aa(ax) ? SKIPSMImagePixelType(sa(sx)) : SKIPSMImageZero);
354  if (evenX) {
355  SKIPSMAlphaPixelType ap = asc1[dstx] + AMUL6(asc0[dstx]) + ascp[dstx];
356  asc1[dstx] = asc0[dstx] + ascp[dstx];
357  asc0[dstx] = asr1 + AMUL6(asr0) + asrp + mcurrent;
358  asr1 = asr0 + asrp;
359  asr0 = mcurrent;
360  ap += asc0[dstx];
361 
362  SKIPSMImagePixelType ip = isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx];
363  isc1[dstx] = isc0[dstx] + iscp[dstx];
364  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + icurrent;
365  isr1 = isr0 + isrp;
366  isr0 = icurrent;
367  if (ap) {
368  ip += isc0[dstx];
369  ip /= ap;
370  da.set(DestPixelType(ip), dx);
371  daa.set(DestAlphaMax, dax);
372  } else {
373  da.set(DestImageZero, dx);
374  daa.set(DestAlphaZero, dax);
375  }
376 
377  ++dx.x;
378  ++dax.x;
379  }
380  else {
381  asrp = mcurrent * 4;
382  isrp = icurrent * 4;
383  ++dstx;
384  }
385  evenX = !evenX;
386  }
387 
388  // Last entries in row
389  if (!evenX) {
390  // previous srcx was even
391  ++dstx;
392 
393  SKIPSMAlphaPixelType ap = asc1[dstx] + AMUL6(asc0[dstx]) + ascp[dstx];
394  asc1[dstx] = asc0[dstx] + ascp[dstx];
395  if (wraparound) {
396  asc0[dstx] = asr1 + AMUL6(asr0) + (aa(ay) ? (SKIPSMAlphaOne * 4) : SKIPSMAlphaZero) + (aa(ay, Diff2D(1,0)) ? SKIPSMAlphaOne : SKIPSMAlphaZero);
397  } else {
398  asc0[dstx] = asr1 + AMUL6(asr0);
399  }
400  ap += asc0[dstx];
401 
402  SKIPSMImagePixelType ip = isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx];
403  isc1[dstx] = isc0[dstx] + iscp[dstx];
404  if (wraparound) {
405  isc0[dstx] = isr1 + IMUL6(isr0) + (aa(ay) ? (SKIPSMImagePixelType(sa(sy)) * 4) : SKIPSMImageZero)
406  + (aa(ay, Diff2D(1,0)) ? SKIPSMImagePixelType(sa(sy, Diff2D(1,0))) : SKIPSMImageZero);
407  } else {
408  isc0[dstx] = isr1 + IMUL6(isr0);
409  }
410  if (ap) {
411  ip += isc0[dstx];
412  //ip /= SKIPSMImagePixelType(ap);
413  ip /= ap;
414  da.set(DestPixelType(ip), dx);
415  daa.set(DestAlphaMax, dax);
416  } else {
417  da.set(DestImageZero, dx);
418  daa.set(DestAlphaZero, dax);
419  }
420  }
421  else {
422  // Previous srcx was odd
423  SKIPSMAlphaPixelType ap = asc1[dstx] + AMUL6(asc0[dstx]) + ascp[dstx];
424  asc1[dstx] = asc0[dstx] + ascp[dstx];
425  if (wraparound) {
426  asc0[dstx] = asr1 + AMUL6(asr0) + asrp + (aa(ay) ? SKIPSMAlphaOne : SKIPSMAlphaZero);
427  } else {
428  asc0[dstx] = asr1 + AMUL6(asr0) + asrp;
429  }
430  ap += asc0[dstx];
431 
432  SKIPSMImagePixelType ip = isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx];
433  isc1[dstx] = isc0[dstx] + iscp[dstx];
434  if (wraparound) {
435  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + (aa(ay) ? SKIPSMImagePixelType(sa(sy)) : SKIPSMImageZero);
436  } else {
437  isc0[dstx] = isr1 + IMUL6(isr0) + isrp;
438  }
439  if (ap) {
440  ip += isc0[dstx];
441  //ip /= SKIPSMImagePixelType(ap);
442  ip /= ap;
443  da.set(DestPixelType(ip), dx);
444  daa.set(DestAlphaMax, dax);
445  } else {
446  da.set(DestImageZero, dx);
447  daa.set(DestAlphaZero, dax);
448  }
449  }
450 
451  ++dy.y;
452  ++day.y;
453 
454  }
455  else {
456  // First entry in odd-numbered row
457  sx = sy;
458  ax = ay;
459  if (wraparound) {
460  asr1 = asr0 + asrp;
461  isr1 = isr0 + isrp;
462  }
463  asr0 = aa(ax) ? SKIPSMAlphaOne : SKIPSMAlphaZero;
464  isr0 = aa(ax) ? SKIPSMImagePixelType(sa(sx)) : SKIPSMImageZero;
465  // isc*[0] are never used
466  ++sx.x;
467  ++ax.x;
468 
469  // Main entries in odd-numbered row
470  for (evenX = false, srcx = 1, dstx = 0; srcx < src_w; ++srcx, ++sx.x, ++ax.x) {
471  SKIPSMAlphaPixelType mcurrent(aa(ax) ? SKIPSMAlphaOne : SKIPSMAlphaZero);
472  SKIPSMImagePixelType icurrent(aa(ax) ? SKIPSMImagePixelType(sa(sx)) : SKIPSMImageZero);
473  if (evenX) {
474  ascp[dstx] = (asr1 + AMUL6(asr0) + asrp + mcurrent) * 4;
475  asr1 = asr0 + asrp;
476  asr0 = mcurrent;
477  iscp[dstx] = (isr1 + IMUL6(isr0) + isrp + icurrent) * 4;
478  isr1 = isr0 + isrp;
479  isr0 = icurrent;
480  }
481  else {
482  asrp = mcurrent * 4;
483  isrp = icurrent * 4;
484  ++dstx;
485  }
486  evenX = !evenX;
487  }
488 
489  // Last entries in row
490  if (!evenX) {
491  // previous srcx was even
492  ++dstx;
493  if (wraparound) {
494  ascp[dstx] = (asr1 + AMUL6(asr0) + (aa(ay) ? (SKIPSMAlphaOne * 4) : SKIPSMAlphaZero)
495  + (aa(ay, Diff2D(1,0)) ? SKIPSMAlphaOne : SKIPSMAlphaZero)
496  ) * 4;
497  iscp[dstx] = (isr1 + IMUL6(isr0) + (aa(ay) ? (SKIPSMImagePixelType(sa(sy)) * 4) : SKIPSMImageZero)
498  + (aa(ay, Diff2D(1,0)) ? SKIPSMImagePixelType(sa(sy, Diff2D(1,0))) : SKIPSMImageZero)
499  ) * 4;
500  } else {
501  ascp[dstx] = (asr1 + AMUL6(asr0)) * 4;
502  iscp[dstx] = (isr1 + IMUL6(isr0)) * 4;
503  }
504  }
505  else {
506  // previous srcx was odd
507  if (wraparound) {
508  ascp[dstx] = (asr1 + AMUL6(asr0) + asrp + (aa(ay) ? SKIPSMAlphaOne : SKIPSMAlphaZero)) * 4;
509  iscp[dstx] = (isr1 + IMUL6(isr0) + isrp + (aa(ay) ? SKIPSMImagePixelType(sa(sy)) : SKIPSMImageZero)) * 4;
510  } else {
511  ascp[dstx] = (asr1 + AMUL6(asr0) + asrp) * 4;
512  iscp[dstx] = (isr1 + IMUL6(isr0) + isrp) * 4;
513  }
514  }
515  }
516  evenY = !evenY;
517  }
518  }
519 
520  // Last Rows
521  {
522  if (!evenY) {
523  // Last srcy was even
524  // odd row will set all iscp[] to zero
525  // even row will do:
526  //isc0[dstx] = 0;
527  //isc1[dstx] = isc0[dstx] + 4*iscp[dstx]
528  //out = isc1[dstx] + 6*isc0[dstx] + 4*iscp[dstx] + newisc0[dstx]
529  for (dstx = 1, dx = dy, dax = day; dstx < (dst_w + 1); ++dstx, ++dx.x, ++dax.x) {
530  SKIPSMAlphaPixelType ap = asc1[dstx] + AMUL6(asc0[dstx]);
531  if (ap) {
532  //SKIPSMImagePixelType ip = (isc1[dstx] + IMUL6(isc0[dstx])) / SKIPSMImagePixelType(ap);
533  SKIPSMImagePixelType ip = (isc1[dstx] + IMUL6(isc0[dstx])) / ap;
534  da.set(DestPixelType(ip), dx);
535  daa.set(DestAlphaMax, dax);
536  } else {
537  da.set(DestImageZero, dx);
538  daa.set(DestAlphaZero, dax);
539  }
540  }
541  }
542  else {
543  // Last srcy was odd
544  // even row will do:
545  // isc0[dstx] = 0;
546  // isc1[dstx] = isc0[dstx] + 4*iscp[dstx]
547  // out = isc1[dstx] + 6*isc0[dstx] + 4*iscp[dstx] + newisc0[dstx]
548  for (dstx = 1, dx = dy, dax = day; dstx < (dst_w + 1); ++dstx, ++dx.x, ++dax.x) {
549  SKIPSMAlphaPixelType ap = asc1[dstx] + AMUL6(asc0[dstx]) + ascp[dstx];
550  if (ap) {
551  //SKIPSMImagePixelType ip = (isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx]) / SKIPSMImagePixelType(ap);
552  SKIPSMImagePixelType ip = (isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx]) / ap;
553  da.set(DestPixelType(ip), dx);
554  daa.set(DestAlphaMax, dax);
555  } else {
556  da.set(DestImageZero, dx);
557  daa.set(DestAlphaZero, dax);
558  }
559  }
560  }
561  }
562 
563  delete[] isc0;
564  delete[] isc1;
565  delete[] iscp;
566 
567  delete[] asc0;
568  delete[] asc1;
569  delete[] ascp;
570 
571 };
572 
573 // Version using argument object factories.
574 template <typename SKIPSMImagePixelType, typename SKIPSMAlphaPixelType,
575  typename SrcImageIterator, typename SrcAccessor,
576  typename AlphaIterator, typename AlphaAccessor,
577  typename DestImageIterator, typename DestAccessor,
578  typename DestAlphaIterator, typename DestAlphaAccessor>
579 inline void reduce(bool wraparound,
580  triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
581  pair<AlphaIterator, AlphaAccessor> mask,
582  triple<DestImageIterator, DestImageIterator, DestAccessor> dest,
583  triple<DestAlphaIterator, DestAlphaIterator, DestAlphaAccessor> destMask) {
584  reduce<SKIPSMImagePixelType, SKIPSMAlphaPixelType>(wraparound,
585  src.first, src.second, src.third,
586  mask.first, mask.second,
587  dest.first, dest.second, dest.third,
588  destMask.first, destMask.second, destMask.third);
589 };
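// Usage sketch (illustrative only, not part of the original Enblend code): one level
// of the masked Burt & Adelson Reduce driven through the argument-object-factory
// overload above.  The concrete image types (vigra::FImage, vigra::BImage) and the
// choice of double for both SKIPSM working types are assumptions made for this
// example; any types satisfying the iterator/accessor interfaces work.  Assumes the
// VIGRA factories (vigra::srcImageRange etc., e.g. via <vigra/stdimage.hxx>) are visible.
inline void reduceOneLevelSketch(bool wraparound,
                                 const vigra::FImage& level,   // Gaussian level l
                                 const vigra::BImage& mask,    // alpha mask of level l
                                 vigra::FImage& nextLevel,     // ((w+1)/2) x ((h+1)/2)
                                 vigra::BImage& nextMask)      // alpha mask of level l+1
{
    reduce<double, double>(wraparound,
                           vigra::srcImageRange(level),
                           vigra::maskImage(mask),
                           vigra::destImageRange(nextLevel),
                           vigra::destImageRange(nextMask));
}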
590 
594 template <typename SKIPSMImagePixelType,
595  typename SrcImageIterator, typename SrcAccessor,
596  typename DestImageIterator, typename DestAccessor>
597 inline void reduce(bool wraparound,
598  SrcImageIterator src_upperleft,
599  SrcImageIterator src_lowerright,
600  SrcAccessor sa,
601  DestImageIterator dest_upperleft,
602  DestImageIterator dest_lowerright,
603  DestAccessor da) {
604 
605  typedef typename DestAccessor::value_type DestPixelType;
606 
607  int src_w = src_lowerright.x - src_upperleft.x;
608  int src_h = src_lowerright.y - src_upperleft.y;
609  int dst_w = dest_lowerright.x - dest_upperleft.x;
610  //int dst_h = dest_lowerright.y - dest_upperleft.y;
611 
612  vigra_precondition(src_w > 1 && src_h > 1,
613  "src image too small in reduce");
614 
615  // State variables for source image pixel values
616  SKIPSMImagePixelType isr0, isr1, isrp;
617  SKIPSMImagePixelType *isc0 = new SKIPSMImagePixelType[dst_w + 1];
618  SKIPSMImagePixelType *isc1 = new SKIPSMImagePixelType[dst_w + 1];
619  SKIPSMImagePixelType *iscp = new SKIPSMImagePixelType[dst_w + 1];
620 
621  // Convenient constants
622  const SKIPSMImagePixelType SKIPSMImageZero(NumericTraits<SKIPSMImagePixelType>::zero());
623 
624  DestImageIterator dy = dest_upperleft;
625  DestImageIterator dx = dy;
626  SrcImageIterator sy = src_upperleft;
627  SrcImageIterator sx = sy;
628 
629  bool evenY = true;
630  bool evenX = true;
631  int srcy = 0;
632  int srcx = 0;
633  //int dsty = 0;
634  int dstx = 0;
635 
636  // First row
637  {
638  if (wraparound) {
639  isr0 = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-2, 0)));
640  isr1 = SKIPSMImageZero;
641  isrp = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-1, 0))) * 4;
642  } else {
643  isr0 = SKIPSMImagePixelType(sa(sy));
644  isr1 = SKIPSMImageZero;
645  isrp = SKIPSMImagePixelType(sa(sy)) * 4;
646  }
647 
648  // Main pixels in first row
649  for (sx = sy, evenX = true, srcx = 0, dstx = 0; srcx < src_w; ++srcx, ++sx.x) {
650  SKIPSMImagePixelType icurrent(SKIPSMImagePixelType(sa(sx)));
651  if (evenX) {
652  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + icurrent;
653  isc1[dstx] = IMUL5(isc0[dstx]);
654  isr1 = isr0 + isrp;
655  isr0 = icurrent;
656  }
657  else {
658  isrp = icurrent * 4;
659  ++dstx;
660  }
661  evenX = !evenX;
662  }
663 
664  // Last entries in first row
665  if (!evenX) {
666  // previous srcx was even
667  ++dstx;
668  if (wraparound) {
669  isc0[dstx] = isr1 + IMUL6(isr0) + (SKIPSMImagePixelType(sa(sy)) * 4)
670  + SKIPSMImagePixelType(sa(sy, Diff2D(1,0)));
671  isc1[dstx] = IMUL5(isc0[dstx]);
672  } else {
673  isc0[dstx] = isr1 + IMUL11(isr0);
674  isc1[dstx] = IMUL5(isc0[dstx]);
675  }
676  }
677  else {
678  // previous srcx was odd
679  if (wraparound) {
680  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + SKIPSMImagePixelType(sa(sy));
681  isc1[dstx] = IMUL5(isc0[dstx]);
682  } else {
683  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + (isrp / 4);
684  isc1[dstx] = IMUL5(isc0[dstx]);
685  }
686  }
687  }
688  ++sy.y;
689 
690  // Main Rows
691  {
692  for (evenY = false, srcy = 1; srcy < src_h; ++srcy, ++sy.y) {
693 
694  if (wraparound) {
695  isr0 = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-2,0)));
696  isr1 = SKIPSMImageZero;
697  isrp = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-1,0))) * 4;
698  } else {
699  isr0 = SKIPSMImagePixelType(sa(sy));
700  isr1 = SKIPSMImageZero;
701  isrp = SKIPSMImagePixelType(sa(sy)) * 4;
702  }
703 
704  if (evenY) {
705  // Even-numbered row
706 
707  // First entry in row
708  sx = sy;
709  isr1 = isr0 + isrp;
710  isr0 = SKIPSMImagePixelType(sa(sx));
711  // isc*[0] are never used
712  ++sx.x;
713  dx = dy;
714 
715  // Main entries in row
716  for (evenX = false, srcx = 1, dstx = 0; srcx < src_w; ++srcx, ++sx.x) {
717  SKIPSMImagePixelType icurrent(SKIPSMImagePixelType(sa(sx)));
718  if (evenX) {
719  SKIPSMImagePixelType ip = isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx];
720  isc1[dstx] = isc0[dstx] + iscp[dstx];
721  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + icurrent;
722  isr1 = isr0 + isrp;
723  isr0 = icurrent;
724  ip += isc0[dstx];
725  ip /= 256;
726  da.set(DestPixelType(ip), dx);
727  ++dx.x;
728  }
729  else {
730  isrp = icurrent * 4;
731  ++dstx;
732  }
733  evenX = !evenX;
734  }
735 
736  // Last entries in row
737  if (!evenX) {
738  // previous srcx was even
739  ++dstx;
740 
741  SKIPSMImagePixelType ip = isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx];
742  isc1[dstx] = isc0[dstx] + iscp[dstx];
743  if (wraparound) {
744  isc0[dstx] = isr1 + IMUL6(isr0) + (SKIPSMImagePixelType(sa(sy)) * 4)
745  + SKIPSMImagePixelType(sa(sy, Diff2D(1,0)));
746  } else {
747  isc0[dstx] = isr1 + IMUL11(isr0);
748  }
749  ip += isc0[dstx];
750  ip /= 256;
751  da.set(DestPixelType(ip), dx);
752  }
753  else {
754  // Previous srcx was odd
755  SKIPSMImagePixelType ip = isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx];
756  isc1[dstx] = isc0[dstx] + iscp[dstx];
757  if (wraparound) {
758  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + SKIPSMImagePixelType(sa(sy));
759  } else {
760  isc0[dstx] = isr1 + IMUL6(isr0) + isrp + (isrp / 4);
761  }
762  ip += isc0[dstx];
763  ip /= 256;
764  da.set(DestPixelType(ip), dx);
765  }
766 
767  ++dy.y;
768  }
769  else {
770  // First entry in odd-numbered row
771  sx = sy;
772  isr1 = isr0 + isrp;
773  isr0 = SKIPSMImagePixelType(sa(sx));
774  // isc*[0] are never used
775  ++sx.x;
776 
777  // Main entries in odd-numbered row
778  for (evenX = false, srcx = 1, dstx = 0; srcx < src_w; ++srcx, ++sx.x) {
779  SKIPSMImagePixelType icurrent(SKIPSMImagePixelType(sa(sx)));
780  if (evenX) {
781  iscp[dstx] = (isr1 + IMUL6(isr0) + isrp + icurrent) * 4;
782  isr1 = isr0 + isrp;
783  isr0 = icurrent;
784  }
785  else {
786  isrp = icurrent * 4;
787  ++dstx;
788  }
789  evenX = !evenX;
790  }
791  // Last entries in row
792  if (!evenX) {
793  // previous srcx was even
794  ++dstx;
795  if (wraparound) {
796  iscp[dstx] = (isr1 + IMUL6(isr0) + (SKIPSMImagePixelType(sa(sy)) * 4)
797  + SKIPSMImagePixelType(sa(sy, Diff2D(1,0)))
798  ) * 4;
799  } else {
800  iscp[dstx] = (isr1 + IMUL11(isr0)) * 4;
801  }
802  }
803  else {
804  // previous srcx was odd
805  if (wraparound) {
806  iscp[dstx] = (isr1 + IMUL6(isr0) + isrp + SKIPSMImagePixelType(sa(sy))) * 4;
807  } else {
808  iscp[dstx] = (isr1 + IMUL6(isr0) + isrp + (isrp / 4)) * 4;
809  }
810  }
811  }
812  evenY = !evenY;
813  }
814  }
815 
816  // Last Rows
817  {
818  if (!evenY) {
819  // Last srcy was even
820  // odd row will set all iscp[] to zero
821  // even row will do:
822  //isc0[dstx] = 0;
823  //isc1[dstx] = isc0[dstx] + 4*iscp[dstx]
824  //out = isc1[dstx] + 6*isc0[dstx] + 4*iscp[dstx] + newisc0[dstx]
825  for (dstx = 1, dx = dy; dstx < (dst_w + 1); ++dstx, ++dx.x) {
826  SKIPSMImagePixelType ip = (isc1[dstx] + IMUL11(isc0[dstx])) / 256;
827  da.set(DestPixelType(ip), dx);
828  }
829  }
830  else {
831  // Last srcy was odd
832  // even row will do:
833  // isc0[dstx] = 0;
834  // isc1[dstx] = isc0[dstx] + 4*iscp[dstx]
835  // out = isc1[dstx] + 6*isc0[dstx] + 4*iscp[dstx] + newisc0[dstx]
836  for (dstx = 1, dx = dy; dstx < (dst_w + 1); ++dstx, ++dx.x) {
837  SKIPSMImagePixelType ip = (isc1[dstx] + IMUL6(isc0[dstx]) + iscp[dstx] + (iscp[dstx] / 4)) / 256;
838  da.set(DestPixelType(ip), dx);
839  }
840  }
841  }
842 
843  delete[] isc0;
844  delete[] isc1;
845  delete[] iscp;
846 
847 };
848 
849 // Version using argument object factories.
850 template <typename SKIPSMImagePixelType,
851  typename SrcImageIterator, typename SrcAccessor,
852  typename DestImageIterator, typename DestAccessor>
853 inline void reduce(bool wraparound,
854  triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
855  triple<DestImageIterator, DestImageIterator, DestAccessor> dest) {
856  reduce<SKIPSMImagePixelType>(wraparound,
857  src.first, src.second, src.third,
858  dest.first, dest.second, dest.third);
859 };
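// Usage sketch for the mask-less overload (illustrative, not part of the original
// code).  The reduce kernel weights are (1,4,6,4,1) per axis, so the accumulated sums
// are divided by 16*16 = 256 (the "ip /= 256" above); SKIPSMImagePixelType therefore
// has to hold 256 times the source pixel range.  double is assumed here as a safe
// working type; a sufficiently wide integer type works as well.
inline void reduceOneLevelNoMaskSketch(bool wraparound,
                                       const vigra::FImage& level,
                                       vigra::FImage& nextLevel)  // ((w+1)/2) x ((h+1)/2)
{
    reduce<double>(wraparound,
                   vigra::srcImageRange(level),
                   vigra::destImageRange(nextLevel));
}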
860 
861 // SKIPSM update routine used when visiting a pixel in the top two rows
862 // and the left two rows.
863 #define SKIPSM_EXPAND(SCALE_OUT00, SCALE_OUT10, SCALE_OUT01, SCALE_OUT11) \
864  current = SKIPSMImagePixelType(sa(sx)); \
865  out00 = sc1a[srcx] + IMUL6(sc0a[srcx]); \
866  out10 = sc1b[srcx] + IMUL6(sc0b[srcx]); \
867  out01 = sc0a[srcx]; \
868  out11 = sc0b[srcx]; \
869  sc1a[srcx] = sc0a[srcx]; \
870  sc1b[srcx] = sc0b[srcx]; \
871  sc0a[srcx] = sr1 + IMUL6(sr0) + current; \
872  sc0b[srcx] = (sr0 + current) * 4; \
873  sr1 = sr0; \
874  sr0 = current; \
875  out00 += sc0a[srcx]; \
876  out10 += sc0b[srcx]; \
877  out01 += sc0a[srcx]; \
878  out11 += sc0b[srcx]; \
879  out00 /= SKIPSMImagePixelType(SCALE_OUT00); \
880  out10 /= SKIPSMImagePixelType(SCALE_OUT10); \
881  out01 /= SKIPSMImagePixelType(SCALE_OUT01); \
882  out11 /= SKIPSMImagePixelType(SCALE_OUT11); \
883  da.set(cf(SKIPSMImagePixelType(da(dx)), out00), dx); \
884  ++dx.x; \
885  da.set(cf(SKIPSMImagePixelType(da(dx)), out10), dx); \
886  ++dx.x; \
887  da.set(cf(SKIPSMImagePixelType(da(dxx)), out01), dxx); \
888  ++dxx.x; \
889  da.set(cf(SKIPSMImagePixelType(da(dxx)), out11), dxx); \
890  ++dxx.x;
891 
892 // SKIPSM update routine used when visiting a pixel in the main image body.
893 // Same as above, but with hard-coded scaling factors.
894 #define SKIPSM_EXPAND_SHIFT \
895  current = SKIPSMImagePixelType(sa(sx)); \
896  out00 = sc1a[srcx] + IMUL6(sc0a[srcx]); \
897  out10 = sc1b[srcx] + IMUL6(sc0b[srcx]); \
898  out01 = sc0a[srcx]; \
899  out11 = sc0b[srcx]; \
900  sc1a[srcx] = sc0a[srcx]; \
901  sc1b[srcx] = sc0b[srcx]; \
902  sc0a[srcx] = sr1 + IMUL6(sr0) + current; \
903  sc0b[srcx] = (sr0 + current) * 4; \
904  sr1 = sr0; \
905  sr0 = current; \
906  out00 += sc0a[srcx]; \
907  out10 += sc0b[srcx]; \
908  out01 += sc0a[srcx]; \
909  out11 += sc0b[srcx]; \
910  out00 /= 64; \
911  out10 /= 64; \
912  out01 /= 16; \
913  out11 /= 16; \
914  da.set(cf(SKIPSMImagePixelType(da(dx)), out00), dx); \
915  ++dx.x; \
916  da.set(cf(SKIPSMImagePixelType(da(dx)), out10), dx); \
917  ++dx.x; \
918  da.set(cf(SKIPSMImagePixelType(da(dxx)), out01), dxx); \
919  ++dxx.x; \
920  da.set(cf(SKIPSMImagePixelType(da(dxx)), out11), dxx); \
921  ++dxx.x;
922 
923 // SKIPSM update routine used for the extra row under the main image body.
924 #define SKIPSM_EXPAND_ROW_END(SCALE_OUT00, SCALE_OUT10, SCALE_OUT01, SCALE_OUT11) \
925  out00 = sc1a[srcx] + IMUL6(sc0a[srcx]); \
926  out10 = sc1b[srcx] + IMUL6(sc0b[srcx]); \
927  out00 /= SKIPSMImagePixelType(SCALE_OUT00); \
928  out10 /= SKIPSMImagePixelType(SCALE_OUT10); \
929  da.set(cf(da(dx), out00), dx); \
930  ++dx.x; \
931  da.set(cf(da(dx), out10), dx); \
932  ++dx.x; \
933  if ((dst_h & 1) == 0) { \
934  out01 = sc0a[srcx]; \
935  out11 = sc0b[srcx]; \
936  out01 /= SKIPSMImagePixelType(SCALE_OUT01); \
937  out11 /= SKIPSMImagePixelType(SCALE_OUT11); \
938  da.set(cf(da(dxx), out01), dxx); \
939  ++dxx.x; \
940  da.set(cf(da(dxx), out11), dxx); \
941  ++dxx.x; \
942  }
943 
944 // SKIPSM update routine used for the extra column to the right
945 // of the main image body.
946 #define SKIPSM_EXPAND_COLUMN_END(SCALE_OUT00, SCALE_OUT10, SCALE_OUT01, SCALE_OUT11) \
947  out00 = sc1a[srcx] + IMUL6(sc0a[srcx]); \
948  out01 = sc0a[srcx]; \
949  out10 = sc1b[srcx] + IMUL6(sc0b[srcx]); \
950  out11 = sc0b[srcx]; \
951  sc1a[srcx] = sc0a[srcx]; \
952  sc1b[srcx] = sc0b[srcx]; \
953  sc0a[srcx] = sr1 + IMUL6(sr0); \
954  sc0b[srcx] = sr0 * 4; \
955  out00 += sc0a[srcx]; \
956  out01 += sc0a[srcx]; \
957  out00 /= SKIPSMImagePixelType(SCALE_OUT00); \
958  out01 /= SKIPSMImagePixelType(SCALE_OUT01); \
959  da.set(cf(da(dx), out00), dx); \
960  da.set(cf(da(dxx), out01), dxx); \
961  if ((dst_w & 1) == 0) { \
962  ++dx.x; \
963  ++dxx.x; \
964  out10 += sc0b[srcx]; \
965  out11 += sc0b[srcx]; \
966  out10 /= SKIPSMImagePixelType(SCALE_OUT10); \
967  out11 /= SKIPSMImagePixelType(SCALE_OUT11); \
968  da.set(cf(da(dx), out10), dx); \
969  da.set(cf(da(dxx), out11), dxx); \
970  }
971 
972 // SKIPSM update routine used for the extra column to the right
973 // of the main image body, with wraparound boundary conditions.
974 #define SKIPSM_EXPAND_COLUMN_END_WRAPAROUND(SCALE_OUT00, SCALE_OUT10, SCALE_OUT01, SCALE_OUT11) \
975  out00 = sc1a[srcx] + IMUL6(sc0a[srcx]); \
976  out01 = sc0a[srcx]; \
977  out10 = sc1b[srcx] + IMUL6(sc0b[srcx]); \
978  out11 = sc0b[srcx]; \
979  sc1a[srcx] = sc0a[srcx]; \
980  sc1b[srcx] = sc0b[srcx]; \
981  sc0a[srcx] = sr1 + IMUL6(sr0) + SKIPSMImagePixelType(sa(sy)); \
982  sc0b[srcx] = (sr0 + SKIPSMImagePixelType(sa(sy))) * 4; \
983  out00 += sc0a[srcx]; \
984  out01 += sc0a[srcx]; \
985  out00 /= SKIPSMImagePixelType(SCALE_OUT00); \
986  out01 /= SKIPSMImagePixelType(SCALE_OUT01); \
987  da.set(cf(da(dx), out00), dx); \
988  da.set(cf(da(dxx), out01), dxx); \
989  if ((dst_w & 1) == 0) { \
990  ++dx.x; \
991  ++dxx.x; \
992  out10 += sc0b[srcx]; \
993  out11 += sc0b[srcx]; \
994  out10 /= SKIPSMImagePixelType(SCALE_OUT10); \
995  out11 /= SKIPSMImagePixelType(SCALE_OUT11); \
996  da.set(cf(da(dx), out10), dx); \
997  da.set(cf(da(dxx), out11), dxx); \
998  }
999 
1000 // SKIPSM update routine for the extra column to the right
1001 // of the extra row under the main image body.
1002 #define SKIPSM_EXPAND_ROW_COLUMN_END(SCALE_OUT00, SCALE_OUT10, SCALE_OUT01, SCALE_OUT11) \
1003  out00 = sc1a[srcx] + IMUL6(sc0a[srcx]); \
1004  out00 /= SKIPSMImagePixelType(SCALE_OUT00); \
1005  da.set(cf(da(dx), out00), dx); \
1006  if ((dst_w & 1) == 0) { \
1007  out10 = sc1b[srcx] + IMUL6(sc0b[srcx]); \
1008  out10 /= SKIPSMImagePixelType(SCALE_OUT10); \
1009  ++dx.x; \
1010  da.set(cf(da(dx), out10), dx); \
1011  } \
1012  if ((dst_h & 1) == 0) { \
1013  out01 = sc0a[srcx]; \
1014  out01 /= SKIPSMImagePixelType(SCALE_OUT01); \
1015  da.set(cf(da(dxx), out01), dxx); \
1016  if ((dst_w & 1) == 0) { \
1017  out11 = sc0b[srcx]; \
1018  out11 /= SKIPSMImagePixelType(SCALE_OUT11); \
1019  ++dxx.x; \
1020  da.set(cf(da(dxx), out11), dxx); \
1021  } \
1022  }
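// Where the divisors in the macros above come from (worked example, added for
// illustration): each column sum sc0a is built with row weights (1,6,1) and each
// sc0b with row weights (4,4), so both sum to 8.  An interior even/even output
// (out00) combines three column sums with vertical weights (1,6,1), a total kernel
// weight of 8*8 = 64; an interior odd-row output (out01, out11) combines only two
// column sums with weights (1,1), giving 2*8 = 16 -- hence the hard-coded /64 and
// /16 in SKIPSM_EXPAND_SHIFT.  Near the borders some taps are missing, so the
// boundary macros divide by the sum of the weights that actually contribute
// (56 = 7*8, 49 = 7*7, 28, 14, 8, ...).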
1023 
1024 /** The Burt & Adelson Expand operation.
1084  */
1085 template <typename SKIPSMImagePixelType,
1086  typename SrcImageIterator, typename SrcAccessor,
1087  typename DestImageIterator, typename DestAccessor,
1088  typename CombineFunctor>
1089 void expand(bool add, bool wraparound,
1090  SrcImageIterator src_upperleft,
1091  SrcImageIterator src_lowerright,
1092  SrcAccessor sa,
1093  DestImageIterator dest_upperleft,
1094  DestImageIterator dest_lowerright,
1095  DestAccessor da,
1096  CombineFunctor cf) {
1097 
1098  int src_w = src_lowerright.x - src_upperleft.x;
1099  int src_h = src_lowerright.y - src_upperleft.y;
1100  int dst_w = dest_lowerright.x - dest_upperleft.x;
1101  int dst_h = dest_lowerright.y - dest_upperleft.y;
1102 
1103  // SKIPSM state variables
1104  SKIPSMImagePixelType current;
1105  SKIPSMImagePixelType out00, out10, out01, out11;
1106  SKIPSMImagePixelType sr0, sr1;
1107  SKIPSMImagePixelType *sc0a = new SKIPSMImagePixelType[src_w + 1];
1108  SKIPSMImagePixelType *sc0b = new SKIPSMImagePixelType[src_w + 1];
1109  SKIPSMImagePixelType *sc1a = new SKIPSMImagePixelType[src_w + 1];
1110  SKIPSMImagePixelType *sc1b = new SKIPSMImagePixelType[src_w + 1];
1111 
1112  // Convenient constants
1113  const SKIPSMImagePixelType SKIPSMImageZero(NumericTraits<SKIPSMImagePixelType>::zero());
1114 
1115  DestImageIterator dy = dest_upperleft;
1116  DestImageIterator dyy = dest_upperleft;
1117  DestImageIterator dx = dy;
1118  DestImageIterator dxx = dyy;
1119  SrcImageIterator sy = src_upperleft;
1120  SrcImageIterator sx = sy;
1121 
1122  int srcy = 0;
1123  int srcx = 0;
1124  //int dsty = 0;
1125  //int dstx = 0;
1126 
1127  // First row
1128  {
1129  if (wraparound) {
1130  sr0 = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-1,0)));
1131  sr1 = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-2,0)));
1132  } else {
1133  sr0 = SKIPSMImageZero;
1134  sr1 = SKIPSMImageZero;
1135  }
1136 
1137  for (sx = sy, srcx = 0; srcx < src_w; ++srcx, ++sx.x) {
1138  current = SKIPSMImagePixelType(sa(sx));
1139  sc0a[srcx] = sr1 + IMUL6(sr0) + current;
1140  sc0b[srcx] = (sr0 + current) * 4;
1141  sc1a[srcx] = SKIPSMImageZero;
1142  sc1b[srcx] = SKIPSMImageZero;
1143  sr1 = sr0;
1144  sr0 = current;
1145  }
1146 
1147  // extra column at end of first row
1148  if (wraparound) {
1149  sc0a[srcx] = sr1 + IMUL6(sr0) + SKIPSMImagePixelType(sa(sy));
1150  sc0b[srcx] = (sr0 + SKIPSMImagePixelType(sa(sy))) * 4;
1151  } else {
1152  sc0a[srcx] = sr1 + IMUL6(sr0);
1153  sc0b[srcx] = sr0 * 4;
1154  }
1155  sc1a[srcx] = SKIPSMImageZero;
1156  sc1b[srcx] = SKIPSMImageZero;
1157  }
1158 
1159  // dy = row 0
1160  // dyy = row 1
1161  ++dyy.y;
1162  // sy = row 1
1163  srcy = 1;
1164  ++sy.y;
1165 
1166  // Second row
1167  if (src_h > 1) {
1168  // First column
1169  srcx = 0;
1170  sx = sy;
1171  sr0 = SKIPSMImagePixelType(sa(sx));
1172  if (wraparound) {
1173  sr1 = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-1,0)));
1174  } else {
1175  sr1 = SKIPSMImageZero;
1176  }
1177  // sc*[0] are irrelevant
1178 
1179  srcx = 1;
1180  ++sx.x;
1181  dx = dy;
1182  dxx = dyy;
1183 
1184  // Second column
1185  if (src_w > 1) {
1186  if (wraparound) {
1187  SKIPSM_EXPAND(56, 56, 16, 16)
1188  } else {
1189  SKIPSM_EXPAND(49, 56, 14, 16)
1190  }
1191 
1192  // Main columns
1193  for (srcx = 2, ++sx.x; srcx < src_w; ++srcx, ++sx.x) {
1194  SKIPSM_EXPAND(56, 56, 16, 16)
1195  }
1196 
1197  // extra column at end of second row
1198  if (wraparound) {
1199  SKIPSM_EXPAND_COLUMN_END_WRAPAROUND(56, 56, 16, 16)
1200  } else {
1201  SKIPSM_EXPAND_COLUMN_END(49, 28, 14, 8)
1202  }
1203  }
1204  else {
1205  // Math works out exactly the same for wraparound and no wraparound when src_w ==1
1206  SKIPSM_EXPAND_COLUMN_END(42, 28, 12, 8)
1207  }
1208 
1209  }
1210  else {
1211  // No Second Row
1212  // First Column
1213  srcx = 0;
1214  sr0 = SKIPSMImageZero;
1215  sr1 = SKIPSMImageZero;
1216 
1217  dx = dy;
1218  dxx = dyy;
1219 
1220  if (src_w > 1) {
1221  // Second Column
1222  srcx = 1;
1223  if (wraparound) {
1224  SKIPSM_EXPAND_ROW_END(48, 48, 8, 8)
1225  } else {
1226  SKIPSM_EXPAND_ROW_END(42, 48, 7, 8)
1227  }
1228 
1229  // Main columns
1230  for (srcx = 2; srcx < src_w; ++srcx) {
1231  SKIPSM_EXPAND_ROW_END(48, 48, 8, 8)
1232  }
1233 
1234  // extra column at end of row
1235  if (wraparound) {
1236  SKIPSM_EXPAND_ROW_COLUMN_END(48, 48, 8, 8)
1237  } else {
1238  SKIPSM_EXPAND_ROW_COLUMN_END(42, 24, 7, 4)
1239  }
1240  }
1241  else {
1242  // No Second Column
1243  // dst_w, dst_h must be at least 2
1244  SKIPSM_EXPAND_ROW_COLUMN_END(36, 24, 6, 4)
1245  }
1246 
1247  delete[] sc0a;
1248  delete[] sc0b;
1249  delete[] sc1a;
1250  delete[] sc1b;
1251 
1252  return;
1253  }
1254 
1255  // dy = row 2
1256  // dyy = row 3
1257  dy.y += 2;
1258  dyy.y += 2;
1259  // sy = row 2
1260  srcy = 2;
1261  ++sy.y;
1262 
1263  // Main Rows
1264  for (srcy = 2, sx = sy; srcy < src_h; ++srcy, ++sy.y, dy.y += 2, dyy.y += 2) {
1265  // First column
1266  srcx = 0;
1267  sx = sy;
1268  sr0 = SKIPSMImagePixelType(sa(sx));
1269  if (wraparound) {
1270  sr1 = SKIPSMImagePixelType(sa(sy, Diff2D(src_w-1,0)));
1271  } else {
1272  sr1 = SKIPSMImageZero;
1273  }
1274  // sc*[0] are irrelevant
1275 
1276  srcx = 1;
1277  ++sx.x;
1278  dx = dy;
1279  dxx = dyy;
1280 
1281  // Second column
1282  if (src_w > 1) {
1283  if (wraparound) {
1284  SKIPSM_EXPAND(64, 64, 16, 16)
1285  } else {
1286  SKIPSM_EXPAND(56, 64, 14, 16)
1287  }
1288 
1289  // Main columns
1290  for (srcx = 2, ++sx.x; srcx < src_w; ++srcx, ++sx.x) {
1291  //SKIPSM_EXPAND(64, 64, 16, 16)
1292  SKIPSM_EXPAND_SHIFT
1293  }
1294 
1295  // extra column at end of row
1296  if (wraparound) {
1297  SKIPSM_EXPAND_COLUMN_END_WRAPAROUND(64, 64, 16, 16)
1298  } else {
1299  SKIPSM_EXPAND_COLUMN_END(56, 32, 14, 8)
1300  }
1301  }
1302  else {
1303  // No second column
1304  // dst_w must be at least 2
1305  // Math works out exactly the same for wraparound and no wraparound when src_w ==1
1306  SKIPSM_EXPAND_COLUMN_END(48, 32, 12, 8)
1307  }
1308  }
1309 
1310  // Extra row at end
1311  {
1312  srcx = 0;
1313  sr0 = SKIPSMImageZero;
1314  sr1 = SKIPSMImageZero;
1315 
1316  dx = dy;
1317  dxx = dyy;
1318 
1319  if (src_w > 1) {
1320  // Second Column
1321  srcx = 1;
1322  if (wraparound) {
1323  SKIPSM_EXPAND_ROW_END(56, 56, 8, 8)
1324  } else {
1325  SKIPSM_EXPAND_ROW_END(49, 56, 7, 8)
1326  }
1327 
1328  // Main columns
1329  for (srcx = 2; srcx < src_w; ++srcx) {
1330  SKIPSM_EXPAND_ROW_END(56, 56, 8, 8)
1331  }
1332 
1333  // extra column at end of row
1334  if (wraparound) {
1335  SKIPSM_EXPAND_ROW_COLUMN_END(56, 56, 8, 8)
1336  } else {
1337  SKIPSM_EXPAND_ROW_COLUMN_END(49, 28, 7, 4)
1338  }
1339  }
1340  else {
1341  // No Second Column
1342  // dst_w, dst_h must be at least 2
1343  SKIPSM_EXPAND_ROW_COLUMN_END(42, 28, 6, 4)
1344  }
1345  }
1346 
1347  delete[] sc0a;
1348  delete[] sc0b;
1349  delete[] sc1a;
1350  delete[] sc1b;
1351 
1352 };
1353 
1354 // Functor that adds two values and de-promotes the result.
1355 // Used when collapsing a laplacian pyramid.
1356 // Explicit fromPromote necessary to avoid overflow/underflow problems.
1357 template<typename T1, typename T2, typename T3>
1358 struct FromPromotePlusFunctorWrapper : public std::binary_function<T1, T2, T3> {
1359  inline T3 operator()(const T1 &a, const T2 &b) const {
1360  return NumericTraits<T3>::fromPromote(a + b);
1361  }
1362 };
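// Illustrative note (added): for integral destination types,
// NumericTraits<T3>::fromPromote() clamps the widened sum back into the range of T3.
// For example, with vigra::Int16 pixels a Laplacian value of 30000 plus an expanded
// Gaussian value of 10000 would overflow a plain Int16 addition; fromPromote clips
// the intermediate 40000 to 32767 instead.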
1363 
1364 // Version using argument object factories.
1365 template <typename SKIPSMImagePixelType,
1366  typename SrcImageIterator, typename SrcAccessor,
1367  typename DestImageIterator, typename DestAccessor>
1368 inline void expand(bool add, bool wraparound,
1369  triple<SrcImageIterator, SrcImageIterator, SrcAccessor> src,
1370  triple<DestImageIterator, DestImageIterator, DestAccessor> dest) {
1371 
1372  typedef typename DestAccessor::value_type DestPixelType;
1373 
1374  if (add) {
1375  expand<SKIPSMImagePixelType>(add, wraparound,
1376  src.first, src.second, src.third,
1377  dest.first, dest.second, dest.third,
1378  FromPromotePlusFunctorWrapper<DestPixelType, SKIPSMImagePixelType, DestPixelType>());
1379  }
1380  else {
1381  expand<SKIPSMImagePixelType>(add, wraparound,
1382  src.first, src.second, src.third,
1383  dest.first, dest.second, dest.third,
1384  std::minus<SKIPSMImagePixelType>());
1385  }
1386 
1387 };
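// Usage sketch (illustrative, not part of the original code): one collapse step,
// adding the expansion of Laplacian level l+1 into level l via the overload above.
// The image type and the double working type are assumptions for this example.
inline void expandOneLevelSketch(bool wraparound,
                                 const vigra::FImage& coarserLevel,  // level l+1
                                 vigra::FImage& finerLevel)          // level l, updated in place
{
    // add == true selects the de-promoting plus functor defined above.
    expand<double>(true, wraparound,
                   vigra::srcImageRange(coarserLevel),
                   vigra::destImageRange(finerLevel));
}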
1388 
1389 #if 0
1390 // doesn't compile
1391 
1393 template <typename SrcImageType, typename AlphaImageType, typename PyramidImageType,
1394  int PyramidIntegerBits, int PyramidFractionBits,
1395  typename SKIPSMImagePixelType, typename SKIPSMAlphaPixelType>
1396 vector<PyramidImageType*> *gaussianPyramid(unsigned int numLevels,
1397  bool wraparound,
1398  typename SrcImageType::const_traverser src_upperleft,
1399  typename SrcImageType::const_traverser src_lowerright,
1400  typename SrcImageType::ConstAccessor sa,
1401  typename AlphaImageType::const_traverser alpha_upperleft,
1402  typename AlphaImageType::ConstAccessor aa) {
1403 
1404  vector<PyramidImageType*> *gp = new vector<PyramidImageType*>();
1405 
1406  // Size of pyramid level 0
1407  int w = src_lowerright.x - src_upperleft.x;
1408  int h = src_lowerright.y - src_upperleft.y;
1409 
1410  // Pyramid level 0
1411  PyramidImageType *gp0 = new PyramidImageType(w, h);
1412 
1413  // Copy src image into gp0, using fixed-point conversions.
1414  copyToPyramidImage<SrcImageType, PyramidImageType, PyramidIntegerBits, PyramidFractionBits>(
1415  src_upperleft, src_lowerright, sa, gp0->upperLeft(), gp0->accessor());
1416 
1417  gp->push_back(gp0);
1418 
1419  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1420  cout << "Generating Gaussian pyramid: g0";
1421  }
1422 
1423  // Make remaining levels.
1424  PyramidImageType *lastGP = gp0;
1425  AlphaImageType *lastA = NULL;
1426  for (unsigned int l = 1; l < numLevels; l++) {
1427 
1428  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1429  cout << " g" << l;
1430  cout.flush();
1431  }
1432 
1433  // Size of next level
1434  w = (w + 1) >> 1;
1435  h = (h + 1) >> 1;
1436 
1437  // Next pyramid level
1438  PyramidImageType *gpn = new PyramidImageType(w, h);
1439  AlphaImageType *nextA = new AlphaImageType(w, h);
1440 
1441  if (lastA == NULL) {
1442  reduce<SKIPSMImagePixelType, SKIPSMAlphaPixelType>(wraparound,
1443  srcImageRange(*lastGP), maskIter(alpha_upperleft, aa),
1444  destImageRange(*gpn), destImageRange(*nextA));
1445  } else {
1446  reduce<SKIPSMImagePixelType, SKIPSMAlphaPixelType>(wraparound,
1447  srcImageRange(*lastGP), maskImage(*lastA),
1448  destImageRange(*gpn), destImageRange(*nextA));
1449  }
1450 
1451  gp->push_back(gpn);
1452  lastGP = gpn;
1453  delete lastA;
1454  lastA = nextA;
1455  }
1456 
1457  delete lastA;
1458 
1459  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1460  cout << endl;
1461  }
1462 
1463  return gp;
1464 
1465 };
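// Worked example (added for illustration): with w = (w + 1) >> 1 and
// h = (h + 1) >> 1 per level, a 2000 x 1500 level 0 gives levels of
// 1000 x 750, 500 x 375, 250 x 188, 125 x 94, 63 x 47, 32 x 24, ...
// so roughly log2(min(w, h)) levels are available before a dimension reaches 1.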
1466 
1467 // Version using argument object factories.
1468 template <typename SrcImageType, typename AlphaImageType, typename PyramidImageType,
1469  int PyramidIntegerBits, int PyramidFractionBits,
1470  typename SKIPSMImagePixelType, typename SKIPSMAlphaPixelType>
1471 inline vector<PyramidImageType*> *gaussianPyramid(unsigned int numLevels,
1472  bool wraparound,
1473  triple<typename SrcImageType::const_traverser, typename SrcImageType::const_traverser, typename SrcImageType::ConstAccessor> src,
1474  pair<typename AlphaImageType::const_traverser, typename AlphaImageType::ConstAccessor> alpha) {
1475  return gaussianPyramid<SrcImageType, AlphaImageType, PyramidImageType,
1476  PyramidIntegerBits, PyramidFractionBits,
1477  SKIPSMImagePixelType, SKIPSMAlphaPixelType>(
1478  numLevels, wraparound,
1479  src.first, src.second, src.third,
1480  alpha.first, alpha.second);
1481 };
1482 
1484 template <typename SrcImageType, typename PyramidImageType,
1485  int PyramidIntegerBits, int PyramidFractionBits,
1486  typename SKIPSMImagePixelType>
1487 vector<PyramidImageType*> *gaussianPyramid(unsigned int numLevels,
1488  bool wraparound,
1489  typename SrcImageType::const_traverser src_upperleft,
1490  typename SrcImageType::const_traverser src_lowerright,
1491  typename SrcImageType::ConstAccessor sa) {
1492 
1493  vector<PyramidImageType*> *gp = new vector<PyramidImageType*>();
1494 
1495  // Size of pyramid level 0
1496  int w = src_lowerright.x - src_upperleft.x;
1497  int h = src_lowerright.y - src_upperleft.y;
1498 
1499  // Pyramid level 0
1500  PyramidImageType *gp0 = new PyramidImageType(w, h);
1501 
1502  // Copy src image into gp0, using fixed-point conversions.
1503  copyToPyramidImage<SrcImageType, PyramidImageType, PyramidIntegerBits, PyramidFractionBits>(
1504  src_upperleft, src_lowerright, sa,
1505  gp0->upperLeft(), gp0->accessor());
1506 
1507  gp->push_back(gp0);
1508 
1509  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1510  cout << "Generating Gaussian pyramid: g0";
1511  }
1512 
1513  // Make remaining levels.
1514  PyramidImageType *lastGP = gp0;
1515  for (unsigned int l = 1; l < numLevels; l++) {
1516 
1517  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1518  cout << " g" << l;
1519  cout.flush();
1520  }
1521 
1522  // Size of next level
1523  w = (w + 1) >> 1;
1524  h = (h + 1) >> 1;
1525 
1526  // Next pyramid level
1527  PyramidImageType *gpn = new PyramidImageType(w, h);
1528 
1529  reduce<SKIPSMImagePixelType>(wraparound, srcImageRange(*lastGP), destImageRange(*gpn));
1530 
1531  gp->push_back(gpn);
1532  lastGP = gpn;
1533  }
1534 
1535  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1536  cout << endl;
1537  }
1538 
1539  return gp;
1540 };
1541 
1542 // Version using argument object factories.
1543 template <typename SrcImageType, typename PyramidImageType,
1544  int PyramidIntegerBits, int PyramidFractionBits,
1545  typename SKIPSMImagePixelType>
1546 inline vector<PyramidImageType*> *gaussianPyramid(unsigned int numLevels,
1547  bool wraparound,
1548  triple<typename SrcImageType::const_traverser, typename SrcImageType::const_traverser, typename SrcImageType::ConstAccessor> src) {
1549  return gaussianPyramid<SrcImageType, PyramidImageType,
1550  PyramidIntegerBits, PyramidFractionBits, SKIPSMImagePixelType>(
1551  numLevels,
1552  wraparound,
1553  src.first, src.second, src.third);
1554 };
1555 
1557 template <typename SrcImageType, typename AlphaImageType, typename PyramidImageType,
1558  int PyramidIntegerBits, int PyramidFractionBits,
1559  typename SKIPSMImagePixelType, typename SKIPSMAlphaPixelType>
1560 vector<PyramidImageType*> *laplacianPyramid(const char* exportName, unsigned int numLevels,
1561  bool wraparound,
1562  typename SrcImageType::const_traverser src_upperleft,
1563  typename SrcImageType::const_traverser src_lowerright,
1564  typename SrcImageType::ConstAccessor sa,
1565  typename AlphaImageType::const_traverser alpha_upperleft,
1566  typename AlphaImageType::ConstAccessor aa) {
1567 
1568  // First create a Gaussian pyramid.
1569  vector <PyramidImageType*> *gp =
1570  gaussianPyramid<SrcImageType, AlphaImageType, PyramidImageType,
1571  PyramidIntegerBits, PyramidFractionBits,
1572  SKIPSMImagePixelType, SKIPSMAlphaPixelType>(
1573  numLevels, wraparound,
1574  src_upperleft, src_lowerright, sa,
1575  alpha_upperleft, aa);
1576 
1577  //exportPyramid(gp, exportName);
1578 
1579  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1580  cout << "Generating Laplacian pyramid:";
1581  cout.flush();
1582  }
1583 
1584  // For each level, subtract the expansion of the next level.
1585  // Stop if there is no next level.
1586  for (unsigned int l = 0; l < (numLevels-1); l++) {
1587 
1588  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1589  cout << " l" << l;
1590  cout.flush();
1591  }
1592 
1593  expand<SKIPSMImagePixelType>(false, wraparound,
1594  srcImageRange(*((*gp)[l+1])),
1595  destImageRange(*((*gp)[l])));
1596  }
1597 
1598  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1599  cout << " l" << (numLevels-1) << endl;
1600  }
1601 
1602  //exportPyramid(gp, exportName);
1603 
1604  return gp;
1605 };
1606 
1607 // Version using argument object factories.
1608 template <typename SrcImageType, typename AlphaImageType, typename PyramidImageType,
1609  int PyramidIntegerBits, int PyramidFractionBits,
1610  typename SKIPSMImagePixelType, typename SKIPSMAlphaPixelType>
1611 inline vector<PyramidImageType*> *laplacianPyramid(const char* exportName, unsigned int numLevels,
1612  bool wraparound,
1613  triple<typename SrcImageType::const_traverser, typename SrcImageType::const_traverser, typename SrcImageType::ConstAccessor> src,
1614  pair<typename AlphaImageType::const_traverser, typename AlphaImageType::ConstAccessor> alpha) {
1615  return laplacianPyramid<SrcImageType, AlphaImageType, PyramidImageType,
1616  PyramidIntegerBits, PyramidFractionBits,
1617  SKIPSMImagePixelType, SKIPSMAlphaPixelType>(
1618  exportName,
1619  numLevels, wraparound,
1620  src.first, src.second, src.third,
1621  alpha.first, alpha.second);
1622 };
1623 
1625 template <typename SKIPSMImagePixelType, typename PyramidImageType>
1626 void collapsePyramid(bool wraparound, vector<PyramidImageType*> *p) {
1627 
1628  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1629  cout << "Collapsing Laplacian pyramid: "
1630  << "l" << p->size()-1;
1631  cout.flush();
1632  }
1633 
1634  // For each level, add the expansion of the next level.
1635  // Work backwards from the smallest level to the largest.
1636  for (int l = (p->size()-2); l >= 0; l--) {
1637 
1638  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1639  cout << " l" << l;
1640  cout.flush();
1641  }
1642 
1643  expand<SKIPSMImagePixelType>(true, wraparound,
1644  srcImageRange(*((*p)[l+1])),
1645  destImageRange(*((*p)[l])));
1646  }
1647 
1648  if (Verbose > VERBOSE_PYRAMID_MESSAGES) {
1649  cout << endl;
1650  }
1651 
1652 };
1653 
1654 // Export a scalar pyramid as a set of UINT16 tiff files.
1655 template <typename PyramidImageType>
1656 void exportPyramid(vector<PyramidImageType*> *v, const char *prefix, VigraTrueType) {
1657  typedef typename PyramidImageType::value_type PyramidValueType;
1658 
1659  //for (unsigned int i = 0; i < (v->size() - 1); i++) {
1660  // // Clear all levels except last.
1661  // initImage(destImageRange(*((*v)[i])), NumericTraits<PyramidValueType>::zero());
1662  //}
1663  //collapsePyramid(false, v);
1664 
1665  for (unsigned int i = 0; i < v->size(); i++) {
1666  char filenameBuf[512];
1667  snprintf(filenameBuf, 512, "%s%04u.tif", prefix, i);
1668 
1669  // Rescale the pyramid values to fit in UINT16.
1670  UInt16Image usPyramid((*v)[i]->width(), (*v)[i]->height());
1671  transformImage(srcImageRange(*((*v)[i])), destImage(usPyramid),
1672  linearRangeMapping(NumericTraits<PyramidValueType>::min(),
1673  NumericTraits<PyramidValueType>::max(),
1674  NumericTraits<UInt16>::min(),
1675  NumericTraits<UInt16>::max()));
1676 
1677  ImageExportInfo info(filenameBuf);
1678  exportImage(srcImageRange(usPyramid), info);
1679  }
1680 };
1681 
1682 // Export a vector pyramid as a set of UINT16 tiff files.
1683 template <typename PyramidImageType>
1684 void exportPyramid(vector<PyramidImageType*> *v, const char *prefix, VigraFalseType) {
1685  typedef typename PyramidImageType::value_type PyramidVectorType;
1686  typedef typename PyramidVectorType::value_type PyramidValueType;
1687 
1688  for (unsigned int i = 0; i < (v->size() - 1); i++) {
1689  // Clear all levels except last.
1690  initImage(destImageRange(*((*v)[i])), NumericTraits<PyramidValueType>::zero());
1691  }
1692  collapsePyramid(false, v);
1693 
1694  for (unsigned int i = 0; i < v->size(); i++) {
1695  char filenameBuf[512];
1696  snprintf(filenameBuf, 512, "%s%04u.tif", prefix, i);
1697 
1698  // Rescale the pyramid values to fit in UINT16.
1699  UInt16RGBImage usPyramid((*v)[i]->width(), (*v)[i]->height());
1700  transformImage(srcImageRange(*((*v)[i])), destImage(usPyramid),
1701  linearRangeMapping(PyramidVectorType(NumericTraits<PyramidValueType>::min()),
1702  PyramidVectorType(NumericTraits<PyramidValueType>::max()),
1703  typename UInt16RGBImage::value_type(NumericTraits<UInt16>::min()),
1704  typename UInt16RGBImage::value_type(NumericTraits<UInt16>::max())));
1705 
1706  ImageExportInfo info(filenameBuf);
1707  exportImage(srcImageRange(usPyramid), info);
1708  }
1709 };
1710 
1711 // Export a pyramid as a set of UINT16 tiff files.
1712 template <typename PyramidImageType>
1713 void exportPyramid(vector<PyramidImageType*> *v, const char *prefix) {
1714  typedef typename NumericTraits<typename PyramidImageType::value_type>::isScalar pyramid_is_scalar;
1715  exportPyramid(v, prefix, pyramid_is_scalar());
1716 };
1717 #endif
1718 } // namespace enblend
1719 
1720 #endif /* __PYRAMID_H__ */