/*M///////////////////////////////////////////////////////////////////////////////////////
2 | //
|
3 | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
4 | //
|
5 | // By downloading, copying, installing or using the software you agree to this license.
|
6 | // If you do not agree to this license, do not download, install,
|
7 | // copy or use the software.
|
8 | //
|
9 | //
|
10 | // License Agreement
|
11 | // For Open Source Computer Vision Library
|
12 | //
|
13 | // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
|
14 | // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
|
15 | // Third party copyrights are property of their respective owners.
|
16 | //
|
17 | // Redistribution and use in source and binary forms, with or without modification,
|
18 | // are permitted provided that the following conditions are met:
|
19 | //
|
20 | // * Redistribution's of source code must retain the above copyright notice,
|
21 | // this list of conditions and the following disclaimer.
|
22 | //
|
23 | // * Redistribution's in binary form must reproduce the above copyright notice,
|
24 | // this list of conditions and the following disclaimer in the documentation
|
25 | // and/or other materials provided with the distribution.
|
26 | //
|
27 | // * The name of the copyright holders may not be used to endorse or promote products
|
28 | // derived from this software without specific prior written permission.
|
29 | //
|
30 | // This software is provided by the copyright holders and contributors "as is" and
|
31 | // any express or implied warranties, including, but not limited to, the implied
|
32 | // warranties of merchantability and fitness for a particular purpose are disclaimed.
|
33 | // In no event shall the Intel Corporation or contributors be liable for any direct,
|
34 | // indirect, incidental, special, exemplary, or consequential damages
|
35 | // (including, but not limited to, procurement of substitute goods or services;
|
36 | // loss of use, data, or profits; or business interruption) however caused
|
37 | // and on any theory of liability, whether in contract, strict liability,
|
38 | // or tort (including negligence or otherwise) arising in any way out of
|
39 | // the use of this software, even if advised of the possibility of such damage.
|
40 | //
|
41 | //M*/
|
42 |
|
43 | #include "_cv.h"
|
44 | #include <limits.h>
|
45 | #include <stdio.h>
|
46 |
|
/****************************************************************************************\
                     Basic Morphological Operations: Erosion & Dilation
\****************************************************************************************/
|
50 |
|
51 | namespace cv
|
52 | {
|
53 |
|
// Binary functor that yields the smaller of its two operands.
// The type1/type2/rtype typedefs describe the operand and result types
// expected by the generic morphology filter templates below.
template<typename T> struct MinOp
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator ()(T a, T b) const
    {
        // equivalent to std::min(a, b)
        return b < a ? b : a;
    }
};
|
61 |
|
// Binary functor that yields the larger of its two operands.
// Counterpart of MinOp; used to instantiate the dilation filters.
template<typename T> struct MaxOp
{
    typedef T type1;
    typedef T type2;
    typedef T rtype;
    T operator ()(T a, T b) const
    {
        // equivalent to std::max(a, b)
        return a < b ? b : a;
    }
};
|
69 |
|
#undef CV_MIN_8U
#undef CV_MAX_8U
// Branch-free 8-bit min/max: min(a,b) = a - sat(a-b), max(a,b) = a + sat(b-a),
// where CV_FAST_CAST_8U is presumably a saturating cast to [0,255] — defined in
// the project headers, not visible here; verify before relying on this note.
#define CV_MIN_8U(a,b)       ((a) - CV_FAST_CAST_8U((a) - (b)))
#define CV_MAX_8U(a,b)       ((a) + CV_FAST_CAST_8U((b) - (a)))

// Specialize the generic functors for uchar to use the branch-free forms.
template<> inline uchar MinOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar MaxOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); }
|
77 |
|
78 | #if CV_SSE2
|
79 |
|
// SSE2-accelerated horizontal pass for integer morphology (erode/dilate).
// VecUpdate supplies the per-vector min/max operation and ESZ, the element
// size in bytes.  operator() processes a prefix of the row and returns the
// number of elements completed; the scalar code finishes the remainder.
template<class VecUpdate> struct MorphRowIVec
{
    enum { ESZ = VecUpdate::ESZ };

    MorphRowIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
    int operator()(const uchar* src, uchar* dst, int width, int cn) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        cn *= ESZ;                    // channel stride in bytes
        int i, k, _ksize = ksize*cn;  // kernel span in bytes
        width *= cn;                  // row width in bytes
        VecUpdate updateOp;

        // Main loop: reduce the ksize-wide window 16 bytes at a time.
        for( i = 0; i <= width - 16; i += 16 )
        {
            __m128i s = _mm_loadu_si128((const __m128i*)(src + i));
            for( k = cn; k < _ksize; k += cn )
            {
                __m128i x = _mm_loadu_si128((const __m128i*)(src + i + k));
                s = updateOp(s, x);
            }
            _mm_storeu_si128((__m128i*)(dst + i), s);
        }

        // Tail: same reduction 4 bytes at a time.
        for( ; i <= width - 4; i += 4 )
        {
            __m128i s = _mm_cvtsi32_si128(*(const int*)(src + i));
            for( k = cn; k < _ksize; k += cn )
            {
                __m128i x = _mm_cvtsi32_si128(*(const int*)(src + i + k));
                s = updateOp(s, x);
            }
            *(int*)(dst + i) = _mm_cvtsi128_si32(s);
        }

        return i/ESZ;   // bytes processed -> elements processed
    }

    int ksize, anchor;
};
|
122 |
|
123 |
|
// SSE-accelerated horizontal pass for 32-bit float morphology.  Width and
// the kernel span are measured in float elements; four floats are reduced
// per iteration.  Returns the number of elements completed.
template<class VecUpdate> struct MorphRowFVec
{
    MorphRowFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
    int operator()(const uchar* src, uchar* dst, int width, int cn) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        int i, k, _ksize = ksize*cn;   // kernel span in float elements
        width *= cn;
        VecUpdate updateOp;

        for( i = 0; i <= width - 4; i += 4 )
        {
            __m128 s = _mm_loadu_ps((const float*)src + i);
            for( k = cn; k < _ksize; k += cn )
            {
                __m128 x = _mm_loadu_ps((const float*)src + i + k);
                s = updateOp(s, x);
            }
            _mm_storeu_ps((float*)dst + i, s);
        }

        return i;
    }

    int ksize, anchor;
};
|
152 |
|
153 |
|
// SSE2-accelerated vertical pass for integer morphology.  `src` holds
// count + ksize - 1 row pointers, each required to be 16-byte aligned
// (asserted below); dst is the output, dststep in bytes.  Returns the
// number of leading elements per row that were processed.
template<class VecUpdate> struct MorphColumnIVec
{
    enum { ESZ = VecUpdate::ESZ };

    MorphColumnIVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
    int operator()(const uchar** src, uchar* dst, int dststep, int count, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        int i = 0, k, _ksize = ksize;
        width *= ESZ;       // width in bytes
        VecUpdate updateOp;

        // The aligned loads below require 16-byte aligned row pointers.
        for( i = 0; i < count + ksize - 1; i++ )
            CV_Assert( ((size_t)src[i] & 15) == 0 );

        // Produce two output rows per iteration: rows r and r+1 share the
        // kernel rows src[1..ksize-1], so that common reduction (s0,s1) is
        // computed once and only the first/last kernel rows are folded in
        // separately.
        for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
        {
            for( i = 0; i <= width - 32; i += 32 )
            {
                const uchar* sptr = src[1] + i;
                __m128i s0 = _mm_load_si128((const __m128i*)sptr);
                __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
                __m128i x0, x1;

                for( k = 2; k < _ksize; k++ )
                {
                    sptr = src[k] + i;
                    x0 = _mm_load_si128((const __m128i*)sptr);
                    x1 = _mm_load_si128((const __m128i*)(sptr + 16));
                    s0 = updateOp(s0, x0);
                    s1 = updateOp(s1, x1);
                }

                // First output row: fold in the top kernel row (src[0]).
                sptr = src[0] + i;
                x0 = _mm_load_si128((const __m128i*)sptr);
                x1 = _mm_load_si128((const __m128i*)(sptr + 16));
                _mm_storeu_si128((__m128i*)(dst + i), updateOp(s0, x0));
                _mm_storeu_si128((__m128i*)(dst + i + 16), updateOp(s1, x1));

                // Second output row: fold in the bottom kernel row
                // (k == _ksize after the loop, i.e. src[_ksize]).
                sptr = src[k] + i;
                x0 = _mm_load_si128((const __m128i*)sptr);
                x1 = _mm_load_si128((const __m128i*)(sptr + 16));
                _mm_storeu_si128((__m128i*)(dst + dststep + i), updateOp(s0, x0));
                _mm_storeu_si128((__m128i*)(dst + dststep + i + 16), updateOp(s1, x1));
            }

            // Tail: same two-row scheme, 8 bytes at a time.
            for( ; i <= width - 8; i += 8 )
            {
                __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[1] + i)), x0;

                for( k = 2; k < _ksize; k++ )
                {
                    x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
                    s0 = updateOp(s0, x0);
                }

                x0 = _mm_loadl_epi64((const __m128i*)(src[0] + i));
                _mm_storel_epi64((__m128i*)(dst + i), updateOp(s0, x0));
                x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
                _mm_storel_epi64((__m128i*)(dst + dststep + i), updateOp(s0, x0));
            }
        }

        // Remaining single row (and the ksize == 1 case).
        for( ; count > 0; count--, dst += dststep, src++ )
        {
            for( i = 0; i <= width - 32; i += 32 )
            {
                const uchar* sptr = src[0] + i;
                __m128i s0 = _mm_load_si128((const __m128i*)sptr);
                __m128i s1 = _mm_load_si128((const __m128i*)(sptr + 16));
                __m128i x0, x1;

                for( k = 1; k < _ksize; k++ )
                {
                    sptr = src[k] + i;
                    x0 = _mm_load_si128((const __m128i*)sptr);
                    x1 = _mm_load_si128((const __m128i*)(sptr + 16));
                    s0 = updateOp(s0, x0);
                    s1 = updateOp(s1, x1);
                }
                _mm_storeu_si128((__m128i*)(dst + i), s0);
                _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
            }

            for( ; i <= width - 8; i += 8 )
            {
                __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;

                for( k = 1; k < _ksize; k++ )
                {
                    x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
                    s0 = updateOp(s0, x0);
                }
                _mm_storel_epi64((__m128i*)(dst + i), s0);
            }
        }

        return i/ESZ;   // bytes processed -> elements processed
    }

    int ksize, anchor;
};
|
258 |
|
259 |
|
260 | template<class VecUpdate> struct MorphColumnFVec
|
261 | {
|
262 | MorphColumnFVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
|
263 | int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const
|
264 | {
|
265 | if( !checkHardwareSupport(CV_CPU_SSE) )
|
266 | return 0;
|
267 |
|
268 | int i = 0, k, _ksize = ksize;
|
269 | VecUpdate updateOp;
|
270 |
|
271 | for( i = 0; i < count + ksize - 1; i++ )
|
272 | CV_Assert( ((size_t)_src[i] & 15) == 0 );
|
273 |
|
274 | const float** src = (const float**)_src;
|
275 | float* dst = (float*)_dst;
|
276 | dststep /= sizeof(dst[0]);
|
277 |
|
278 | for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
|
279 | {
|
280 | for( i = 0; i <= width - 16; i += 16 )
|
281 | {
|
282 | const float* sptr = src[1] + i;
|
283 | __m128 s0 = _mm_load_ps(sptr);
|
284 | __m128 s1 = _mm_load_ps(sptr + 4);
|
285 | __m128 s2 = _mm_load_ps(sptr + 8);
|
286 | __m128 s3 = _mm_load_ps(sptr + 12);
|
287 | __m128 x0, x1, x2, x3;
|
288 |
|
289 | for( k = 2; k < _ksize; k++ )
|
290 | {
|
291 | sptr = src[k] + i;
|
292 | x0 = _mm_load_ps(sptr);
|
293 | x1 = _mm_load_ps(sptr + 4);
|
294 | s0 = updateOp(s0, x0);
|
295 | s1 = updateOp(s1, x1);
|
296 | x2 = _mm_load_ps(sptr + 8);
|
297 | x3 = _mm_load_ps(sptr + 12);
|
298 | s2 = updateOp(s2, x2);
|
299 | s3 = updateOp(s3, x3);
|
300 | }
|
301 |
|
302 | sptr = src[0] + i;
|
303 | x0 = _mm_load_ps(sptr);
|
304 | x1 = _mm_load_ps(sptr + 4);
|
305 | x2 = _mm_load_ps(sptr + 8);
|
306 | x3 = _mm_load_ps(sptr + 12);
|
307 | _mm_storeu_ps(dst + i, updateOp(s0, x0));
|
308 | _mm_storeu_ps(dst + i + 4, updateOp(s1, x1));
|
309 | _mm_storeu_ps(dst + i + 8, updateOp(s2, x2));
|
310 | _mm_storeu_ps(dst + i + 12, updateOp(s3, x3));
|
311 |
|
312 | sptr = src[k] + i;
|
313 | x0 = _mm_load_ps(sptr);
|
314 | x1 = _mm_load_ps(sptr + 4);
|
315 | x2 = _mm_load_ps(sptr + 8);
|
316 | x3 = _mm_load_ps(sptr + 12);
|
317 | _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
|
318 | _mm_storeu_ps(dst + dststep + i + 4, updateOp(s1, x1));
|
319 | _mm_storeu_ps(dst + dststep + i + 8, updateOp(s2, x2));
|
320 | _mm_storeu_ps(dst + dststep + i + 12, updateOp(s3, x3));
|
321 | }
|
322 |
|
323 | for( ; i <= width - 4; i += 4 )
|
324 | {
|
325 | __m128 s0 = _mm_load_ps(src[1] + i), x0;
|
326 |
|
327 | for( k = 2; k < _ksize; k++ )
|
328 | {
|
329 | x0 = _mm_load_ps(src[k] + i);
|
330 | s0 = updateOp(s0, x0);
|
331 | }
|
332 |
|
333 | x0 = _mm_load_ps(src[0] + i);
|
334 | _mm_storeu_ps(dst + i, updateOp(s0, x0));
|
335 | x0 = _mm_load_ps(src[k] + i);
|
336 | _mm_storeu_ps(dst + dststep + i, updateOp(s0, x0));
|
337 | }
|
338 | }
|
339 |
|
340 | for( ; count > 0; count--, dst += dststep, src++ )
|
341 | {
|
342 | for( i = 0; i <= width - 16; i += 16 )
|
343 | {
|
344 | const float* sptr = src[0] + i;
|
345 | __m128 s0 = _mm_load_ps(sptr);
|
346 | __m128 s1 = _mm_load_ps(sptr + 4);
|
347 | __m128 s2 = _mm_load_ps(sptr + 8);
|
348 | __m128 s3 = _mm_load_ps(sptr + 12);
|
349 | __m128 x0, x1, x2, x3;
|
350 |
|
351 | for( k = 1; k < _ksize; k++ )
|
352 | {
|
353 | sptr = src[k] + i;
|
354 | x0 = _mm_load_ps(sptr);
|
355 | x1 = _mm_load_ps(sptr + 4);
|
356 | s0 = updateOp(s0, x0);
|
357 | s1 = updateOp(s1, x1);
|
358 | x2 = _mm_load_ps(sptr + 8);
|
359 | x3 = _mm_load_ps(sptr + 12);
|
360 | s2 = updateOp(s2, x2);
|
361 | s3 = updateOp(s3, x3);
|
362 | }
|
363 | _mm_storeu_ps(dst + i, s0);
|
364 | _mm_storeu_ps(dst + i + 4, s1);
|
365 | _mm_storeu_ps(dst + i + 8, s2);
|
366 | _mm_storeu_ps(dst + i + 12, s3);
|
367 | }
|
368 |
|
369 | for( i = 0; i <= width - 4; i += 4 )
|
370 | {
|
371 | __m128 s0 = _mm_load_ps(src[0] + i), x0;
|
372 | for( k = 1; k < _ksize; k++ )
|
373 | {
|
374 | x0 = _mm_load_ps(src[k] + i);
|
375 | s0 = updateOp(s0, x0);
|
376 | }
|
377 | _mm_storeu_ps(dst + i, s0);
|
378 | }
|
379 | }
|
380 |
|
381 | return i;
|
382 | }
|
383 |
|
384 | int ksize, anchor;
|
385 | };
|
386 |
|
387 |
|
// SSE2 kernel for the general (non-separable) integer morphology: for each
// output element, reduces the `nz` source pointers (one per non-zero kernel
// position) elementwise with VecUpdate.  Returns the number of leading
// elements processed; loads/stores are unaligned.
template<class VecUpdate> struct MorphIVec
{
    enum { ESZ = VecUpdate::ESZ };

    int operator()(uchar** src, int nz, uchar* dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE2) )
            return 0;

        int i, k;
        width *= ESZ;   // width in bytes
        VecUpdate updateOp;

        // 32 bytes per iteration (two registers).
        for( i = 0; i <= width - 32; i += 32 )
        {
            const uchar* sptr = src[0] + i;
            __m128i s0 = _mm_loadu_si128((const __m128i*)sptr);
            __m128i s1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
            __m128i x0, x1;

            for( k = 1; k < nz; k++ )
            {
                sptr = src[k] + i;
                x0 = _mm_loadu_si128((const __m128i*)sptr);
                x1 = _mm_loadu_si128((const __m128i*)(sptr + 16));
                s0 = updateOp(s0, x0);
                s1 = updateOp(s1, x1);
            }
            _mm_storeu_si128((__m128i*)(dst + i), s0);
            _mm_storeu_si128((__m128i*)(dst + i + 16), s1);
        }

        // 8-byte tail.
        for( ; i <= width - 8; i += 8 )
        {
            __m128i s0 = _mm_loadl_epi64((const __m128i*)(src[0] + i)), x0;

            for( k = 1; k < nz; k++ )
            {
                x0 = _mm_loadl_epi64((const __m128i*)(src[k] + i));
                s0 = updateOp(s0, x0);
            }
            _mm_storel_epi64((__m128i*)(dst + i), s0);
        }

        return i/ESZ;   // bytes processed -> elements processed
    }
};
|
435 |
|
436 |
|
// SSE kernel for the general (non-separable) float morphology.  Unlike the
// integer version, this one also finishes the row with a scalar remainder
// loop (_mm_load_ss/_mm_store_ss), so it returns width: every element done.
template<class VecUpdate> struct MorphFVec
{
    int operator()(uchar** _src, int nz, uchar* _dst, int width) const
    {
        if( !checkHardwareSupport(CV_CPU_SSE) )
            return 0;

        const float** src = (const float**)_src;
        float* dst = (float*)_dst;
        int i, k;
        VecUpdate updateOp;

        // 16 floats per iteration (four registers).
        for( i = 0; i <= width - 16; i += 16 )
        {
            const float* sptr = src[0] + i;
            __m128 s0 = _mm_loadu_ps(sptr);
            __m128 s1 = _mm_loadu_ps(sptr + 4);
            __m128 s2 = _mm_loadu_ps(sptr + 8);
            __m128 s3 = _mm_loadu_ps(sptr + 12);
            __m128 x0, x1, x2, x3;

            for( k = 1; k < nz; k++ )
            {
                sptr = src[k] + i;
                x0 = _mm_loadu_ps(sptr);
                x1 = _mm_loadu_ps(sptr + 4);
                x2 = _mm_loadu_ps(sptr + 8);
                x3 = _mm_loadu_ps(sptr + 12);
                s0 = updateOp(s0, x0);
                s1 = updateOp(s1, x1);
                s2 = updateOp(s2, x2);
                s3 = updateOp(s3, x3);
            }
            _mm_storeu_ps(dst + i, s0);
            _mm_storeu_ps(dst + i + 4, s1);
            _mm_storeu_ps(dst + i + 8, s2);
            _mm_storeu_ps(dst + i + 12, s3);
        }

        // 4-float tail.
        for( ; i <= width - 4; i += 4 )
        {
            __m128 s0 = _mm_loadu_ps(src[0] + i), x0;

            for( k = 1; k < nz; k++ )
            {
                x0 = _mm_loadu_ps(src[k] + i);
                s0 = updateOp(s0, x0);
            }
            _mm_storeu_ps(dst + i, s0);
        }

        // Scalar remainder: one float at a time in the low SSE lane.
        for( ; i < width; i++ )
        {
            __m128 s0 = _mm_load_ss(src[0] + i), x0;

            for( k = 1; k < nz; k++ )
            {
                x0 = _mm_load_ss(src[k] + i);
                s0 = updateOp(s0, x0);
            }
            _mm_store_ss(dst + i, s0);
        }

        return i;
    }
};
|
503 |
|
// Elementary per-vector min/max operations plugged into the templates above
// as VecUpdate.  ESZ is the element size in bytes.
struct VMin8u
{
    enum { ESZ = 1 };
    __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); }
};
struct VMax8u
{
    enum { ESZ = 1 };
    __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); }
};
// SSE2 has no unsigned 16-bit min/max, so they are emulated with saturating
// arithmetic: subs_epu16(a,b) = max(a-b, 0), hence a - max(a-b,0) = min(a,b)
// and max(a-b,0) + b = max(a,b).
struct VMin16u
{
    enum { ESZ = 2 };
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); }
};
struct VMax16u
{
    enum { ESZ = 2 };
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_adds_epu16(_mm_subs_epu16(a,b), b); }
};
struct VMin16s
{
    enum { ESZ = 2 };
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_min_epi16(a, b); }
};
struct VMax16s
{
    enum { ESZ = 2 };
    __m128i operator()(const __m128i& a, const __m128i& b) const
    { return _mm_max_epi16(a, b); }
};
struct VMin32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_min_ps(a,b); }};
struct VMax32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_max_ps(a,b); }};

// Concrete vectorized row/column/2D operators for each supported depth;
// erode = min, dilate = max.
typedef MorphRowIVec<VMin8u> ErodeRowVec8u;
typedef MorphRowIVec<VMax8u> DilateRowVec8u;
typedef MorphRowIVec<VMin16u> ErodeRowVec16u;
typedef MorphRowIVec<VMax16u> DilateRowVec16u;
typedef MorphRowIVec<VMin16s> ErodeRowVec16s;
typedef MorphRowIVec<VMax16s> DilateRowVec16s;
typedef MorphRowFVec<VMin32f> ErodeRowVec32f;
typedef MorphRowFVec<VMax32f> DilateRowVec32f;

typedef MorphColumnIVec<VMin8u> ErodeColumnVec8u;
typedef MorphColumnIVec<VMax8u> DilateColumnVec8u;
typedef MorphColumnIVec<VMin16u> ErodeColumnVec16u;
typedef MorphColumnIVec<VMax16u> DilateColumnVec16u;
typedef MorphColumnIVec<VMin16s> ErodeColumnVec16s;
typedef MorphColumnIVec<VMax16s> DilateColumnVec16s;
typedef MorphColumnFVec<VMin32f> ErodeColumnVec32f;
typedef MorphColumnFVec<VMax32f> DilateColumnVec32f;

typedef MorphIVec<VMin8u> ErodeVec8u;
typedef MorphIVec<VMax8u> DilateVec8u;
typedef MorphIVec<VMin16u> ErodeVec16u;
typedef MorphIVec<VMax16u> DilateVec16u;
typedef MorphIVec<VMin16s> ErodeVec16s;
typedef MorphIVec<VMax16s> DilateVec16s;
typedef MorphFVec<VMin32f> ErodeVec32f;
typedef MorphFVec<VMax32f> DilateVec32f;
|
567 |
|
568 | #else
|
569 |
|
// Fallback when SSE2 is not available at compile time: each "vector"
// operator reports 0 processed elements, so the generic scalar loops in
// MorphRowFilter/MorphColumnFilter/MorphFilter do all the work.
struct MorphRowNoVec
{
    MorphRowNoVec(int, int) {}
    int operator()(const uchar*, uchar*, int, int) const { return 0; }
};

struct MorphColumnNoVec
{
    MorphColumnNoVec(int, int) {}
    int operator()(const uchar**, uchar*, int, int, int) const { return 0; }
};

struct MorphNoVec
{
    int operator()(uchar**, int, uchar*, int) const { return 0; }
};

// Same typedef names as in the SSE2 branch, all mapped to the no-op
// versions so the rest of the file is identical either way.
typedef MorphRowNoVec ErodeRowVec8u;
typedef MorphRowNoVec DilateRowVec8u;
typedef MorphRowNoVec ErodeRowVec16u;
typedef MorphRowNoVec DilateRowVec16u;
typedef MorphRowNoVec ErodeRowVec16s;
typedef MorphRowNoVec DilateRowVec16s;
typedef MorphRowNoVec ErodeRowVec32f;
typedef MorphRowNoVec DilateRowVec32f;

typedef MorphColumnNoVec ErodeColumnVec8u;
typedef MorphColumnNoVec DilateColumnVec8u;
typedef MorphColumnNoVec ErodeColumnVec16u;
typedef MorphColumnNoVec DilateColumnVec16u;
typedef MorphColumnNoVec ErodeColumnVec16s;
typedef MorphColumnNoVec DilateColumnVec16s;
typedef MorphColumnNoVec ErodeColumnVec32f;
typedef MorphColumnNoVec DilateColumnVec32f;

typedef MorphNoVec ErodeVec8u;
typedef MorphNoVec DilateVec8u;
typedef MorphNoVec ErodeVec16u;
typedef MorphNoVec DilateVec16u;
typedef MorphNoVec ErodeVec16s;
typedef MorphNoVec DilateVec16s;
typedef MorphNoVec ErodeVec32f;
typedef MorphNoVec DilateVec32f;
|
613 |
|
614 | #endif
|
615 |
|
616 | template<class Op, class VecOp> struct MorphRowFilter : public BaseRowFilter
|
617 | {
|
618 | typedef typename Op::rtype T;
|
619 |
|
620 | MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
|
621 | {
|
622 | ksize = _ksize;
|
623 | anchor = _anchor;
|
624 | }
|
625 |
|
626 | void operator()(const uchar* src, uchar* dst, int width, int cn)
|
627 | {
|
628 | static int initial_cn = cn;
|
629 | int i, j, k, _ksize = ksize*cn;
|
630 | const T* S = (const T*)src;
|
631 | Op op;
|
632 | T* D = (T*)dst;
|
633 |
|
634 | if( _ksize == cn )
|
635 | {
|
636 | for( i = 0; i < width*cn; i++ )
|
637 | D[i] = S[i];
|
638 | return;
|
639 | }
|
640 |
|
641 | int i0 = vecOp(src, dst, width, cn);
|
642 | width *= cn;
|
643 |
|
644 | for( k = 0; k < cn; k++, S++, D++ )
|
645 | {
|
646 | for( i = i0; i <= width - cn*2; i += cn*2 )
|
647 | {
|
648 | const T* s = S + i;
|
649 | T m = s[cn];
|
650 | for( j = cn*2; j < _ksize; j += cn )
|
651 | m = op(m, s[j]);
|
652 | D[i] = op(m, s[0]);
|
653 | D[i+cn] = op(m, s[j]);
|
654 | }
|
655 |
|
656 | for( ; i < width; i += cn )
|
657 | {
|
658 | const T* s = S + i;
|
659 | T m = s[0];
|
660 | for( j = cn; j < _ksize; j += cn )
|
661 | m = op(m, s[j]);
|
662 | D[i] = m;
|
663 | }
|
664 | }
|
665 | }
|
666 |
|
667 | VecOp vecOp;
|
668 | };
|
669 |
|
670 |
|
// Vertical pass of separable morphology (scalar version).  `_src` holds
// count + ksize - 1 row pointers; vecOp processes a vectorized prefix of
// i0 elements per row, and the C++ loops finish each row.  Like the SIMD
// versions, two output rows per iteration reuse the reduction over the
// shared interior kernel rows.
template<class Op, class VecOp> struct MorphColumnFilter : public BaseColumnFilter
{
    typedef typename Op::rtype T;

    MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
    {
        ksize = _ksize;
        anchor = _anchor;
    }

    void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width)
    {
        int i, k, _ksize = ksize;
        const T** src = (const T**)_src;
        T* D = (T*)dst;
        Op op;

        int i0 = vecOp(_src, dst, dststep, count, width);
        dststep /= sizeof(D[0]);   // bytes -> elements

        for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 )
        {
            // Four columns at a time; s0..s3 accumulate the reduction over
            // the interior kernel rows src[1..ksize-1].
            for( i = i0; i <= width - 4; i += 4 )
            {
                const T* sptr = src[1] + i;
                T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];

                for( k = 2; k < _ksize; k++ )
                {
                    sptr = src[k] + i;
                    s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
                    s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
                }

                // First output row: fold in the top kernel row (src[0]).
                sptr = src[0] + i;
                D[i] = op(s0, sptr[0]);
                D[i+1] = op(s1, sptr[1]);
                D[i+2] = op(s2, sptr[2]);
                D[i+3] = op(s3, sptr[3]);

                // Second output row: fold in the bottom kernel row
                // (k == _ksize after the loop above).
                sptr = src[k] + i;
                D[i+dststep] = op(s0, sptr[0]);
                D[i+dststep+1] = op(s1, sptr[1]);
                D[i+dststep+2] = op(s2, sptr[2]);
                D[i+dststep+3] = op(s3, sptr[3]);
            }

            // Column-by-column tail with the same two-row scheme.
            for( ; i < width; i++ )
            {
                T s0 = src[1][i];

                for( k = 2; k < _ksize; k++ )
                    s0 = op(s0, src[k][i]);

                D[i] = op(s0, src[0][i]);
                D[i+dststep] = op(s0, src[k][i]);
            }
        }

        // Remaining single row (and the ksize == 1 case).
        for( ; count > 0; count--, D += dststep, src++ )
        {
            for( i = i0; i <= width - 4; i += 4 )
            {
                const T* sptr = src[0] + i;
                T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];

                for( k = 1; k < _ksize; k++ )
                {
                    sptr = src[k] + i;
                    s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
                    s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
                }

                D[i] = s0; D[i+1] = s1;
                D[i+2] = s2; D[i+3] = s3;
            }

            for( ; i < width; i++ )
            {
                T s0 = src[0][i];
                for( k = 1; k < _ksize; k++ )
                    s0 = op(s0, src[k][i]);
                D[i] = s0;
            }
        }
    }

    VecOp vecOp;
};
|
760 |
|
761 |
|
// Non-separable (arbitrary kernel) morphology filter.  The kernel is
// reduced to the coordinates of its non-zero elements; each output pixel
// is Op over the source pixels at those offsets.
template<class Op, class VecOp> struct MorphFilter : BaseFilter
{
    typedef typename Op::rtype T;

    MorphFilter( const Mat& _kernel, Point _anchor )
    {
        anchor = _anchor;
        ksize = _kernel.size();
        CV_Assert( _kernel.type() == CV_8U );

        vector<uchar> coeffs; // coeffs are discarded: morphology is a flat (unweighted) filter

        preprocess2DKernel( _kernel, coords, coeffs );
        ptrs.resize( coords.size() );
    }

    void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn)
    {
        const Point* pt = &coords[0];
        const T** kp = (const T**)&ptrs[0];
        int i, k, nz = (int)coords.size();
        Op op;

        width *= cn;
        for( ; count > 0; count--, dst += dststep, src++ )
        {
            T* D = (T*)dst;

            // Point kp[k] at the source pixel for kernel position k of this row.
            for( k = 0; k < nz; k++ )
                kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn;

            i = vecOp(&ptrs[0], nz, dst, width);  // vectorized prefix

            // Four elements at a time.
            for( ; i <= width - 4; i += 4 )
            {
                const T* sptr = kp[0] + i;
                T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];

                for( k = 1; k < nz; k++ )
                {
                    sptr = kp[k] + i;
                    s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
                    s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
                }

                D[i] = s0; D[i+1] = s1;
                D[i+2] = s2; D[i+3] = s3;
            }

            // Scalar tail.
            for( ; i < width; i++ )
            {
                T s0 = kp[0][i];
                for( k = 1; k < nz; k++ )
                    s0 = op(s0, kp[k][i]);
                D[i] = s0;
            }
        }
    }

    vector<Point> coords;   // positions of the kernel's non-zero elements
    vector<uchar*> ptrs;    // per-row scratch: source pointer for each position
    VecOp vecOp;
};
|
825 |
|
826 |
|
827 |
|
828 | Ptr<BaseRowFilter> getMorphologyRowFilter(int op, int type, int ksize, int anchor)
|
829 | {
|
830 | int depth = CV_MAT_DEPTH(type);
|
831 | if( anchor < 0 )
|
832 | anchor = ksize/2;
|
833 | CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
|
834 | if( op == MORPH_ERODE )
|
835 | {
|
836 | if( depth == CV_8U )
|
837 | return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<uchar>,
|
838 | ErodeRowVec8u>(ksize, anchor));
|
839 | if( depth == CV_16U )
|
840 | return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<ushort>,
|
841 | ErodeRowVec16u>(ksize, anchor));
|
842 | if( depth == CV_16S )
|
843 | return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<short>,
|
844 | ErodeRowVec16s>(ksize, anchor));
|
845 | if( depth == CV_32F )
|
846 | return Ptr<BaseRowFilter>(new MorphRowFilter<MinOp<float>,
|
847 | ErodeRowVec32f>(ksize, anchor));
|
848 | }
|
849 | else
|
850 | {
|
851 | if( depth == CV_8U )
|
852 | return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<uchar>,
|
853 | DilateRowVec8u>(ksize, anchor));
|
854 | if( depth == CV_16U )
|
855 | return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<ushort>,
|
856 | DilateRowVec16u>(ksize, anchor));
|
857 | if( depth == CV_16S )
|
858 | return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<short>,
|
859 | DilateRowVec16s>(ksize, anchor));
|
860 | if( depth == CV_32F )
|
861 | return Ptr<BaseRowFilter>(new MorphRowFilter<MaxOp<float>,
|
862 | DilateRowVec32f>(ksize, anchor));
|
863 | }
|
864 |
|
865 | CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
|
866 | return Ptr<BaseRowFilter>(0);
|
867 | }
|
868 |
|
869 | Ptr<BaseColumnFilter> getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
|
870 | {
|
871 | int depth = CV_MAT_DEPTH(type);
|
872 | if( anchor < 0 )
|
873 | anchor = ksize/2;
|
874 | CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
|
875 | if( op == MORPH_ERODE )
|
876 | {
|
877 | if( depth == CV_8U )
|
878 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<uchar>,
|
879 | ErodeColumnVec8u>(ksize, anchor));
|
880 | if( depth == CV_16U )
|
881 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<ushort>,
|
882 | ErodeColumnVec16u>(ksize, anchor));
|
883 | if( depth == CV_16S )
|
884 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<short>,
|
885 | ErodeColumnVec16s>(ksize, anchor));
|
886 | if( depth == CV_32F )
|
887 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MinOp<float>,
|
888 | ErodeColumnVec32f>(ksize, anchor));
|
889 | }
|
890 | else
|
891 | {
|
892 | if( depth == CV_8U )
|
893 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<uchar>,
|
894 | DilateColumnVec8u>(ksize, anchor));
|
895 | if( depth == CV_16U )
|
896 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<ushort>,
|
897 | DilateColumnVec16u>(ksize, anchor));
|
898 | if( depth == CV_16S )
|
899 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<short>,
|
900 | DilateColumnVec16s>(ksize, anchor));
|
901 | if( depth == CV_32F )
|
902 | return Ptr<BaseColumnFilter>(new MorphColumnFilter<MaxOp<float>,
|
903 | DilateColumnVec32f>(ksize, anchor));
|
904 | }
|
905 |
|
906 | CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
|
907 | return Ptr<BaseColumnFilter>(0);
|
908 | }
|
909 |
|
910 |
|
911 | Ptr<BaseFilter> getMorphologyFilter(int op, int type, const Mat& kernel, Point anchor)
|
912 | {
|
913 | int depth = CV_MAT_DEPTH(type);
|
914 | anchor = normalizeAnchor(anchor, kernel.size());
|
915 | CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
|
916 | if( op == MORPH_ERODE )
|
917 | {
|
918 | if( depth == CV_8U )
|
919 | return Ptr<BaseFilter>(new MorphFilter<MinOp<uchar>, ErodeVec8u>(kernel, anchor));
|
920 | if( depth == CV_16U )
|
921 | return Ptr<BaseFilter>(new MorphFilter<MinOp<ushort>, ErodeVec16u>(kernel, anchor));
|
922 | if( depth == CV_16S )
|
923 | return Ptr<BaseFilter>(new MorphFilter<MinOp<short>, ErodeVec16s>(kernel, anchor));
|
924 | if( depth == CV_32F )
|
925 | return Ptr<BaseFilter>(new MorphFilter<MinOp<float>, ErodeVec32f>(kernel, anchor));
|
926 | }
|
927 | else
|
928 | {
|
929 | if( depth == CV_8U )
|
930 | return Ptr<BaseFilter>(new MorphFilter<MaxOp<uchar>, DilateVec8u>(kernel, anchor));
|
931 | if( depth == CV_16U )
|
932 | return Ptr<BaseFilter>(new MorphFilter<MaxOp<ushort>, DilateVec16u>(kernel, anchor));
|
933 | if( depth == CV_16S )
|
934 | return Ptr<BaseFilter>(new MorphFilter<MaxOp<short>, DilateVec16s>(kernel, anchor));
|
935 | if( depth == CV_32F )
|
936 | return Ptr<BaseFilter>(new MorphFilter<MaxOp<float>, DilateVec32f>(kernel, anchor));
|
937 | }
|
938 |
|
939 | CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
|
940 | return Ptr<BaseFilter>(0);
|
941 | }
|
942 |
|
943 |
|
// Assembles the complete morphology filter engine.
//
// If every kernel element is non-zero, the kernel is a full rectangle and
// the operation is separable into a horizontal + vertical 1D pass;
// otherwise a general 2D filter over the non-zero positions is used.
//
// A BORDER_CONSTANT border with the special "default" value is replaced
// by the operation's neutral element (the type's maximum for erode, its
// minimum/zero for dilate) so the border never influences the result.
Ptr<FilterEngine> createMorphologyFilter( int op, int type, const Mat& kernel,
    Point anchor, int _rowBorderType, int _columnBorderType,
    const Scalar& _borderValue )
{
    anchor = normalizeAnchor(anchor, kernel.size());

    Ptr<BaseRowFilter> rowFilter;
    Ptr<BaseColumnFilter> columnFilter;
    Ptr<BaseFilter> filter2D;

    // All-nonzero kernel == rectangle -> separable min/max.
    if( countNonZero(kernel) == kernel.rows*kernel.cols )
    {
        rowFilter = getMorphologyRowFilter(op, type, kernel.cols, anchor.x);
        columnFilter = getMorphologyColumnFilter(op, type, kernel.rows, anchor.y);
    }
    else
        filter2D = getMorphologyFilter(op, type, kernel, anchor);

    Scalar borderValue = _borderValue;
    if( (_rowBorderType == BORDER_CONSTANT || _columnBorderType == BORDER_CONSTANT) &&
        borderValue == morphologyDefaultBorderValue() )
    {
        int depth = CV_MAT_DEPTH(type);
        CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_32F );
        if( op == MORPH_ERODE )
            borderValue = Scalar::all( depth == CV_8U ? (double)UCHAR_MAX :
                depth == CV_16U ? (double)USHRT_MAX : (double)FLT_MAX );
        else
            borderValue = Scalar::all( depth == CV_8U || depth == CV_16U ?
                0. : (double)-FLT_MAX );
    }

    return Ptr<FilterEngine>(new FilterEngine(filter2D, rowFilter, columnFilter,
        type, type, type, _rowBorderType, _columnBorderType, borderValue ));
}
|
980 |
|
981 |
|
982 | Mat getStructuringElement(int shape, Size ksize, Point anchor)
|
983 | {
|
984 | int i, j;
|
985 | int r = 0, c = 0;
|
986 | double inv_r2 = 0;
|
987 |
|
988 | CV_Assert( shape == MORPH_RECT || shape == MORPH_CROSS || shape == MORPH_ELLIPSE );
|
989 |
|
990 | anchor = normalizeAnchor(anchor, ksize);
|
991 |
|
992 | if( ksize == Size(1,1) )
|
993 | shape = MORPH_RECT;
|
994 |
|
995 | if( shape == MORPH_ELLIPSE )
|
996 | {
|
997 | r = ksize.height/2;
|
998 | c = ksize.width/2;
|
999 | inv_r2 = r ? 1./((double)r*r) : 0;
|
1000 | }
|
1001 |
|
1002 | Mat elem(ksize, CV_8U);
|
1003 |
|
1004 | for( i = 0; i < ksize.height; i++ )
|
1005 | {
|
1006 | uchar* ptr = elem.data + i*elem.step;
|
1007 | int j1 = 0, j2 = 0;
|
1008 |
|
1009 | if( shape == MORPH_RECT || (shape == MORPH_CROSS && i == anchor.y) )
|
1010 | j2 = ksize.width;
|
1011 | else if( shape == MORPH_CROSS )
|
1012 | j1 = anchor.x, j2 = j1 + 1;
|
1013 | else
|
1014 | {
|
1015 | int dy = i - r;
|
1016 | if( std::abs(dy) <= r )
|
1017 | {
|
1018 | int dx = saturate_cast<int>(c*std::sqrt((r*r - dy*dy)*inv_r2));
|
1019 | j1 = std::max( c - dx, 0 );
|
1020 | j2 = std::min( c + dx + 1, ksize.width );
|
1021 | }
|
1022 | }
|
1023 |
|
1024 | for( j = 0; j < j1; j++ )
|
1025 | ptr[j] = 0;
|
1026 | for( ; j < j2; j++ )
|
1027 | ptr[j] = 1;
|
1028 | for( ; j < ksize.width; j++ )
|
1029 | ptr[j] = 0;
|
1030 | }
|
1031 |
|
1032 | return elem;
|
1033 | }
|
1034 |
|
1035 | static void morphOp( int op, const Mat& src, Mat& dst, const Mat& _kernel,
|
1036 | Point anchor, int iterations,
|
1037 | int borderType, const Scalar& borderValue )
|
1038 | {
|
1039 | Mat kernel;
|
1040 | Size ksize = _kernel.data ? _kernel.size() : Size(3,3);
|
1041 | anchor = normalizeAnchor(anchor, ksize);
|
1042 |
|
1043 | CV_Assert( anchor.inside(Rect(0, 0, ksize.width, ksize.height)) );
|
1044 |
|
1045 | if( iterations == 0 || _kernel.rows*_kernel.cols == 1 )
|
1046 | {
|
1047 | src.copyTo(dst);
|
1048 | return;
|
1049 | }
|
1050 |
|
1051 | dst.create( src.size(), src.type() );
|
1052 |
|
1053 | if( !_kernel.data )
|
1054 | {
|
1055 | kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
|
1056 | anchor = Point(iterations, iterations);
|
1057 | iterations = 1;
|
1058 | }
|
1059 | else if( iterations > 1 && countNonZero(_kernel) == _kernel.rows*_kernel.cols )
|
1060 | {
|
1061 | anchor = Point(anchor.x*iterations, anchor.y*iterations);
|
1062 | kernel = getStructuringElement(MORPH_RECT,
|
1063 | Size(ksize.width + iterations*(ksize.width-1),
|
1064 | ksize.height + iterations*(ksize.height-1)),
|
1065 | anchor);
|
1066 | iterations = 1;
|
1067 | }
|
1068 | else
|
1069 | kernel = _kernel;
|
1070 |
|
1071 | Ptr<FilterEngine> f = createMorphologyFilter(op, src.type(),
|
1072 | kernel, anchor, borderType, borderType, borderValue );
|
1073 |
|
1074 | f->apply( src, dst );
|
1075 | for( int i = 1; i < iterations; i++ )
|
1076 | f->apply( dst, dst );
|
1077 | }
|
1078 |
|
1079 |
|
1080 | void erode( const Mat& src, Mat& dst, const Mat& kernel,
|
1081 | Point anchor, int iterations,
|
1082 | int borderType, const Scalar& borderValue )
|
1083 | {
|
1084 | morphOp( MORPH_ERODE, src, dst, kernel, anchor, iterations, borderType, borderValue );
|
1085 | }
|
1086 |
|
1087 |
|
1088 | void dilate( const Mat& src, Mat& dst, const Mat& kernel,
|
1089 | Point anchor, int iterations,
|
1090 | int borderType, const Scalar& borderValue )
|
1091 | {
|
1092 | morphOp( MORPH_DILATE, src, dst, kernel, anchor, iterations, borderType, borderValue );
|
1093 | }
|
1094 |
|
1095 |
|
1096 | void morphologyEx( const Mat& src, Mat& dst, int op, const Mat& kernel,
|
1097 | Point anchor, int iterations, int borderType,
|
1098 | const Scalar& borderValue )
|
1099 | {
|
1100 | Mat temp;
|
1101 | switch( op )
|
1102 | {
|
1103 | case MORPH_ERODE:
|
1104 | erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
|
1105 | break;
|
1106 | case MORPH_DILATE:
|
1107 | dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
|
1108 | break;
|
1109 | case MORPH_OPEN:
|
1110 | erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
|
1111 | dilate( dst, dst, kernel, anchor, iterations, borderType, borderValue );
|
1112 | break;
|
1113 | case CV_MOP_CLOSE:
|
1114 | dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
|
1115 | erode( dst, dst, kernel, anchor, iterations, borderType, borderValue );
|
1116 | break;
|
1117 | case CV_MOP_GRADIENT:
|
1118 | erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
|
1119 | dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
|
1120 | dst -= temp;
|
1121 | break;
|
1122 | case CV_MOP_TOPHAT:
|
1123 | if( src.data != dst.data )
|
1124 | temp = dst;
|
1125 | erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
|
1126 | dilate( temp, temp, kernel, anchor, iterations, borderType, borderValue );
|
1127 | dst = src - temp;
|
1128 | break;
|
1129 | case CV_MOP_BLACKHAT:
|
1130 | if( src.data != dst.data )
|
1131 | temp = dst;
|
1132 | dilate( src, temp, kernel, anchor, iterations, borderType, borderValue );
|
1133 | erode( temp, temp, kernel, anchor, iterations, borderType, borderValue );
|
1134 | dst = temp - src;
|
1135 | break;
|
1136 | default:
|
1137 | CV_Error( CV_StsBadArg, "unknown morphological operation" );
|
1138 | }
|
1139 | }
|
1140 |
|
1141 | }
|
1142 |
|
1143 | CV_IMPL IplConvKernel *
|
1144 | cvCreateStructuringElementEx( int cols, int rows,
|
1145 | int anchorX, int anchorY,
|
1146 | int shape, int *values )
|
1147 | {
|
1148 | cv::Size ksize = cv::Size(cols, rows);
|
1149 | cv::Point anchor = cv::Point(anchorX, anchorY);
|
1150 | CV_Assert( cols > 0 && rows > 0 && anchor.inside(cv::Rect(0,0,cols,rows)) &&
|
1151 | (shape != CV_SHAPE_CUSTOM || values != 0));
|
1152 |
|
1153 | int i, size = rows * cols;
|
1154 | int element_size = sizeof(IplConvKernel) + size*sizeof(int);
|
1155 | IplConvKernel *element = (IplConvKernel*)cvAlloc(element_size + 32);
|
1156 |
|
1157 | element->nCols = cols;
|
1158 | element->nRows = rows;
|
1159 | element->anchorX = anchorX;
|
1160 | element->anchorY = anchorY;
|
1161 | element->nShiftR = shape < CV_SHAPE_ELLIPSE ? shape : CV_SHAPE_CUSTOM;
|
1162 | element->values = (int*)(element + 1);
|
1163 |
|
1164 | if( shape == CV_SHAPE_CUSTOM )
|
1165 | {
|
1166 | for( i = 0; i < size; i++ )
|
1167 | element->values[i] = values[i];
|
1168 | }
|
1169 | else
|
1170 | {
|
1171 | cv::Mat elem = cv::getStructuringElement(shape, ksize, anchor);
|
1172 | for( i = 0; i < size; i++ )
|
1173 | element->values[i] = elem.data[i];
|
1174 | }
|
1175 |
|
1176 | return element;
|
1177 | }
|
1178 |
|
1179 |
|
1180 | CV_IMPL void
|
1181 | cvReleaseStructuringElement( IplConvKernel ** element )
|
1182 | {
|
1183 | if( !element )
|
1184 | CV_Error( CV_StsNullPtr, "" );
|
1185 | cvFree( element );
|
1186 | }
|
1187 |
|
1188 |
|
1189 | static void convertConvKernel( const IplConvKernel* src, cv::Mat& dst, cv::Point& anchor )
|
1190 | {
|
1191 | if(!src)
|
1192 | {
|
1193 | anchor = cv::Point(1,1);
|
1194 | dst.release();
|
1195 | return;
|
1196 | }
|
1197 | anchor = cv::Point(src->anchorX, src->anchorY);
|
1198 | dst.create(src->nRows, src->nCols, CV_8U);
|
1199 |
|
1200 | int i, size = src->nRows*src->nCols;
|
1201 | for( i = 0; i < size; i++ )
|
1202 | dst.data[i] = (uchar)src->values[i];
|
1203 | }
|
1204 |
|
1205 |
|
1206 | CV_IMPL void
|
1207 | cvErode( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
|
1208 | {
|
1209 | cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
|
1210 | CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
|
1211 | cv::Point anchor;
|
1212 | convertConvKernel( element, kernel, anchor );
|
1213 | cv::erode( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
|
1214 | }
|
1215 |
|
1216 |
|
1217 | CV_IMPL void
|
1218 | cvDilate( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
|
1219 | {
|
1220 | cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
|
1221 | CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
|
1222 | cv::Point anchor;
|
1223 | convertConvKernel( element, kernel, anchor );
|
1224 | cv::dilate( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
|
1225 | }
|
1226 |
|
1227 |
|
1228 | CV_IMPL void
|
1229 | cvMorphologyEx( const void* srcarr, void* dstarr, void*,
|
1230 | IplConvKernel* element, int op, int iterations )
|
1231 | {
|
1232 | cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
|
1233 | CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
|
1234 | cv::Point anchor;
|
1235 | IplConvKernel* temp_element = NULL;
|
1236 | if (!element)
|
1237 | {
|
1238 | temp_element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT);
|
1239 | } else {
|
1240 | temp_element = element;
|
1241 | }
|
1242 | convertConvKernel( temp_element, kernel, anchor );
|
1243 | if (!element)
|
1244 | {
|
1245 | cvReleaseStructuringElement(&temp_element);
|
1246 | }
|
1247 | cv::morphologyEx( src, dst, op, kernel, anchor, iterations, cv::BORDER_REPLICATE );
|
1248 | }
|
1249 |
|
1250 |
|