OpenCV Image Contour Detection

The Canny operator introduced earlier in the image-transformation chapter can detect the edges in an image, but the result still has to be interpreted by eye. In real engineering work we usually need the contours as concrete mathematical data, which brings us to today's topic: image contour detection.
1. Image Contour Detection
In OpenCV a contour is represented as a sequence of points, and the library provides a function that extracts these point sets.
API: void findContours(InputOutputArray image, OutputArrayOfArrays contours, OutputArray hierarchy, int mode, int method, Point offset = Point())
Notes: 1. The input is an 8-bit single-channel binary image, typically the output of Canny. If it is not already binarized, the function treats every non-zero pixel as 1 and every zero pixel as 0; you can binarize beforehand with Canny, threshold or adaptiveThreshold.
2. The function may modify the source image while searching for contours, so do not pass your original image directly; work on a copy.
3. The contours come back as vector<vector<Point>> contours: the outer vector holds one entry per contour, and each inner vector is the list of points that make up that contour.
4. The hierarchy output describes the topology of the contours. For contour i, hierarchy[i] is a Vec4i h in which h[0] is the index of the next contour at the same level, h[1] the previous contour at the same level, h[2] the parent contour and h[3] the first child (nested) contour; -1 means there is no such contour.
5. Retrieval modes: RETR_EXTERNAL detects only the outermost contours; RETR_LIST retrieves all contours without building any parent/child relationship (only siblings); RETR_CCOMP retrieves all contours and organizes them into a two-level hierarchy; RETR_TREE retrieves all contours and reconstructs the full nested hierarchy.
6. Approximation methods: CHAIN_APPROX_NONE stores every contour pixel (adjacent stored points are at most 1 pixel apart); CHAIN_APPROX_SIMPLE compresses horizontal, vertical and diagonal segments and keeps only their end points (for a straight segment a-b the intermediate points are dropped); CHAIN_APPROX_TC89_L1 and CHAIN_APPROX_TC89_KCOS use the L1 and KCOS variants of the Teh-Chin chain approximation algorithm.
7. The optional offset is useful when the contours were extracted from an ROI but need to be analyzed in the coordinate system of the whole image.
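A minimal sketch of the call, mainly to make the container types above concrete (binaryImage is just a placeholder for any 8-bit single-channel binary image):

#include <opencv2/opencv.hpp>
#include <cstdio>
using namespace cv;
using namespace std;

void findContoursSketch(const Mat& binaryImage)
{
    vector<vector<Point>> contours;   // one inner vector of points per contour
    vector<Vec4i> hierarchy;          // [next, previous, parent, first child] per contour
    Mat work = binaryImage.clone();   // findContours may modify its input, so use a copy
    findContours(work, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
    printf("found %d contours\n", (int)contours.size());
}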
2. Drawing Contours
This function makes it easy to draw the contours described by the point sets and hierarchy found above.
API: void drawContours(InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness = 1, int lineType = 8, InputArray hierarchy = noArray(), int maxLevel = INT_MAX, Point offset = Point())
Notes: 1. If contourIdx is negative, all contours are drawn. thickness defaults to 1; a negative value (FILLED) fills the contour interior instead of stroking the outline. lineType defaults to 8; LINE_AA draws anti-aliased lines. maxLevel defaults to INT_MAX and limits how many levels of nested contours are drawn; it only takes effect when a hierarchy is supplied.
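A minimal sketch that draws everything found by findContours in one call; contours and hierarchy are assumed to come from the previous sketch, and contourIdx = -1 means "draw all contours":

#include <opencv2/opencv.hpp>
#include <climits>
using namespace cv;
using namespace std;

void drawContoursSketch(const Size& imageSize,
                        const vector<vector<Point>>& contours,
                        const vector<Vec4i>& hierarchy)
{
    Mat canvas = Mat::zeros(imageSize, CV_8UC3);           // black canvas of the same size
    drawContours(canvas, contours, -1, Scalar(0, 255, 0),  // -1: draw all contours, in green
                 1, LINE_AA, hierarchy, INT_MAX, Point(0, 0));
    imshow("all contours", canvas);
}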
The basic contour-detection pipeline is: 1. convert the image to grayscale; 2. filter the image to reduce noise; 3. binarize it with Canny or another thresholding method; 4. call findContours.
The example below follows this pipeline; trackbars control the blur kernel size, the Sobel aperture and the lower Canny threshold.
// Edge/contour finding.
// Blur the image first, then binarize it with Canny.
// The blur is a box (mean) blur; the adjustable parameter is the kernel size.
// The adjustable Canny parameters are the lower/upper thresholds and the Sobel aperture (3, 5 or 7).
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat srcImage,blurImage,grayBlurImage,cannyImage,contourImage;
vector<vector<Point>> g_vContours;
vector<Vec4i> g_vHierarchy;
RNG g_rng(12345);
const int g_BlurMax = 100; int g_BlurValue;                                        // blur kernel trackbar
const int g_sobelSizeMax = 2; int g_sobelValue;                                    // Sobel aperture trackbar
const int g_lowThresholdMax = 80; int g_lowThresholdValue; int g_upThresholdValue; // Canny thresholds
void onBlurTrackBar(int pos, void* userData);
void onTrackBarSobelSize(int pos, void* userData);
void onTrackBarLowThresholdSize(int pos, void* userData);
int main(int argc, char* argv[])
{
srcImage= imread("F:\\opencv\\OpenCVImage\\findContours.jpg");
g_BlurValue = 4;
g_sobelValue = 1;
g_lowThresholdValue = 80;
g_upThresholdValue = 240;
namedWindow("canny Image");
namedWindow("contour image");
createTrackbar("blur size value", "canny Image", &g_BlurValue, g_BlurMax,onBlurTrackBar,0);
createTrackbar("sobel size", "canny Image", &g_sobelValue, g_sobelSizeMax,onTrackBarSobelSize,0);
createTrackbar("low threshold", "canny Image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThresholdSize,0);
onBlurTrackBar(g_BlurValue,0);
imshow("src image", srcImage);
moveWindow("src image", 0, 0);
moveWindow("canny Image", srcImage.cols, 0);
moveWindow("contour image", srcImage.cols*2, 0);
waitKey(0); return 0;
}

// Trackbar callback: the blur kernel size changed
void onBlurTrackBar(int pos, void* userData)
{
int blurSize = g_BlurValue*2 + 1;                    // kernel size must be odd
blur(srcImage, blurImage, Size(blurSize, blurSize));
int sobelValue = g_sobelValue*2 + 3;                 // Sobel aperture: 3, 5 or 7
if (g_lowThresholdValue == 0) g_lowThresholdValue = 1;
int lowThresholdValue = g_lowThresholdValue;
int upThresholdValue = lowThresholdValue*3;          // classic 1:3 Canny ratio
cvtColor(blurImage, grayBlurImage, COLOR_BGR2GRAY);  // imread loads BGR
Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue, sobelValue);
contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
for (int i = 0; i < (int)g_vContours.size(); i++)
{
Scalar color= Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
}
imshow("contour image", contourImage);
imshow("canny Image",cannyImage);
}

// Trackbar callback: the Sobel aperture changed
void onTrackBarSobelSize(int pos, void* userData)
{
int blurSize = g_BlurValue*2 + 1;
blur(srcImage, blurImage, Size(blurSize, blurSize));
int sobelValue = g_sobelValue*2 + 3;
if (g_lowThresholdValue == 0) g_lowThresholdValue = 1;
int lowThresholdValue = g_lowThresholdValue;
int upThresholdValue = lowThresholdValue*3;
cvtColor(blurImage, grayBlurImage, COLOR_BGR2GRAY);
Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue, sobelValue);
contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
for (int i = 0; i < (int)g_vContours.size(); i++)
{
Scalar color= Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
}
imshow("contour image", contourImage);
imshow("canny Image",cannyImage);
}

// Trackbar callback: the Canny lower threshold changed
void onTrackBarLowThresholdSize(int pos, void* userData)
{
int blurSize = g_BlurValue*2 + 1;
blur(srcImage, blurImage, Size(blurSize, blurSize));
int sobelValue = g_sobelValue*2 + 3;
if (g_lowThresholdValue == 0) g_lowThresholdValue = 1;
int lowThresholdValue = g_lowThresholdValue;
int upThresholdValue = lowThresholdValue*3;
cvtColor(blurImage, grayBlurImage, COLOR_BGR2GRAY);
Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue, sobelValue);
contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
for (int i = 0; i < (int)g_vContours.size(); i++)
{
Scalar color= Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
}
imshow("contour image", contourImage);
imshow("canny Image",cannyImage);
}
3. Convex Hull
The convex hull of a set of 2D points is the convex polygon obtained by connecting the outermost points of the set.
Computing the convex hull of an object and then analyzing its convexity defects is an effective way to understand the object's contour and shape, and it is used in typical shape-matching tasks.
API: void convexHull(InputArray points, OutputArray hull, bool clockwise = false, bool returnPoints = true)
Notes: 1. clockwise is the orientation flag: if true the hull runs clockwise from start point to end point, otherwise counter-clockwise (in the conventional Cartesian coordinate system with X pointing right and Y pointing up).
2. When returnPoints is true the function returns the hull vertices themselves; when false it returns the indices of the hull points within the input set. When the output is a std::vector this parameter is ignored and the element type of the vector decides.
3. The input point set is typically passed as a vector<Point>, and the resulting hull can be stored in the same form, e.g. vector<Point> hull; hull.size() is the number of hull vertices.
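A minimal sketch of the two output forms (contour is assumed to be one entry of the contours returned by findContours):

#include <opencv2/opencv.hpp>
#include <cstdio>
using namespace cv;
using namespace std;

void convexHullSketch(const vector<Point>& contour)
{
    vector<Point> hullPoints;                    // hull as vertices
    convexHull(contour, hullPoints);             // returnPoints effectively true here
    vector<int> hullIndices;                     // hull as indices into `contour`
    convexHull(contour, hullIndices, false, false);
    printf("hull has %d vertices\n", (int)hullPoints.size());
}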
The usual procedure is to binarize the image first, then find its contours, and finally compute the convex hull of a chosen contour.
Example code follows:
// Find the contours of an image, run convex-hull extraction on every contour found, and display the result.
// Same pre-processing as before: box blur, then Canny to obtain the binary image;
// the trackbars control the blur kernel size, the Canny thresholds and the Sobel aperture (3, 5 or 7).
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat srcImage,blurImage,grayBlurImage,cannyImage,contourImage,hullImage;
vector<vector<Point>> g_vContours;
vector<Vec4i> g_vHierarchy;
RNG g_rng(12345);
const int g_BlurMax = 100; int g_BlurValue;                                        // blur kernel trackbar
const int g_sobelSizeMax = 2; int g_sobelValue;                                    // Sobel aperture trackbar
const int g_lowThresholdMax = 80; int g_lowThresholdValue; int g_upThresholdValue; // Canny thresholds
void onBlurTrackBar(int pos, void* userData);
void onTrackBarSobelSize(int pos, void* userData);
void onTrackBarLowThresholdSize(int pos, void* userData);
int main(int argc, char* argv[])
{
srcImage= imread("F:\\opencv\\OpenCVImage\\convexHull.jpg");
g_BlurValue = 4;
g_sobelValue = 1;
g_lowThresholdValue = 80;
g_upThresholdValue = 240;
namedWindow("canny Image");
namedWindow("contour image");
namedWindow("hull image");
createTrackbar("blur size value", "canny Image", &g_BlurValue, g_BlurMax,onBlurTrackBar,0);
createTrackbar("sobel size", "canny Image", &g_sobelValue, g_sobelSizeMax,onTrackBarSobelSize,0);
createTrackbar("low threshold", "canny Image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThresholdSize,0);
onBlurTrackBar(g_BlurValue,0);
moveWindow("src image", 0, 0);
moveWindow("canny Image", srcImage.cols, srcImage.rows);
moveWindow("contour image", srcImage.cols*2, 0);
moveWindow("hull image", srcImage.cols*2, srcImage.rows);
waitKey(0); return 0;
}

// Trackbar callback: the blur kernel size changed
void onBlurTrackBar(int pos, void* userData)
{
imshow("src image", srcImage);
int blurSize = g_BlurValue*2 + 1;                    // kernel size must be odd
blur(srcImage, blurImage, Size(blurSize, blurSize));
int sobelValue = g_sobelValue*2 + 3;                 // Sobel aperture: 3, 5 or 7
if (g_lowThresholdValue == 0) g_lowThresholdValue = 1;
int lowThresholdValue = g_lowThresholdValue;
int upThresholdValue = lowThresholdValue*3;
cvtColor(blurImage, grayBlurImage, COLOR_BGR2GRAY);  // imread loads BGR
Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue, sobelValue);
contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
for (int i = 0; i < (int)g_vContours.size(); i++)
{
Scalar color= Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
}
vector<vector<Point>> hull(g_vContours.size());      // one hull per contour
hullImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
for (int i = 0; i < (int)g_vContours.size(); i++)
{
convexHull(Mat(g_vContours[i]), hull[i], false);
Scalar color = Scalar(g_rng.uniform(0, 255), g_rng.uniform(0, 255), g_rng.uniform(0, 255));
drawContours(hullImage, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point(0,0));
}
imshow("hull image", hullImage);
imshow("contour image", contourImage);
imshow("canny Image",cannyImage);
}

// Trackbar callback: the Sobel aperture changed
void onTrackBarSobelSize(int pos, void* userData)
{
imshow("src image", srcImage);
int blurSize = g_BlurValue*2 + 1;
blur(srcImage, blurImage, Size(blurSize, blurSize));
int sobelValue = g_sobelValue*2 + 3;
if (g_lowThresholdValue == 0) g_lowThresholdValue = 1;
int lowThresholdValue = g_lowThresholdValue;
int upThresholdValue = lowThresholdValue*3;
cvtColor(blurImage, grayBlurImage, COLOR_BGR2GRAY);
Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue, sobelValue);
contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
for (int i = 0; i < (int)g_vContours.size(); i++)
{
Scalar color= Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
}
vector<vector<Point>> hull(g_vContours.size());
hullImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
for (int i = 0; i < (int)g_vContours.size(); i++)
{
convexHull(Mat(g_vContours[i]), hull[i], false);
Scalar color = Scalar(g_rng.uniform(0, 255), g_rng.uniform(0, 255), g_rng.uniform(0, 255));
drawContours(hullImage, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point(0,0));
}
imshow("hull image", hullImage);
imshow("contour image", contourImage);
imshow("canny Image",cannyImage);
}

// Trackbar callback: the Canny lower threshold changed
void onTrackBarLowThresholdSize(int pos, void* userData)
{
imshow("src image", srcImage);
int blurSize = g_BlurValue*2 + 1;
blur(srcImage, blurImage, Size(blurSize, blurSize));
int sobelValue = g_sobelValue*2 + 3;
if (g_lowThresholdValue == 0) g_lowThresholdValue = 1;
int lowThresholdValue = g_lowThresholdValue;
int upThresholdValue = lowThresholdValue*3;
cvtColor(blurImage, grayBlurImage, COLOR_BGR2GRAY);
Canny(grayBlurImage, cannyImage, lowThresholdValue, upThresholdValue, sobelValue);
contourImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
findContours(cannyImage, g_vContours, g_vHierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
for (int i = 0; i < (int)g_vContours.size(); i++)
{
Scalar color= Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255));
drawContours(contourImage, g_vContours, i, color,2,8,g_vHierarchy,0,Point(0,0));
}
vector<vector<Point>> hull(g_vContours.size());
hullImage = Mat::zeros(cannyImage.rows, cannyImage.cols, CV_8UC3);
for (int i = 0; i < (int)g_vContours.size(); i++)
{
convexHull(Mat(g_vContours[i]), hull[i], false);
Scalar color = Scalar(g_rng.uniform(0, 255), g_rng.uniform(0, 255), g_rng.uniform(0, 255));
drawContours(hullImage, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point(0,0));
}
imshow("hull image", hullImage);
imshow("contour image", contourImage);
imshow("canny Image",cannyImage);
}
4. Other Ways to Process the 2D Point Set of a Contour
Upright bounding rectangle of a point set:
API: Rect boundingRect(InputArray points)
Minimum-area (rotated) bounding rectangle of a point set (see the sketch after this list):
API: RotatedRect minAreaRect(InputArray points)
Minimum enclosing circle of a point set:
API: void minEnclosingCircle(InputArray points, Point2f& center, float& radius)
Ellipse fit to a 2D point set (see the sketch after this list):
API: RotatedRect fitEllipse(InputArray points)
Polygonal approximation of a curve:
API: void approxPolyDP(InputArray curve, OutputArray approxCurve, double epsilon, bool closed)
Notes: epsilon is the maximum allowed distance between the original curve and its approximation;
if closed is true the resulting polygon is closed, otherwise it is left open.
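The routine that follows exercises approxPolyDP, boundingRect and minEnclosingCircle, but not minAreaRect or fitEllipse, so here is a minimal hedged sketch of those two calls on a single contour (canvas and contour are assumed inputs; fitEllipse needs at least 5 points):

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

void rotatedShapesSketch(Mat& canvas, const vector<Point>& contour)
{
    RotatedRect box = minAreaRect(contour);        // minimum-area rotated rectangle
    Point2f corners[4];
    box.points(corners);                           // its four corners
    for (int i = 0; i < 4; i++)
        line(canvas, corners[i], corners[(i + 1) % 4], Scalar(0, 255, 0), 2);

    if (contour.size() >= 5)                       // fitEllipse requires at least 5 points
    {
        RotatedRect e = fitEllipse(contour);
        ellipse(canvas, e, Scalar(0, 0, 255), 2);  // draw the fitted ellipse
    }
}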
Example code exercising these APIs follows:
// First find the contours, then approximate each contour with a polygon,
// and from the polygon derive its bounding rectangle and minimum enclosing circle.
// The only adjustable parameter is the binarization threshold (maximum 254, upper value 255).
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat srcImage,srcCopyImage,srcGrayImage,srcThresholdImage,DstImage;
vector<vector<Point>> contours;
vector<Vec4i> hierarchys;
const int g_lowThresholdMax = 254; int g_lowThresholdValue; int g_upThresholdValue;
void onTrackBarLowThreshold(int pos, void* userData);
RNG g_rng(12345);
int main(int argc, char* argv[])
{
srcImage= imread("F:\\opencv\\OpenCVImage\\contour.jpg");
srcCopyImage = srcImage.clone();
// convert the color image to grayscale
if(srcImage.channels() == 3)
{
cvtColor(srcImage, srcGrayImage, COLOR_BGR2GRAY);   // imread loads BGR
}else{
srcGrayImage=srcImage.clone();
}
blur(srcGrayImage, srcGrayImage,Size(3,3));
namedWindow("src image");
namedWindow("threshold image");
namedWindow("dst image");
g_lowThresholdValue = 80;
g_upThresholdValue = 255;
createTrackbar("low threshold value", "threshold image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThreshold,0);
onTrackBarLowThreshold(g_lowThresholdValue,0);
imshow("src image", srcImage);
moveWindow("src image", 0, 0);
moveWindow("threshold image", srcImage.cols, 0);
moveWindow("dst image", srcImage.cols*2, 0);
waitKey(0); return 0;
}

void onTrackBarLowThreshold(int pos, void* userData)
{
if (g_lowThresholdValue == 0) g_lowThresholdValue = 1;
threshold(srcGrayImage, srcThresholdImage, g_lowThresholdValue, g_upThresholdValue, THRESH_BINARY);
// binarization done, now find the contours
findContours(srcThresholdImage, contours, hierarchys, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
// containers for the polygon approximations and the derived shapes
vector<vector<Point>> contours_polys(contours.size());  // approximated polygons
vector<Rect> boundRect(contours.size());                // upright bounding rectangles
vector<Point2f> center(contours.size());                // minimum enclosing circles
vector<float> radius(contours.size());
DstImage = srcCopyImage.clone();
for (int i = 0; i < (int)contours.size(); i++)
{
// approximate the contour with accuracy 3, as a closed polygon
approxPolyDP(Mat(contours[i]), contours_polys[i], 3, true);
// bounding rectangle of the approximated polygon
boundRect[i] = boundingRect(Mat(contours_polys[i]));
// minimum enclosing circle of the approximated polygon
minEnclosingCircle(Mat(contours_polys[i]), center[i], radius[i]);
}
// draw the contours first
drawContours(DstImage, contours, -1, Scalar(g_rng.uniform(0, 255), g_rng.uniform(0, 255), g_rng.uniform(0, 255)));
// then draw each bounding rectangle and enclosing circle on top
for(int i = 0; i < contours.size(); i++)
{
rectangle(DstImage, boundRect[i].tl(), boundRect[i].br(), Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255)),2,8,0);
circle(DstImage, center[i], (int)radius[i], Scalar(g_rng.uniform(0, 255),g_rng.uniform(0, 255),g_rng.uniform(0, 255)),2,8,0);
}
imshow("threshold image", srcThresholdImage);
imshow("dst image", DstImage);
}
5. Image Moments
An image is, at bottom, a matrix, and when that matrix has to be analyzed across different spatial scales we turn to its moments, the image moments. Moment functions play an important role in image analysis: pattern recognition, object classification, object recognition and pose estimation, image coding and reconstruction all rely on them.
The first-order moments relate to the shape of the image, the second-order moments describe how far the curve spreads around its mean, and the third-order moments measure the symmetry about the mean. From the second- and third-order moments seven invariant moments can be derived, the seven Hu invariant moments.
These invariant moments are statistical features of the image that are invariant to translation, rotation and scaling, so they can be used for image recognition.
In addition, the moments can also be used to compute the length and area of an image contour.
API: Moments moments(InputArray array, bool binaryImage = false)
Notes: the source may be a raster image (single-channel, 8-bit or floating point) or a 2D point set.
The second parameter defaults to false; when it is true, every non-zero pixel is treated as 1 (it is only used for images).
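As a side note, the seven Hu invariants mentioned earlier can be derived from the output of moments() with HuMoments; a minimal sketch on a single contour (assumed to come from findContours):

#include <opencv2/opencv.hpp>
#include <cstdio>
using namespace cv;
using namespace std;

void huMomentsSketch(const vector<Point>& contour)
{
    Moments m = moments(contour, false);   // spatial, central and normalized central moments
    double hu[7];
    HuMoments(m, hu);                      // the seven Hu invariant moments
    for (int i = 0; i < 7; i++)
        printf("hu[%d] = %g\n", i, hu[i]);
}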
Computing the area of a contour:
API: double contourArea(InputArray contour, bool oriented = false)
The input is a vector of 2D points or a Mat, typically a contour returned by findContours, a shape fitted to it (rectangle, ellipse, circle, polygon) or a convex hull. The function returns the enclosed area; with the default oriented = false the area is returned as an absolute (unsigned) value.
Computing the length of a contour:
API: double arcLength(InputArray curve, bool closed)
Note: the input point set takes the same forms as for the previous API; closed tells the function whether the curve should be treated as closed.
The test below computes each contour's area from its moments (m00) and compares it with the values returned by OpenCV's contourArea and arcLength:
// Image moments.
// Canny gives the binary image, the binary image gives the contours, the contours give their moments,
// and from the moments the centroid, contour area and contour length are computed.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat srcImage,srcGrayImage,srcBlurImage,srcThresholdImage,srcCopyImage;
vector<vector<Point>> contours;
vector<Vec4i> hierarchys;
const int g_lowThresholdMax = 85; int g_lowThresholdValue; int g_upThresholdValue;
void onTrackBarLowThreshold(int pos, void* userData);
int main(int argc, char* argv[])
{
srcImage = imread("F:\\opencv\\OpenCVImage\\mement.jpg");
if (srcImage.channels() == 3)
{
cvtColor(srcImage, srcGrayImage, COLOR_BGR2GRAY);   // imread loads BGR
}else{
srcGrayImage=srcImage.clone();
}
blur(srcGrayImage,srcBlurImage,Size(3,3));
namedWindow("canny image");
g_lowThresholdValue = 80;
g_upThresholdValue = g_lowThresholdValue*3;
createTrackbar("low threshold value", "canny image", &g_lowThresholdValue, g_lowThresholdMax,onTrackBarLowThreshold,0);
onTrackBarLowThreshold(g_lowThresholdValue,0);
imshow("src image", srcImage);
moveWindow("src image", 0, 0);
moveWindow("canny image", srcBlurImage.cols, 0);
moveWindow("dst image", srcBlurImage.cols*2, 0);
waitKey(0); return 0;
}

void onTrackBarLowThreshold(int pos, void* userData)
{
Canny(srcBlurImage, srcThresholdImage, g_lowThresholdValue, g_upThresholdValue, 3);
// get the contours from the binary image
findContours(srcThresholdImage, contours, hierarchys, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0,0));
// compute the moments of each contour
vector<Moments> mu(contours.size());
for (int i = 0; i < (int)contours.size(); i++)
{
mu[i] = moments(contours[i], false);
}
// compute the centroid of each contour from m10/m00 and m01/m00
vector<Point2f> mc(contours.size());
for (int i = 0; i < (int)contours.size(); i++)
{
mc[i] = Point2f(static_cast<float>(mu[i].m10/mu[i].m00), static_cast<float>(mu[i].m01/mu[i].m00));
}
// draw the contours and their centroids on a blank canvas
srcCopyImage = Mat(srcImage.rows, srcImage.cols, CV_8UC1, Scalar::all(0));
drawContours(srcCopyImage, contours, -1, Scalar(255));
for (int i = 0; i < (int)contours.size(); i++)
{
circle(srcCopyImage, mc[i],4, Scalar(255),-1,8,0);
}
imshow("canny image", srcThresholdImage);
imshow("dst image", srcCopyImage); //开a始o?计?算?轮?廓¤a并?é且¨°输o?出?值|ì,通a?§过y矩?和¨aopencv函?¥数oy计?算?出?来¤??的ì?面?积y对?比ਨ
for(int i = 0; i < contours.size(); i++)
{
printf("计?算?轮?廓¤a面?积y以°?及??长?è度¨¨,第ì¨2%d个?轮?廓¤a的ì?面?积y为a(矩?计?算?得ì?出?):%.2f\n通a?§过yopencv函?¥数oy计?算?出?来¤??的ì?面?积y为a%.2f\t长?è度¨¨为a%.2f\n",i,mu[i].m00,contourArea(contours[i],false),arcLength(contours[i],true));
}
}
6. The Watershed Algorithm
The main purpose of the watershed algorithm is image segmentation: extracting the useful objects from the background. In a typical image the foreground and the background differ strongly, and when they have to be separated the watershed algorithm is the tool to reach for.
Watershed is a marker-based segmentation algorithm; the markers act as seeds (regional extrema) of the input image. Before the image is passed to the function you roughly outline the regions you want segmented; these markers can be produced with the contour-finding and contour-drawing functions described above.
The result is a set of regions grown from the marked extrema. When objects in the image touch each other and are hard to separate, the algorithm can be used to split them apart.
Running watershed directly on edge information gives poor results. In practice you first mark the foreground objects and the background (every pixel inside an object is connected to its marker, and no background pixel belongs to any object marker) and only then apply watershed; this produces much better segmentations.
API: void watershed(InputArray image, InputOutputArray markers)
Notes: the input image must be an 8-bit 3-channel color image. markers is both input and output: a 32-bit single-channel (CV_32SC1) image of the same size as the source. Why 32 bits? Because an image may well be split into more than 255 regions, and 8 bits would not be enough.
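A minimal, non-interactive sketch of the call, assuming a markers image that has already been labeled with positive integers (for example with drawContours, as in the routine below); after the call, boundary pixels hold -1:

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

void watershedSketch(const Mat& colorImage, Mat& markers)
{
    // colorImage: 8-bit 3-channel input; markers: CV_32SC1, same size,
    // with each seed region pre-filled with a positive label (1, 2, ...) and 0 elsewhere
    watershed(colorImage, markers);
    // afterwards every pixel holds its region label, and watershed-line pixels hold -1
    Mat boundaries = (markers == -1);   // 8-bit mask of the watershed lines
    imshow("watershed boundaries", boundaries);
}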
The interactive routine below shows the effect:
// Watershed segmentation (watershed).
// Draw region strokes on the source image while saving the same strokes into a mask, then run
// contour finding on the mask; each contour found is drawn into a new marker image with its own label.
// watershed is then called, each resulting region is colored according to its label, and the colored
// result is blended with the source image to produce the final picture. That is the point of the algorithm.
// Combined with morphological operations such as dilation and erosion the results should be even better.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat srcImage,srcImageCopy;
Mat maskImage, maskImageCopy;
bool draw;
Point2i prevPoint;   // position of the previous mouse event
RNG g_rng(12345);
void onMouseEvent(int event, int x, int y, int flag, void* userData);
int main(int argc, char* argv[])
{
srcImage= imread("F:\\opencv\\OpenCVImage\\waterShed.jpg");
imshow("src image", srcImage);
prevPoint= Point2i(-1,-1);
srcImageCopy=srcImage.clone();
maskImage= Mat(srcImage.rows,srcImage.cols,CV_32SC1,Scalar::all(0));
maskImageCopy=maskImage.clone();
setMouseCallback("src image", onMouseEvent);
imshow("dst image", maskImage);
moveWindow("src image", 0, 0);
moveWindow("dst image", srcImage.cols, 0);
draw = false; int keyValue = 0;
do {
keyValue = waitKey(30);
if (keyValue == '1')   // key '1': clear the drawing
{
draw= false;
srcImageCopy=srcImage.clone();
maskImageCopy=maskImage.clone();
prevPoint= Point2i(-1,-1);
imshow("src image", srcImageCopy);
imshow("dst image", maskImageCopy);
}
else if (keyValue == '2')   // key '2': run the segmentation
{
if (draw == true)           // there is at least one stroke
{
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(maskImageCopy, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
if (contours.size() == 0) continue;
// draw the contours into the marker image, giving each top-level contour its own label 1, 2, 3, ...
Mat maskDstImage = Mat(maskImageCopy.size(), CV_32SC1, Scalar::all(0));
int c_comp = 0;
for (int i = 0; i >= 0; i = hierarchy[i][0], c_comp++)   // hierarchy[i][0] is the next contour at the top level
{
drawContours(maskDstImage, contours, i, Scalar::all(c_comp+1), -1, 8, hierarchy, INT_MAX);
}
// c_comp labels in total: build a random color table for the later coloring step
vector<Vec3b> colorTab;
for (int i = 0; i < c_comp; i++)
{
int r = g_rng.uniform(0, 255); int g = g_rng.uniform(0, 255); int b = g_rng.uniform(0, 255);
colorTab.push_back(Vec3b((uchar)r, (uchar)g, (uchar)b));
}
// run the watershed algorithm and time it
double startTime = (double)getTickCount();
watershed(srcImageCopy, maskDstImage);
double endTime = (double)getTickCount();
printf("watershed took %.2f ms\r\n", ((endTime-startTime)*1000)/getTickFrequency());
// color every pixel of the result according to its label; boundary pixels (-1) become white
Mat dstImage(maskDstImage.size(), CV_8UC3);
for (int i = 0; i < maskDstImage.rows; i++)
{
for (int j = 0; j < maskDstImage.cols; j++)
{
int index = maskDstImage.at<int>(i, j);
if (index == -1)
{
dstImage.at<Vec3b>(i, j) = Vec3b(255, 255, 255);   // watershed boundary
}
else if (index < 0 || index > c_comp)
{
dstImage.at<Vec3b>(i, j) = Vec3b(0, 0, 0);         // unlabeled pixel
}
else
{
dstImage.at<Vec3b>(i, j) = colorTab[index-1];      // pixel of region `index`
}
}
}
// blend the colored segmentation with the source image and show the final picture
addWeighted(srcImage, 0.5, dstImage, 0.5, 0.0, dstImage);
imshow("dst image", dstImage);
}
else
{
continue;
}
}
} while (keyValue != 27); return 0;   // ESC quits
}
void onMouseEvent(int event, int x, int y, int flag, void* userData)
{
if (x < 0 || y < 0 || x >= srcImage.cols || y >= srcImage.rows) return;
if (event == EVENT_LBUTTONDOWN)
{
prevPoint=Point2i(x,y);
}else if(event ==EVENT_LBUTTONUP)
{
prevPoint= Point2i(-1,-1);
}else if(event ==EVENT_MOUSEMOVE)
{
if (flag & EVENT_FLAG_LBUTTON)   // mouse moved with the left button held down
{
draw = true;
line(srcImageCopy, prevPoint,Point2i(x,y), Scalar::all(255),4,LINE_AA);
line(maskImageCopy,prevPoint,Point2i(x,y), Scalar::all(INT_MAX),4,LINE_AA);
prevPoint=Point2i(x,y);
imshow("dst image", maskImageCopy);
imshow("src image", srcImageCopy);
}
}
}
7. Image Inpainting
Image inpainting uses the color and structure along the border of a damaged region and propagates and blends them into the region in order to fill it in and repair the image. Note that an image whose border information is too badly damaged is hard to restore; the technique works best on small damaged patches.
API: void inpaint(InputArray src, InputArray inpaintMask, OutputArray dst, double inpaintRadius, int flags)
Notes: 1. The source must be an 8-bit 1-channel or 3-channel image. The mask is an 8-bit single-channel image whose non-zero pixels mark the area to be repaired. The output has the same size and type as the source. inpaintRadius is the radius of the neighborhood around each point that the algorithm uses as a reference while repairing it.
2. flags selects the algorithm: INPAINT_NS uses the Navier-Stokes based method, INPAINT_TELEA uses the method by Alexandru Telea.
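A minimal, non-interactive sketch, assuming a damaged image and an 8-bit mask of the damaged pixels already exist on disk (the file names are placeholders):

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main()
{
    Mat damaged = imread("damaged.jpg");                        // placeholder file names
    Mat mask = imread("damage_mask.png", IMREAD_GRAYSCALE);     // non-zero = pixels to repair
    if (damaged.empty() || mask.empty()) return -1;
    Mat repaired;
    inpaint(damaged, mask, repaired, 3, INPAINT_TELEA);         // radius 3, Telea's method
    imshow("repaired", repaired);
    waitKey(0);
    return 0;
}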
The interactive example below demonstrates it:
// Image inpainting.
// Needs the source image, a mask image and a destination image.
// The inpainting reference radius is 3 and the method is INPAINT_TELEA.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat srcImage,srcImageCopy;
Mat maskImage, maskImageCopy;
bool draw;
Point2i prevPoint;   // position of the previous mouse event
void onMouseEvent(int event, int x, int y, int flag, void* userData);
int main(int argc, char* argv[])
{
srcImage= imread("F:\\opencv\\OpenCVImage\\inpaint.jpg");
imshow("src image", srcImage);
prevPoint= Point2i(-1,-1);
srcImageCopy=srcImage.clone();
maskImage= Mat(srcImage.rows,srcImage.cols,CV_8UC1,Scalar::all(0));
maskImageCopy=maskImage.clone();
setMouseCallback("src image", onMouseEvent);
imshow("dst image", maskImage);
moveWindow("src image", 0, 0);
moveWindow("dst image", srcImage.cols, 0);
draw = false; int keyValue = 0;
do {
keyValue = waitKey(30);
if (keyValue == '1')
{
// key '1': clear the drawing
draw = false;
srcImageCopy=srcImage.clone();
maskImageCopy=maskImage.clone();
prevPoint= Point2i(-1,-1);
imshow("src image", srcImageCopy);
imshow("dst image", maskImageCopy);
}
else if (keyValue == '2')
{
// key '2': run the inpainting
if(draw == true)
{
Mat srcImageInpaint=srcImageCopy.clone();
Mat dstImage= Mat(srcImageInpaint.size(),CV_8UC3,Scalar::all(0));
inpaint(srcImageInpaint, maskImageCopy, dstImage,3, INPAINT_TELEA);
srcImageCopy=srcImage.clone();
maskImageCopy=maskImage.clone();
prevPoint= Point2i(-1,-1);
imshow("dst image", dstImage);
}
}
} while (keyValue != 27); return 0;   // ESC quits
}
void onMouseEvent(int event, int x, int y, int flag, void* userData)
{
if (x < 0 || y < 0 || x >= srcImage.cols || y >= srcImage.rows) return;
if (event == EVENT_LBUTTONDOWN)
{
prevPoint=Point2i(x,y);
}else if(event ==EVENT_LBUTTONUP)
{
prevPoint= Point2i(-1,-1);
}else if(event ==EVENT_MOUSEMOVE)
{
if (flag & EVENT_FLAG_LBUTTON)   // mouse moved with the left button held down
{
draw = true;
line(srcImageCopy, prevPoint,Point2i(x,y), Scalar::all(255),4,LINE_AA);
line(maskImageCopy,prevPoint,Point2i(x,y), Scalar::all(INT_MAX),4,LINE_AA);
prevPoint=Point2i(x,y);
imshow("dst image", maskImageCopy);
imshow("src image", srcImageCopy);
}
}
}
