regionprops vs findContours

Is there a way to get the same results from cDist = regionprops(bwImg, 'Area'); and OpenCV's findContours?

Input image: [image]

BW input image: [image]

Here is what I have tried so far:

    dst.convertTo(dst, CV_8U);
    cv::vector<cv::vector<cv::Point> > contours_1;
    cv::vector<cv::Vec4i> hierarchy_1;
    cv::findContours(dst, contours_1, hierarchy_1, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    double maxLabelSize = (dst.rows/4.0) * (dst.cols/6.0);
    double minLabelSize = ((dst.rows/40.0) * (dst.cols/60.0));

    cv::vector<cv::vector<cv::Point> > goodContours;
    for (int i = 0; i < contours_1.size(); i++)
    {
        double size = cv::contourArea(contours_1[i]);
        if (size < maxLabelSize && size > minLabelSize)
        {
            goodContours.push_back(contours_1[i]);
        }
    }

    cv::Mat filterContours = cv::Mat::zeros(dst.size(), CV_8UC3);
    for (int i = 0; i < goodContours.size(); i++)
    {
        cv::RNG rng(12345);
        cv::Scalar color = cv::Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
        drawContours(filterContours, goodContours, i, color, 2, 8, hierarchy_1, 0, cv::Point());
    }
    cv::imshow("Contours", filterContours);
    cv::waitKey(0);

OpenCV result: [image]

MATLAB version:

    % Calculate each separated object area
    cDist = regionprops(bwImg, 'Area');
    cDist = [cDist.Area];
    % Label each object
    [bwImgLabeled, ~] = bwlabel(bwImg);
    % Calculate min and max object size based on assumptions
    maxLabelSize = prod(size(imageData)./[4 6]);
    minLabelSize = prod(size(imageData)./[4 6]./10);
    % Find label indices for objects that are too large or too small
    remInd = find(cDist > maxLabelSize);
    remInd = [remInd find(cDist < minLabelSize)];
    % Remove over/undersized objects
    for n = 1:length(remInd)
        ri = bwImgLabeled == remInd(n);
        bwImgLabeled(ri) = 0;
    end

MATLAB result: [image]

Note that the square at the bottom left is missing from the OpenCV image.

The OpenCV 3.0 beta has connected-components functions: connectedComponents creates a label image, and connectedComponentsWithStats additionally computes properties of each region (area, bounding box and centroid).

With OpenCV 2.4, you can include connectedcomponents.cpp from the current OpenCV source code in your project and use the connectedComponentsWithStats function (a usage sketch follows the variable list below):

 nLabels = connectedComponentsWithStats(mask, labelImage, stats, centroids, connectivity, CV_32S); 

Variables:

  • nLabels – number of regions;

  • mask – input binary image;

  • labelImage – output image with labelled regions;

  • stats – region properties (bounding box, area);

  • centroids – centroids of the regions;

  • connectivity – connectivity of the regions (4 or 8).
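
As a minimal usage sketch (assuming bwImg is the 8-bit binary image from the question, and reusing the size thresholds from the code above), the regionprops(bwImg, 'Area') style filtering could look like this with connectedComponentsWithStats:

    // Sketch: label the binary image and zero out labels whose area is out of range,
    // mimicking regionprops(bwImg, 'Area') followed by the min/max size check.
    cv::Mat labelImage, stats, centroids;
    int nLabels = cv::connectedComponentsWithStats(bwImg, labelImage, stats, centroids, 8, CV_32S);

    double maxLabelSize = (bwImg.rows / 4.0) * (bwImg.cols / 6.0);
    double minLabelSize = (bwImg.rows / 40.0) * (bwImg.cols / 60.0);

    for (int label = 1; label < nLabels; ++label)   // label 0 is the background
    {
        int area = stats.at<int>(label, cv::CC_STAT_AREA);
        if (area > maxLabelSize || area < minLabelSize)
            labelImage.setTo(0, labelImage == label);
    }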

connectedcomponents.cpp:

    #include "precomp.hpp"
    #include <vector>

    namespace cv{
    namespace connectedcomponents{

    struct NoOp{
        NoOp(){
        }
        void init(int /*labels*/){
        }
        inline
        void operator()(int r, int c, int l){
            (void) r;
            (void) c;
            (void) l;
        }
        void finish(){}
    };
    struct Point2ui64{
        uint64 x, y;
        Point2ui64(uint64 _x, uint64 _y):x(_x), y(_y){}
    };

    struct CCStatsOp{
        const _OutputArray* _mstatsv;
        cv::Mat statsv;
        const _OutputArray* _mcentroidsv;
        cv::Mat centroidsv;
        std::vector<Point2ui64> integrals;

        CCStatsOp(OutputArray _statsv, OutputArray _centroidsv): _mstatsv(&_statsv), _mcentroidsv(&_centroidsv){
        }
        inline
        void init(int nlabels){
            _mstatsv->create(cv::Size(CC_STAT_MAX, nlabels), cv::DataType<int>::type);
            statsv = _mstatsv->getMat();
            _mcentroidsv->create(cv::Size(2, nlabels), cv::DataType<double>::type);
            centroidsv = _mcentroidsv->getMat();

            for(int l = 0; l < (int) nlabels; ++l){
                int *row = (int *) &statsv.at<int>(l, 0);
                row[CC_STAT_LEFT] = INT_MAX;
                row[CC_STAT_TOP] = INT_MAX;
                row[CC_STAT_WIDTH] = INT_MIN;
                row[CC_STAT_HEIGHT] = INT_MIN;
                row[CC_STAT_AREA] = 0;
            }
            integrals.resize(nlabels, Point2ui64(0, 0));
        }
        void operator()(int r, int c, int l){
            int *row = &statsv.at<int>(l, 0);
            row[CC_STAT_LEFT] = MIN(row[CC_STAT_LEFT], c);
            row[CC_STAT_WIDTH] = MAX(row[CC_STAT_WIDTH], c);
            row[CC_STAT_TOP] = MIN(row[CC_STAT_TOP], r);
            row[CC_STAT_HEIGHT] = MAX(row[CC_STAT_HEIGHT], r);
            row[CC_STAT_AREA]++;
            Point2ui64 &integral = integrals[l];
            integral.x += c;
            integral.y += r;
        }
        void finish(){
            for(int l = 0; l < statsv.rows; ++l){
                int *row = &statsv.at<int>(l, 0);
                row[CC_STAT_WIDTH] = row[CC_STAT_WIDTH] - row[CC_STAT_LEFT] + 1;
                row[CC_STAT_HEIGHT] = row[CC_STAT_HEIGHT] - row[CC_STAT_TOP] + 1;

                Point2ui64 &integral = integrals[l];
                double *centroid = &centroidsv.at<double>(l, 0);
                double area = ((unsigned*)row)[CC_STAT_AREA];
                centroid[0] = double(integral.x) / area;
                centroid[1] = double(integral.y) / area;
            }
        }
    };

    //Find the root of the tree of node i
    template<typename LabelT>
    inline static
    LabelT findRoot(const LabelT *P, LabelT i){
        LabelT root = i;
        while(P[root] < root){
            root = P[root];
        }
        return root;
    }

    //Make all nodes in the path of node i point to root
    template<typename LabelT>
    inline static
    void setRoot(LabelT *P, LabelT i, LabelT root){
        while(P[i] < i){
            LabelT j = P[i];
            P[i] = root;
            i = j;
        }
        P[i] = root;
    }

    //Find the root of the tree of the node i and compress the path in the process
    template<typename LabelT>
    inline static
    LabelT find(LabelT *P, LabelT i){
        LabelT root = findRoot(P, i);
        setRoot(P, i, root);
        return root;
    }

    //unite the two trees containing nodes i and j and return the new root
    template<typename LabelT>
    inline static
    LabelT set_union(LabelT *P, LabelT i, LabelT j){
        LabelT root = findRoot(P, i);
        if(i != j){
            LabelT rootj = findRoot(P, j);
            if(root > rootj){
                root = rootj;
            }
            setRoot(P, j, root);
        }
        setRoot(P, i, root);
        return root;
    }

    //Flatten the Union Find tree and relabel the components
    template<typename LabelT>
    inline static
    LabelT flattenL(LabelT *P, LabelT length){
        LabelT k = 1;
        for(LabelT i = 1; i < length; ++i){
            if(P[i] < i){
                P[i] = P[P[i]];
            }else{
                P[i] = k;
                k = k + 1;
            }
        }
        return k;
    }

    //Based on "Two Strategies to Speed up Connected Components Algorithms", the SAUF (Scan array union find) variant
    //using decision trees
    //Kesheng Wu, et al
    //Note: rows are encoded as position in the "rows" array to save lookup times
    //reference for 4-way: {{-1, 0}, {0, -1}};//b, d neighborhoods
    const int G4[2][2] = {{1, 0}, {0, -1}};//b, d neighborhoods
    //reference for 8-way: {{-1, -1}, {-1, 0}, {-1, 1}, {0, -1}};//a, b, c, d neighborhoods
    const int G8[4][2] = {{1, -1}, {1, 0}, {1, 1}, {0, -1}};//a, b, c, d neighborhoods

    template<typename LabelT, typename PixelT, typename StatsOp>
    struct LabelingImpl{
        LabelT operator()(const cv::Mat &I, cv::Mat &L, int connectivity, StatsOp &sop){
            CV_Assert(L.rows == I.rows);
            CV_Assert(L.cols == I.cols);
            CV_Assert(connectivity == 8 || connectivity == 4);
            const int rows = L.rows;
            const int cols = L.cols;
            //A quick and dirty upper bound for the maximimum number of labels. The 4 comes from
            //the fact that a 3x3 block can never have more than 4 unique labels for both 4 & 8-way
            const size_t Plength = 4 * (size_t(rows + 3 - 1)/3) * (size_t(cols + 3 - 1)/3);
            LabelT *P = (LabelT *) fastMalloc(sizeof(LabelT) * Plength);
            P[0] = 0;
            LabelT lunique = 1;
            //scanning phase
            for(int r_i = 0; r_i < rows; ++r_i){
                LabelT * const Lrow = L.ptr<LabelT>(r_i);
                LabelT * const Lrow_prev = (LabelT *)(((char *)Lrow) - L.step.p[0]);
                const PixelT * const Irow = I.ptr<PixelT>(r_i);
                const PixelT * const Irow_prev = (const PixelT *)(((char *)Irow) - I.step.p[0]);
                LabelT *Lrows[2] = { Lrow, Lrow_prev };
                const PixelT *Irows[2] = { Irow, Irow_prev };
                if(connectivity == 8){
                    const int a = 0;
                    const int b = 1;
                    const int c = 2;
                    const int d = 3;
                    const bool T_a_r = (r_i - G8[a][0]) >= 0;
                    const bool T_b_r = (r_i - G8[b][0]) >= 0;
                    const bool T_c_r = (r_i - G8[c][0]) >= 0;
                    for(int c_i = 0; Irows[0] != Irow + cols; ++Irows[0], c_i++){
                        if(!*Irows[0]){
                            Lrow[c_i] = 0;
                            continue;
                        }
                        Irows[1] = Irow_prev + c_i;
                        Lrows[0] = Lrow + c_i;
                        Lrows[1] = Lrow_prev + c_i;
                        const bool T_a = T_a_r && (c_i + G8[a][1]) >= 0 && *(Irows[G8[a][0]] + G8[a][1]);
                        const bool T_b = T_b_r && *(Irows[G8[b][0]] + G8[b][1]);
                        const bool T_c = T_c_r && (c_i + G8[c][1]) < cols && *(Irows[G8[c][0]] + G8[c][1]);
                        const bool T_d = (c_i + G8[d][1]) >= 0 && *(Irows[G8[d][0]] + G8[d][1]);
                        //decision tree
                        if(T_b){
                            //copy(b)
                            *Lrows[0] = *(Lrows[G8[b][0]] + G8[b][1]);
                        }else{//not b
                            if(T_c){
                                if(T_a){
                                    //copy(c, a)
                                    *Lrows[0] = set_union(P, *(Lrows[G8[c][0]] + G8[c][1]), *(Lrows[G8[a][0]] + G8[a][1]));
                                }else{
                                    if(T_d){
                                        //copy(c, d)
                                        *Lrows[0] = set_union(P, *(Lrows[G8[c][0]] + G8[c][1]), *(Lrows[G8[d][0]] + G8[d][1]));
                                    }else{
                                        //copy(c)
                                        *Lrows[0] = *(Lrows[G8[c][0]] + G8[c][1]);
                                    }
                                }
                            }else{//not c
                                if(T_a){
                                    //copy(a)
                                    *Lrows[0] = *(Lrows[G8[a][0]] + G8[a][1]);
                                }else{
                                    if(T_d){
                                        //copy(d)
                                        *Lrows[0] = *(Lrows[G8[d][0]] + G8[d][1]);
                                    }else{
                                        //new label
                                        *Lrows[0] = lunique;
                                        P[lunique] = lunique;
                                        lunique = lunique + 1;
                                    }
                                }
                            }
                        }
                    }
                }else{
                    //B & D only
                    const int b = 0;
                    const int d = 1;
                    const bool T_b_r = (r_i - G4[b][0]) >= 0;
                    for(int c_i = 0; Irows[0] != Irow + cols; ++Irows[0], c_i++){
                        if(!*Irows[0]){
                            Lrow[c_i] = 0;
                            continue;
                        }
                        Irows[1] = Irow_prev + c_i;
                        Lrows[0] = Lrow + c_i;
                        Lrows[1] = Lrow_prev + c_i;
                        const bool T_b = T_b_r && *(Irows[G4[b][0]] + G4[b][1]);
                        const bool T_d = (c_i + G4[d][1]) >= 0 && *(Irows[G4[d][0]] + G4[d][1]);
                        if(T_b){
                            if(T_d){
                                //copy(d, b)
                                *Lrows[0] = set_union(P, *(Lrows[G4[d][0]] + G4[d][1]), *(Lrows[G4[b][0]] + G4[b][1]));
                            }else{
                                //copy(b)
                                *Lrows[0] = *(Lrows[G4[b][0]] + G4[b][1]);
                            }
                        }else{
                            if(T_d){
                                //copy(d)
                                *Lrows[0] = *(Lrows[G4[d][0]] + G4[d][1]);
                            }else{
                                //new label
                                *Lrows[0] = lunique;
                                P[lunique] = lunique;
                                lunique = lunique + 1;
                            }
                        }
                    }
                }
            }

            //analysis
            LabelT nLabels = flattenL(P, lunique);
            sop.init(nLabels);

            for(int r_i = 0; r_i < rows; ++r_i){
                LabelT *Lrow_start = L.ptr<LabelT>(r_i);
                LabelT *Lrow_end = Lrow_start + cols;
                LabelT *Lrow = Lrow_start;
                for(int c_i = 0; Lrow != Lrow_end; ++Lrow, ++c_i){
                    const LabelT l = P[*Lrow];
                    *Lrow = l;
                    sop(r_i, c_i, l);
                }
            }

            sop.finish();
            fastFree(P);

            return nLabels;
        }//End function LabelingImpl operator()
    };//End struct LabelingImpl

    }//end namespace connectedcomponents

    //L's type must have an appropriate depth for the number of pixels in I
    template<typename StatsOp>
    static
    int connectedComponents_sub1(const cv::Mat &I, cv::Mat &L, int connectivity, StatsOp &sop){
        CV_Assert(L.channels() == 1 && I.channels() == 1);
        CV_Assert(connectivity == 8 || connectivity == 4);

        int lDepth = L.depth();
        int iDepth = I.depth();
        using connectedcomponents::LabelingImpl;
        //warn if L's depth is not sufficient?

        CV_Assert(iDepth == CV_8U || iDepth == CV_8S);

        if(lDepth == CV_8U){
            return (int) LabelingImpl<uchar, uchar, StatsOp>()(I, L, connectivity, sop);
        }else if(lDepth == CV_16U){
            return (int) LabelingImpl<ushort, uchar, StatsOp>()(I, L, connectivity, sop);
        }else if(lDepth == CV_32S){
            //note that signed types don't really make sense here and not being able to use unsigned matters for scientific projects
            //OpenCV: how should we proceed?  .at<T> typechecks in debug mode
            return (int) LabelingImpl<int, uchar, StatsOp>()(I, L, connectivity, sop);
        }

        CV_Error(CV_StsUnsupportedFormat, "unsupported label/image type");
        return -1;
    }

    }

    int cv::connectedComponents(InputArray _img, OutputArray _labels, int connectivity, int ltype){
        const cv::Mat img = _img.getMat();
        _labels.create(img.size(), CV_MAT_DEPTH(ltype));
        cv::Mat labels = _labels.getMat();
        connectedcomponents::NoOp sop;
        if(ltype == CV_16U){
            return connectedComponents_sub1(img, labels, connectivity, sop);
        }else if(ltype == CV_32S){
            return connectedComponents_sub1(img, labels, connectivity, sop);
        }else{
            CV_Error(CV_StsUnsupportedFormat, "the type of labels must be 16u or 32s");
            return 0;
        }
    }

    int cv::connectedComponentsWithStats(InputArray _img, OutputArray _labels, OutputArray statsv,
                                         OutputArray centroids, int connectivity, int ltype)
    {
        const cv::Mat img = _img.getMat();
        _labels.create(img.size(), CV_MAT_DEPTH(ltype));
        cv::Mat labels = _labels.getMat();
        connectedcomponents::CCStatsOp sop(statsv, centroids);
        if(ltype == CV_16U){
            return connectedComponents_sub1(img, labels, connectivity, sop);
        }else if(ltype == CV_32S){
            return connectedComponents_sub1(img, labels, connectivity, sop);
        }else{
            CV_Error(CV_StsUnsupportedFormat, "the type of labels must be 16u or 32s");
            return 0;
        }
    }

Use the code below to get connected-component labels (it works the same way as MATLAB's bwlabel); a short usage sketch follows the function. MATLAB's find and compare functions behave differently, so take the time to work through them; in the meantime, the code below will solve your problem temporarily. (Get familiar with findContours: try playing with the contour retrieval mode and the contour approximation method. Ref)

    void bwlabelMat(Mat &binary, vector<vector<Point> > &lablidx, int &labels)
    {
        if (binary.type() != CV_32F)
        {
            cout << "convert the input image to CV_32FC1 with 0 & 1 as pixel elements" << endl;
            exit(EXIT_FAILURE);
        }

        // starts at 2 because 0,1 are used already
        int labelCount = 2;

        for (int y = 0; y < binary.rows; y++)
        {
            for (int x = 0; x < binary.cols; x++)
            {
                if (1 == (int)binary.at<float>(y, x))
                {
                    Rect rect;
                    floodFill(binary, Point(x, y), Scalar(labelCount), &rect, Scalar(0), Scalar(0), 4);

                    vector<Point> blob;
                    for (int i = rect.y; i < (rect.y + rect.height); i++)
                    {
                        for (int j = rect.x; j < (rect.x + rect.width); j++)
                        {
                            if (labelCount == (int)binary.at<float>(i, j))
                            {
                                blob.push_back(Point(j, i));
                            }
                        }
                    }
                    lablidx.push_back(blob);
                    labelCount++;
                }
            }
        }

        for (int y = 0; y < binary.rows; y++)
        {
            for (int x = 0; x < binary.cols; x++)
            {
                if ((0 != (int)binary.at<float>(y, x)) && (1 != (int)binary.at<float>(y, x)))
                    binary.at<float>(y, x) = binary.at<float>(y, x) - 1.0;
            }
        }

        labelCount = labelCount - 2;
        labels = labelCount;
    }
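
A small usage sketch, assuming bwImg is an 8-bit 0/255 binary mask (the name is borrowed from the question); the conversion produces the CV_32FC1 image with 0/1 values that the function above expects:

    // Hypothetical usage of bwlabelMat: convert a 0/255 mask to CV_32FC1 with 0/1 values,
    // then label the blobs much like MATLAB's bwlabel.
    cv::Mat binary;
    bwImg.convertTo(binary, CV_32F, 1.0 / 255.0);

    std::vector<std::vector<cv::Point> > labelIdx;   // pixel coordinates of each blob
    int numLabels = 0;
    bwlabelMat(binary, labelIdx, numLabels);
    // labelIdx[i].size() is the pixel-count area of blob i, comparable to regionprops 'Area'.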

Regarding MATLAB's regionprops: OpenCV as such has no equivalent of regionprops, but it is still possible to reproduce the exact results, since it only involves straightforward calculations. I am sharing a link because I cannot post the full code here; I referred to this Python implementation and reproduced it in C++, and it works well (a short sketch of the idea follows the link). Go ahead.

Link: Contour Features
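
To give a flavour of the computations the link covers, here is a rough sketch of my own (not the linked code) that derives regionprops-like properties (area, bounding box, centroid) from findContours output; the contours variable is assumed to hold the point vectors returned by a call like the one in the question:

    // Sketch: regionprops-style measurements per contour.
    for (size_t i = 0; i < contours.size(); ++i)
    {
        double area = cv::contourArea(contours[i]);     // roughly regionprops 'Area'
        cv::Rect bbox = cv::boundingRect(contours[i]);  // roughly 'BoundingBox'
        cv::Moments m = cv::moments(contours[i]);
        if (m.m00 == 0) continue;                       // skip degenerate contours
        double cx = m.m10 / m.m00;                      // roughly 'Centroid'
        double cy = m.m01 / m.m00;
        std::cout << "blob " << i << ": area=" << area
                  << " bbox=(" << bbox.x << "," << bbox.y << "," << bbox.width << "," << bbox.height << ")"
                  << " centroid=(" << cx << "," << cy << ")" << std::endl;
    }

Note that cv::contourArea measures the polygon area enclosed by the contour, which is usually slightly smaller than the pixel count regionprops reports, so area thresholds tuned on one may need adjusting for the other.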

Hope this helps.