Update:
I found that the link uses OpenCV to detect squares. Can I modify it so that it finds rectangular shapes instead? Can someone guide me?
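For reference, here is the contour-filtering step from the squares sample I am looking at (my own reading of it, not modified code): as far as I can tell it only keeps convex quadrilaterals whose corners are close to 90 degrees and whose area is above a minimum, and there is no aspect-ratio test, so I assume rectangles, and not just squares, would already pass this check.

    // filtering step from OpenCV's squares.cpp sample (unmodified, for reference)
    // approx is the polygon returned by approxPolyDP for one contour
    if( approx.size() == 4 &&
        fabs(contourArea(Mat(approx))) > 1000 &&
        isContourConvex(Mat(approx)) )
    {
        double maxCosine = 0;
        for( int j = 2; j < 5; j++ )
        {
            // cosine of the angle at each corner; close to 0 means close to 90 degrees
            double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
            maxCosine = MAX(maxCosine, cosine);
        }
        // only the corner angles are tested - side lengths are never compared,
        // so any near-right-angled quadrilateral (i.e. a rectangle) is accepted
        if( maxCosine < 0.3 )
            squares.push_back(approx);
    }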
Latest update:
I finally got the code working; here it is.
- (cv::Mat)cvMatWithImage:(UIImage *)image
{
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage);
    CGFloat cols = image.size.width;
    CGFloat rows = image.size.height;

    cv::Mat cvMat(rows, cols, CV_8UC4);                               // 8 bits per component, 4 channels

    CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,       // Pointer to backing data
                                                    cols,             // Width of bitmap
                                                    rows,             // Height of bitmap
                                                    8,                // Bits per component
                                                    cvMat.step[0],    // Bytes per row
                                                    colorSpace,       // Colorspace
                                                    kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault); // Bitmap info flags

    CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), image.CGImage);
    CGContextRelease(contextRef);

    return cvMat;
}

- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
    NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];

    CGColorSpaceRef colorSpace;
    if (cvMat.elemSize() == 1) {
        colorSpace = CGColorSpaceCreateDeviceGray();
    } else {
        colorSpace = CGColorSpaceCreateDeviceRGB();
    }

    // __bridge cast needed under ARC
    CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

    CGImageRef imageRef = CGImageCreate(cvMat.cols, cvMat.rows,
                                        8, 8 * cvMat.elemSize(), cvMat.step[0],
                                        colorSpace,
                                        kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                        provider, NULL, false, kCGRenderingIntentDefault);

    UIImage *finalImage = [UIImage imageWithCGImage:imageRef];

    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);

    return finalImage;
}

- (void)forOpenCV
{
    imageView = [UIImage imageNamed:@"myimage.jpg"];
    if (imageView != nil) {
        cv::Mat tempMat = [imageView CVMat];
        cv::Mat greyMat = [self cvMatWithImage:imageView];

        std::vector<std::vector<cv::Point> > squares;
        cv::Mat img = [self debugSquares:squares :greyMat];

        imageView = [self UIImageFromCVMat:img];
        self.imageView.image = imageView;
    }
}

double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 )
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

- (cv::Mat)debugSquares:(std::vector<std::vector<cv::Point> >)squares :(cv::Mat &)image
{
    NSLog(@"%lu", squares.size());

    // blur will enhance edge detection
    cv::Mat blurred = image.clone();
    medianBlur(image, blurred, 9);

    cv::Mat gray0(image.size(), CV_8U), gray;
    std::vector<std::vector<cv::Point> > contours;

    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&image, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);

                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, cv::Mat(), cv::Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours
            std::vector<cv::Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(cv::Mat(contours[i]), approx, arcLength(cv::Mat(contours[i]), true)*0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(cv::Mat(approx))) > 1000 &&
                    isContourConvex(cv::Mat(approx)))
                {
                    double maxCosine = 0;

                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }

    NSLog(@"squares.size(): %lu", squares.size());

    for (size_t i = 0; i < squares.size(); i++)
    {
        cv::Rect rectangle = boundingRect(cv::Mat(squares[i]));
        NSLog(@"rectangle.x: %d", rectangle.x);
        NSLog(@"rectangle.y: %d", rectangle.y);

        if (i == squares.size() - 1) // Detecting Rectangle here
        {
            const cv::Point* p = &squares[i][0];
            int n = (int)squares[i].size();
            NSLog(@"%d", n);

            line(image, cv::Point(507,418), cv::Point(507+1776,418+1372), cv::Scalar(255,0), 2, 8);
            polylines(image, &p, &n, 1, true, 255, 5, CV_AA);

            int fx1 = rectangle.x;
            NSLog(@"X: %d", fx1);
            int fy1 = rectangle.y;
            NSLog(@"Y: %d", fy1);
            int fx2 = rectangle.x + rectangle.width;
            NSLog(@"Width: %d", fx2);
            int fy2 = rectangle.y + rectangle.height;
            NSLog(@"Height: %d", fy2);

            line(image, cv::Point(fx1,fy1), cv::Point(fx2,fy2), cv::Scalar(0,255), 8);
        }
    }

    return image;
}
Thanks.
Solution

Here is a complete answer using a small wrapper class to separate the C++ from the Objective-C code. I had to raise another question on stackoverflow to deal with my poor C++ knowledge, but I have worked out everything we need to interface C++ cleanly with Objective-C code, using the squares.cpp sample code as an example. The aim is to keep the original C++ code as pristine as possible, and to keep the bulk of the work with OpenCV in pure C++ files for (im)portability.
I have left my original answer in place as this seemed to go beyond the scope of an edit. The complete demo project is on github.
CVViewController.h / CVViewController.m
> Pure Objective-C
> Communicates with the OpenCV C++ code via a WRAPPER... it neither knows nor cares that C++ is handling these method calls behind the wrapper.
CVSquaresWrapper.h / CVSquaresWrapper.mm
> Objective-C++
> Does as little as possible, really only two things...
> Calls the UIImage objC category to convert to and from UIImage <> cv::Mat
> Mediates between CVViewController's obj-C method calls and the CVSquares C++ (class) function call
CVSquares.h / CVSquares.cpp
> Pure C++
> CVSquares.cpp declares public functions inside a class definition (in this case, a single static function).
This replaces the work of main{} in the original file.
> We keep CVSquares.cpp as close to the C++ original as possible to make porting easier.
CVViewController.m
//remove 'magic numbers' from original C++ source so we can manipulate them from obj-C
#define TOLERANCE 0.01
#define THRESHOLD 50
#define LEVELS 9

UIImage* image = [CVSquaresWrapper detectedSquaresInImage:self.image
                                                tolerance:TOLERANCE
                                                threshold:THRESHOLD
                                                   levels:LEVELS];
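For context, a hypothetical example of where this call could sit in CVViewController.m; the action method name and the imageView outlet are illustrative assumptions, not taken from the demo project:

    // hypothetical calling context - method and property names are illustrative only
    - (IBAction)detectSquares:(id)sender
    {
        UIImage* result = [CVSquaresWrapper detectedSquaresInImage:self.image
                                                         tolerance:TOLERANCE
                                                         threshold:THRESHOLD
                                                            levels:LEVELS];
        self.imageView.image = result;   // display the annotated image returned by the wrapper
    }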
CVSquaresWrapper.h
//  CVSquaresWrapper.h

#import <Foundation/Foundation.h>

@interface CVSquaresWrapper : NSObject

+ (UIImage*) detectedSquaresInImage:(UIImage*)image
                          tolerance:(CGFloat)tolerance
                          threshold:(NSInteger)threshold
                             levels:(NSInteger)levels;

@end
CVSquaresWrapper.mm
//  CVSquaresWrapper.mm
//  wrapper that talks to c++ and to obj-c classes

#import "CVSquaresWrapper.h"
#import "CVSquares.h"
#import "UIImage+OpenCV.h"

@implementation CVSquaresWrapper

+ (UIImage*) detectedSquaresInImage:(UIImage*)image
                          tolerance:(CGFloat)tolerance
                          threshold:(NSInteger)threshold
                             levels:(NSInteger)levels
{
    UIImage* result = nil;

    //convert from UIImage to cv::Mat openCV image format
    //this is a category on UIImage
    cv::Mat matimage = [image CVMat];

    //call the c++ class static member function
    //we want this function signature to exactly
    //mirror the form of the calling method
    matimage = CVSquares::detectedSquaresInImage(matimage, tolerance, threshold, levels);

    //convert back from cv::Mat openCV image format
    //to UIImage image format (category on UIImage)
    result = [UIImage imageFromCVMat:matimage];

    return result;
}

@end
CVSquares.h
//  CVSquares.h

#ifndef __OpenCVClient__CVSquares__
#define __OpenCVClient__CVSquares__

//class definition
//in this example we do not need a class
//as we have no instance variables and just one static function.
//We could instead just declare the function but this form seems clearer

class CVSquares
{
public:
    static cv::Mat detectedSquaresInImage (cv::Mat image, float tol, int threshold, int levels);
};

#endif /* defined(__OpenCVClient__CVSquares__) */
CVSquares.cpp
//  CVSquares.cpp

#include "CVSquares.h"

using namespace std;
using namespace cv;

static int thresh = 50, N = 11;
static float tolerance = 0.01;

//declarations added so that we can move our
//public function to the top of the file
static void findSquares( const Mat& image, vector<vector<Point> >& squares );
static void drawSquares( Mat& image, vector<vector<Point> >& squares );

//this public function performs the role of
//main{} in the original file (main{} is deleted)
cv::Mat CVSquares::detectedSquaresInImage (cv::Mat image, float tol, int threshold, int levels)
{
    vector<vector<Point> > squares;

    if( image.empty() )
    {
        cout << "Couldn't load " << endl;
    }

    tolerance = tol;
    thresh = threshold;
    N = levels;

    findSquares(image, squares);
    drawSquares(image, squares);

    return image;
}

// the rest of this file is identical to the original squares.cpp except:
// main{} is removed
// this line is removed from drawSquares:
//     imshow(wndname, image);
//     (obj-c will do the drawing)
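For reference, a minimal sketch of the part described above as identical to the original squares.cpp: the drawSquares helper from the OpenCV sample with the imshow line removed, plus my reading of where the tolerance static plugs into findSquares. This is reconstructed from the OpenCV sample, not copied from the demo project:

    // sketch based on OpenCV's squares.cpp sample - not the exact demo-project code
    static void drawSquares( Mat& image, vector<vector<Point> >& squares )
    {
        for( size_t i = 0; i < squares.size(); i++ )
        {
            const Point* p = &squares[i][0];
            int n = (int)squares[i].size();
            // outline each detected quadrilateral on the image handed back to obj-c
            polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA);
        }
        // imshow(wndname, image);   // removed - obj-c does the drawing
    }

    // inside findSquares, the sample's hard-coded corner-angle limit
    //     if( maxCosine < 0.3 ) squares.push_back(approx);
    // is presumably where the tolerance static takes over:
    //     if( maxCosine < tolerance ) squares.push_back(approx);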
UIImage+OpenCV.h
The UIImage category is an objC file containing the code to convert between the UIImage and cv::Mat image formats. This is where you move your two methods - (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat and - (cv::Mat)cvMatWithImage:(UIImage *)image.
//  UIImage+OpenCV.h

#import <UIKit/UIKit.h>

@interface UIImage (UIImage_OpenCV)

//cv::Mat to UIImage
+ (UIImage *)imageFromCVMat:(cv::Mat&)cvMat;

//UIImage to cv::Mat
- (cv::Mat)CVMat;

@end
The method implementations here are unchanged from your code (although we don't pass a UIImage in for the conversion; instead we refer to self).
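For completeness, a sketch of what UIImage+OpenCV.mm looks like after moving the two methods from the question into the category. This is the question's own conversion code adapted to the category form (using self instead of a passed-in UIImage, and the class-method signature from the header), not the exact file from the demo project:

    //  UIImage+OpenCV.mm
    //  sketch: the question's two conversion methods, adapted to the category

    #import "UIImage+OpenCV.h"

    @implementation UIImage (UIImage_OpenCV)

    //UIImage (self) to cv::Mat
    - (cv::Mat)CVMat
    {
        CGColorSpaceRef colorSpace = CGImageGetColorSpace(self.CGImage);
        CGFloat cols = self.size.width;
        CGFloat rows = self.size.height;

        cv::Mat cvMat(rows, cols, CV_8UC4);   // 8 bits per component, 4 channels

        CGContextRef contextRef = CGBitmapContextCreate(cvMat.data,     // pointer to backing data
                                                        cols,            // width of bitmap
                                                        rows,            // height of bitmap
                                                        8,               // bits per component
                                                        cvMat.step[0],   // bytes per row
                                                        colorSpace,      // colorspace
                                                        kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);

        CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), self.CGImage);
        CGContextRelease(contextRef);

        return cvMat;
    }

    //cv::Mat to UIImage
    + (UIImage *)imageFromCVMat:(cv::Mat&)cvMat
    {
        NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize() * cvMat.total()];

        CGColorSpaceRef colorSpace;
        if (cvMat.elemSize() == 1) {
            colorSpace = CGColorSpaceCreateDeviceGray();
        } else {
            colorSpace = CGColorSpaceCreateDeviceRGB();
        }

        // __bridge cast needed under ARC
        CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);

        CGImageRef imageRef = CGImageCreate(cvMat.cols, cvMat.rows,
                                            8, 8 * cvMat.elemSize(), cvMat.step[0],
                                            colorSpace,
                                            kCGImageAlphaNone | kCGBitmapByteOrderDefault,
                                            provider, NULL, false, kCGRenderingIntentDefault);

        UIImage *finalImage = [UIImage imageWithCGImage:imageRef];

        CGImageRelease(imageRef);
        CGDataProviderRelease(provider);
        CGColorSpaceRelease(colorSpace);

        return finalImage;
    }

    @end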