
OpenCV pixel access

Published: 2022-09-23 22:46:03

❶ How to get the gray value of every pixel of an image with OpenCV

#include"cv.h"
#include"highgui.h"
#include
<iostream>
using
namespace
std;
int
main(int
argc,
char**
argv)
{
IplImage*
src
=
cvLoadImage(
"0.bmp",
0
);//导入图片
int
width=src->width;//图片宽度
int
height
=
src->height;//图片高度
for
(size_t
row=0;row<height;row++)
{
uchar*
ptr
=
(uchar*)src->imageData+row*src->width;//获得灰度值数据指针
for
(size_t
cols=0;cols<width;cols++)
{
int
intensity=ptr[cols];
cout<<intensity<<"
";
}
}
return
0;
}
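With the C++ interface the same traversal can be written with cv::Mat. A minimal sketch, assuming the same "0.bmp" file and OpenCV 3 or later for the IMREAD_GRAYSCALE name:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::Mat gray = cv::imread("0.bmp", cv::IMREAD_GRAYSCALE);
    if (gray.empty()) return -1;

    for (int row = 0; row < gray.rows; row++) {
        for (int col = 0; col < gray.cols; col++) {
            int intensity = gray.at<uchar>(row, col);   // gray value of pixel (row, col)
            std::cout << intensity << " ";
        }
        std::cout << "\n";
    }
    return 0;
}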

❷ How to read the pixel values inside a polygonal region with OpenCV

There are a few approaches:

1. Draw the contour data onto an image with drawContours, traverse the image, and test each point with the pointPolygonTest function to decide whether it lies inside the polygon.

2. Draw the contour with drawContours onto a black image, using the filled mode with a white color, then use countNonZero to count the non-zero pixels. Watch the boundary condition: if the outline is drawn in the same color as the interior, the final count may need the contour length subtracted (as I understand it, points on the contour edge do not count as being inside the contour). A sketch of this mask-based approach follows the PS note below.

PS: drawContours draws external and internal contours on an image. When thickness >= 0 it draws the contour outline; otherwise it fills the region enclosed by the contour.
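A minimal sketch of the mask-based idea in method 2. The file name "shapes.png", the thresholding step, and the use of findContours are illustrative assumptions, and the constant names assume OpenCV 3 or later:

#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    // hypothetical input image containing the shapes of interest
    cv::Mat gray = cv::imread("shapes.png", cv::IMREAD_GRAYSCALE);
    if (gray.empty()) return -1;

    // binarize, then extract the contours
    cv::Mat bin;
    cv::threshold(gray, bin, 128, 255, cv::THRESH_BINARY);
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(bin, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    if (contours.empty()) return 0;

    // draw contour 0 filled in white on a black mask
    cv::Mat mask = cv::Mat::zeros(gray.size(), CV_8UC1);
    cv::drawContours(mask, contours, 0, cv::Scalar(255), cv::FILLED);

    // number of pixels inside the polygon (boundary included)
    int area = cv::countNonZero(mask);
    // mean gray value of the source pixels covered by the mask
    cv::Scalar meanVal = cv::mean(gray, mask);

    std::cout << "pixels inside: " << area << ", mean value: " << meanVal[0] << std::endl;
    return 0;
}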

❸ How to access the pixels of an image with OpenCV

#include"cv.h" #include"highgui.h" #include #include using namespace std; using namespace cv; int main(int argc, char** argv) { IplImage *myimage = cvLoadImage("D:\\w.jpg",1);//通过图片路径载入图片,参数1是彩色图片,

❹ How to get the pixel values of a grayscale image with OpenCV, using the C++ interface

The pixel values can be read with the C++ interface as follows:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat img = imread("lena.jpg");
    imshow("Lena Original", img);

    for (int row = 0; row < img.rows; row++)
    {
        for (int col = 0; col < img.cols; col++)
        {
            // the key part: data + step[0]*row + step[1]*col is the address of the
            // blue byte of pixel [row, col]; each elemSize1() step moves to the next channel
            if (*(img.data + img.step[0] * row + img.step[1] * col + img.elemSize1() * 2) > 128)
            {
                // channel 1 of pixel [row, col] (blue)
                *(img.data + img.step[0] * row + img.step[1] * col) = 255;
                // channel 2 of pixel [row, col] (green); elemSize1() is the size in bytes of one channel element
                *(img.data + img.step[0] * row + img.step[1] * col + img.elemSize1()) = 255;
                // channel 3 of pixel [row, col] (red)
                *(img.data + img.step[0] * row + img.step[1] * col + img.elemSize1() * 2) = 255;
            }
        }
    }
    imshow("Lena Modified", img);
    waitKey(0);
    return 0;
}

Output: (the result images are not included here)
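For comparison, the same red-channel test is usually easier to write with cv::Mat::at, which avoids the manual step arithmetic. A minimal sketch under the same assumptions (an 8-bit, 3-channel "lena.jpg"):

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img = cv::imread("lena.jpg");              // 8-bit, 3-channel BGR image
    if (img.empty()) return -1;

    for (int row = 0; row < img.rows; row++) {
        for (int col = 0; col < img.cols; col++) {
            cv::Vec3b& px = img.at<cv::Vec3b>(row, col);   // px[0]=B, px[1]=G, px[2]=R
            if (px[2] > 128) {                             // same test: red channel above 128
                px = cv::Vec3b(255, 255, 255);             // set the whole pixel to white
            }
        }
    }

    cv::imshow("Lena Modified", img);
    cv::waitKey(0);
    return 0;
}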

❺ How to compute the pixel values of an object in an image with OpenCV

Getting the value of a given pixel in OpenCV.
This is a basic example for OpenCV.

First we must know the structure of IplImage:
IPL image:
IplImage
|-- int nChannels; // Number of color channels (1,2,3,4)
|-- int depth; // Pixel depth in bits:
| // IPL_DEPTH_8U, IPL_DEPTH_8S,
| // IPL_DEPTH_16U,IPL_DEPTH_16S,
| // IPL_DEPTH_32S,IPL_DEPTH_32F,
| // IPL_DEPTH_64F
|-- int width; // image width in pixels
|-- int height; // image height in pixels
|-- char* imageData; // pointer to aligned image data
| // Note that color images are stored in BGR order
|-- int dataOrder; // 0 - interleaved color channels,
| // 1 - separate color channels
| // cvCreateImage can only create interleaved images
|-- int origin; // 0 - top-left origin,
| // 1 - bottom-left origin (Windows bitmaps style)
|-- int widthStep; // size of aligned image row in bytes
|-- int imageSize; // image data size in bytes = height*widthStep
|-- struct _IplROI *roi;// image ROI. when not NULL specifies image
| // region to be processed.
|-- char *imageDataOrigin; // pointer to the unaligned origin of image data
| // (needed for correct image deallocation)
|
|-- int align; // Alignment of image rows: 4 or 8 byte alignment
| // OpenCV ignores this and uses widthStep instead
|-- char colorModel[4]; // Color model - ignored by OpenCV
//------------------------------------------------------------------------------
int main(int argc, char* argv[])
{
    IplImage* img = cvLoadImage("c://fruitfs.bmp", 1);
    CvScalar s;
    for (int i = 0; i < img->height; i++) {
        for (int j = 0; j < img->width; j++) {
            s = cvGet2D(img, i, j);        // get the (i,j) pixel value
            printf("B=%f, G=%f, R=%f ", s.val[0], s.val[1], s.val[2]);
            s.val[0] = 111;
            s.val[1] = 111;
            s.val[2] = 111;
            cvSet2D(img, i, j, s);         // set the (i,j) pixel value
        }
    }
    cvNamedWindow("Image", 1);
    cvShowImage("Image", img);
    cvWaitKey(0);                          // wait for a key press
    cvDestroyWindow("Image");              // destroy the window
    cvReleaseImage(&img);                  // release the image
    return 0;
}

There are actually better approaches, for example wrapping the access in a class, which is both more convenient to call and quite efficient.
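A minimal sketch of that idea using the C++ interface; the PixelAccessor class name and the "fruitfs.bmp" path are made up for illustration, and the class simply hides the row-pointer arithmetic behind a small method:

#include <opencv2/opencv.hpp>

// PixelAccessor is a hypothetical helper that wraps per-channel pixel access.
class PixelAccessor {
public:
    explicit PixelAccessor(cv::Mat& img) : img_(img) {}

    // reference to one 8-bit channel value of the pixel at (row, col)
    uchar& at(int row, int col, int channel = 0) {
        return img_.ptr<uchar>(row)[col * img_.channels() + channel];
    }

private:
    cv::Mat& img_;
};

int main()
{
    cv::Mat img = cv::imread("fruitfs.bmp");    // hypothetical path
    if (img.empty()) return -1;

    PixelAccessor px(img);
    px.at(10, 20, 0) = 111;                     // blue channel of pixel (10, 20)
    px.at(10, 20, 1) = 111;                     // green channel
    px.at(10, 20, 2) = 111;                     // red channel
    return 0;
}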

❻ In an OpenCV pixel-access program, how should this line be understood: (uchar*)src->imageData + i*src->width
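In short: imageData points to the first byte of the pixel buffer, and adding i*width moves the pointer forward by i rows of a single-channel 8-bit image, so the cast yields a uchar pointer to the start of row i. Because rows may be padded to a 4-byte boundary, i*widthStep is the safer offset. A minimal sketch, assuming a grayscale image loaded with cvLoadImage(path, 0):

#include "cv.h"
#include "highgui.h"

int main(int argc, char** argv)
{
    IplImage* src = cvLoadImage("0.bmp", 0);     // hypothetical grayscale image
    if (!src) return -1;

    for (int i = 0; i < src->height; i++)
    {
        // imageData points to the first byte of the pixel buffer; i*widthStep skips
        // i full (possibly padded) rows, so ptr is the start of row i.  The line in
        // the question uses i*src->width instead, which is only correct when the
        // rows carry no padding (widthStep == width for this 1-channel 8-bit case).
        uchar* ptr = (uchar*)src->imageData + i * src->widthStep;
        for (int j = 0; j < src->width; j++)
        {
            int intensity = ptr[j];              // gray value of pixel (i, j)
            (void)intensity;
        }
    }
    cvReleaseImage(&src);
    return 0;
}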

The answer that was actually posted for this question is a skin-detection demo (note that every loop in it obtains a row pointer with the safer imageData + h*widthStep form). It compares five skin-segmentation methods:

Method 1: RGB color space
Method 2: RG color space
Method 3: Otsu thresholding on the Cr component of YCrCb
Method 4: YCrCb with 133 <= Cr <= 173 and 77 <= Cb <= 127
Method 5: HSV with 7 < H < 29

A filtering step is needed afterwards, because the detection results contain many artifacts.
#include "highgui.h"
#include "cv.h"

// skin region location using rgb limitation
void SkinRGB(IplImage* rgb,IplImage* _dst)
{
assert(rgb->nChannels==3&& _dst->nChannels==3);

static const int R=2;
static const int G=1;
static const int B=0;

IplImage* dst=cvCreateImage(cvGetSize(_dst),8,3);
cvZero(dst);

for (int h=0;h<rgb->height;h++) {
unsigned char* prgb=(unsigned char*)rgb->imageData+h*rgb->widthStep;
unsigned char* pdst=(unsigned char*)dst->imageData+h*dst->widthStep;
for (int w=0;w<rgb->width;w++) {
if ((prgb[R]>95 && prgb[G]>40 && prgb[B]>20 &&
prgb[R]-prgb[B]>15 && prgb[R]-prgb[G]>15/*&&
!(prgb[R]>170&&prgb[G]>170&&prgb[B]>170)*/)||//uniform illumination
(prgb[R]>200 && prgb[G]>210 && prgb[B]>170 &&
abs(prgb[R]-prgb[B])<=15 && prgb[R]>prgb[B]&& prgb[G]>prgb[B])//lateral illumination
) {
memcpy(pdst,prgb,3);
}
prgb+=3;
pdst+=3;
}
}
cvCopyImage(dst,_dst);
cvReleaseImage(&dst);
}
// skin detection in rg space
void cvSkinRG(IplImage* rgb,IplImage* gray)
{
assert(rgb->nChannels==3&&gray->nChannels==1);

const int R=2;
const int G=1;
const int B=0;

double Aup=-1.8423;
double Bup=1.5294;
double Cup=0.0422;
double Adown=-0.7279;
double Bdown=0.6066;
double Cdown=0.1766;
for (int h=0;h<rgb->height;h++) {
unsigned char* pGray=(unsigned char*)gray->imageData+h*gray->widthStep;
unsigned char* pRGB=(unsigned char* )rgb->imageData+h*rgb->widthStep;
for (int w=0;w<rgb->width;w++)
{
int s=pRGB[R]+pRGB[G]+pRGB[B];
double r=(double)pRGB[R]/s;
double g=(double)pRGB[G]/s;
double Gup=Aup*r*r+Bup*r+Cup;
double Gdown=Adown*r*r+Bdown*r+Cdown;
double Wr=(r-0.33)*(r-0.33)+(g-0.33)*(g-0.33);
if (g<Gup && g>Gdown && Wr>0.004)
{
*pGray=255;
}
else
{
*pGray=0;
}
pGray++;
pRGB+=3;
}
}

}
// implementation of otsu algorithm
// author: onezeros#yahoo.cn
// reference: Rafael C. Gonzalez. Digital Image Processing Using MATLAB
void cvThresholdOtsu(IplImage* src, IplImage* dst)
{
int height=src->height;
int width=src->width;

//histogram
float histogram[256]={0};
for(int i=0;i<height;i++) {
unsigned char* p=(unsigned char*)src->imageData+src->widthStep*i;
for(int j=0;j<width;j++) {
histogram[*p++]++;
}
}
//normalize histogram
int size=height*width;
for(int i=0;i<256;i++) {
histogram[i]=histogram[i]/size;
}

//average pixel value
float avgValue=0;
for(int i=0;i<256;i++) {
avgValue+=i*histogram[i];
}

int threshold = 0;   // initialize so the value is defined even if no variance exceeds zero
float maxVariance=0;
float w=0,u=0;
for(int i=0;i<256;i++) {
w+=histogram[i];
u+=i*histogram[i];

float t=avgValue*w-u;
float variance=t*t/(w*(1-w));
if(variance>maxVariance) {
maxVariance=variance;
threshold=i;
}
}

cvThreshold(src,dst,threshold,255,CV_THRESH_BINARY);
}

void cvSkinOtsu(IplImage* src, IplImage* dst)
{
assert(dst->nChannels==1&& src->nChannels==3);

IplImage* ycrcb=cvCreateImage(cvGetSize(src),8,3);
IplImage* cr=cvCreateImage(cvGetSize(src),8,1);
cvCvtColor(src,ycrcb,CV_BGR2YCrCb);
cvSplit(ycrcb,0,cr,0,0);

cvThresholdOtsu(cr,cr);
cvCopyImage(cr,dst);
cvReleaseImage(&cr);
cvReleaseImage(&ycrcb);
}

void cvSkinYUV(IplImage* src,IplImage* dst)
{
IplImage* ycrcb=cvCreateImage(cvGetSize(src),8,3);
//IplImage* cr=cvCreateImage(cvGetSize(src),8,1);
//IplImage* cb=cvCreateImage(cvGetSize(src),8,1);
cvCvtColor(src,ycrcb,CV_BGR2YCrCb);
//cvSplit(ycrcb,0,cr,cb,0);

static const int Cb=2;
static const int Cr=1;
static const int Y=0;

//IplImage* dst=cvCreateImage(cvGetSize(_dst),8,3);
cvZero(dst);

for (int h=0;h<src->height;h++) {
unsigned char* pycrcb=(unsigned char*)ycrcb->imageData+h*ycrcb->widthStep;
unsigned char* psrc=(unsigned char*)src->imageData+h*src->widthStep;
unsigned char* pdst=(unsigned char*)dst->imageData+h*dst->widthStep;
for (int w=0;w<src->width;w++) {
if (pycrcb[Cr]>=133&&pycrcb[Cr]<=173&&pycrcb[Cb]>=77&&pycrcb[Cb]<=127)
{
memcpy(pdst,psrc,3);
}
pycrcb+=3;
psrc+=3;
pdst+=3;
}
}
//cvCopyImage(dst,_dst);
//cvReleaseImage(&dst);
}

void cvSkinHSV(IplImage* src,IplImage* dst)
{
IplImage* hsv=cvCreateImage(cvGetSize(src),8,3);
//IplImage* cr=cvCreateImage(cvGetSize(src),8,1);
//IplImage* cb=cvCreateImage(cvGetSize(src),8,1);
cvCvtColor(src,hsv,CV_BGR2HSV);
//cvSplit(ycrcb,0,cr,cb,0);

static const int V=2;
static const int S=1;
static const int H=0;

//IplImage* dst=cvCreateImage(cvGetSize(_dst),8,3);
cvZero(dst);

for (int h=0;h<src->height;h++) {
unsigned char* phsv=(unsigned char*)hsv->imageData+h*hsv->widthStep;
unsigned char* psrc=(unsigned char*)src->imageData+h*src->widthStep;
unsigned char* pdst=(unsigned char*)dst->imageData+h*dst->widthStep;
for (int w=0;w<src->width;w++) {
if (phsv[H]>=7&&phsv[H]<=29)
{
memcpy(pdst,psrc,3);
}
phsv+=3;
psrc+=3;
pdst+=3;
}
}
//cvCopyImage(dst,_dst);
//cvReleaseImage(&dst);
}

int main()
{

IplImage* img= cvLoadImage("D:/skin.jpg"); // put any jpg image in D:\ or change the path
IplImage* dstRGB=cvCreateImage(cvGetSize(img),8,3);
IplImage* dstRG=cvCreateImage(cvGetSize(img),8,1);
IplImage* dst_crotsu=cvCreateImage(cvGetSize(img),8,1);
IplImage* dst_YUV=cvCreateImage(cvGetSize(img),8,3);
IplImage* dst_HSV=cvCreateImage(cvGetSize(img),8,3);

cvNamedWindow("inputimage", CV_WINDOW_AUTOSIZE);
cvShowImage("inputimage", img);
cvWaitKey(0);

SkinRGB(img,dstRGB);
cvNamedWindow("outputimage1", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage1", dstRGB);
cvWaitKey(0);
cvSkinRG(img,dstRG);
cvNamedWindow("outputimage2", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage2", dstRG);
cvWaitKey(0);
cvSkinOtsu(img,dst_crotsu);
cvNamedWindow("outputimage3", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage3", dst_crotsu);
cvWaitKey(0);
cvSkinYUV(img,dst_YUV);
cvNamedWindow("outputimage4", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage4", dst_YUV);
cvWaitKey(0);
cvSkinHSV(img,dst_HSV);
cvNamedWindow("outputimage5", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage5", dst_HSV);
cvWaitKey(0);
return 0;
}

❼ How to access the pixels of a cv::Mat in OpenCV

OpenCV 2 offers several ways of accessing the individual pixels of an image.

If div is 8, the 256 levels of each RGB channel are reduced to 32.

If div is 64, the 256 levels of each RGB channel are reduced to 4, so the three channels together can represent 4 × 4 × 4 = 64 colors.

Consider the following member function:

C++: uchar* Mat::ptr(int i = 0)

Here i is the row index, and the function returns a pointer to the data of that row.
In OpenCV, one pixel of a 3-channel image is stored in BGR order.
Now look at the first access scheme:
void colorRece1(cv::Mat& image, cv::Mat& result, int div = 64)
{
    int nrow = image.rows;
    int ncol = image.cols * image.channels();
    for (int i = 0; i < nrow; i++)
    {
        uchar* data = image.ptr(i);        // pointer to row i of the input
        uchar* data_out = result.ptr(i);   // pointer to row i of the output
        for (int j = 0; j < ncol; j++)
        {
            // quantize each channel value to the center of its bin
            data_out[j] = data[j] / div * div + div / 2;
        }
    }
}

❽ A question about cvSize in OpenCV: how should it be handled?

OpenCV offers five ways of accessing image data. To access the pixel at channel k, row i, column j, the indirect way (general, works for any image format, but slow) starts, for a single-channel byte image, from: IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1); (the answer breaks off at this point; a sketch of the idea follows).
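A minimal sketch of the indirect and direct access styles this answer alludes to, assuming the legacy C API (IplImage, cvGet2D/cvSet2D, CV_IMAGE_ELEM) is available, as in OpenCV 1.x/2.x:

#include <opencv2/opencv.hpp>   // in OpenCV 2.x this also pulls in the legacy C API

int main()
{
    // single-channel 8-bit image, as in the answer above
    IplImage* img = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 1);
    cvZero(img);

    int i = 100, j = 200;                      // arbitrary row / column for illustration

    // indirect access: works for any image format, but slow
    CvScalar s = cvGet2D(img, i, j);
    cvSet2D(img, i, j, cvScalar(s.val[0] + 1));

    // direct access through the CV_IMAGE_ELEM macro: fast, element type must match the depth
    CV_IMAGE_ELEM(img, uchar, i, j) = 255;

    cvReleaseImage(&img);
    return 0;
}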

❾ How to access the value of every pixel in an image

#include"cv.h"
#include"highgui.h"
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
IplImage *myimage = cvLoadImage("D:\\w.jpg",1);//通过图片路径载入图片,参数1是彩色图片,0灰度图片
cvShowImage("w",myimage);
cvWaitKey(2000);//显示的时间2000ms
//像素遍历操作
for(int y=0;y<myimage->height;y++)
{
for(int x=0;x<myimage->width;x++)
{
((uchar*)(myimage ->imageData + myimage->widthStep*y))[x*3]=0;//(0-255) 蓝色通道像素
((uchar*)(myimage ->imageData + myimage->widthStep*y))[x*3+1]=111;//(0-255) 绿色通道像素
((uchar*)(myimage ->imageData + myimage->widthStep*y))[x*3+2]=111;//(0-255) 红色通道像素
}
}//
cvShowImage("w",myimage);
cvWaitKey();
}//修改RGB三个通道的值会使得图片像素值变化,你们可以试试只修改一个通道的值,而其他两个通道值不变,体会下效果
//彩色图片的像素点值得读取就是把三个通道的值取出来,灰度图的读取就是把每个像素点的单通道值取出来
This code reassigns the pixel values of an existing image; if you only want to read them, skip the assignments and simply printf the three channel values. I hope that makes it clear.

❿ A problem with reading pixel values through imageData in OpenCV

IplImage* img=cvLoadImage(imageName);

cout<<img->width<<","<<img->height<<","<<img->widthStep;
for(int y=0;y<img->height;y++)
{
unsigned char* p=(unsigned char*)(img->imageData+y*img->widthStep);
for(int x=0;x<img->width*img->nChannels;x++)
{
printf("%d ",p[x]);
}

}
Why did you cast the pointer to int? The data is stored as char; using an int pointer changes the addressing stride, so you end up reading memory outside the image. Also, are you sure your image is single-channel?
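A minimal sketch of the point being made, assuming a hypothetical "lena.jpg" and the legacy C API: keep the element type unsigned char and step rows by widthStep.

#include <opencv2/opencv.hpp>   // legacy C API (IplImage) assumed available
#include <cstdio>

int main()
{
    IplImage* img = cvLoadImage("lena.jpg", CV_LOAD_IMAGE_UNCHANGED);  // hypothetical file
    if (!img) return -1;

    for (int y = 0; y < img->height; y++)
    {
        // imageData holds bytes, so read it through an unsigned char pointer;
        // an int* would advance 4 bytes per element and run past the end of each row
        const unsigned char* p = (const unsigned char*)(img->imageData + y * img->widthStep);
        for (int x = 0; x < img->width * img->nChannels; x++)
        {
            printf("%d ", p[x]);               // one byte per channel, for any channel count
        }
        printf("\n");
    }

    cvReleaseImage(&img);
    return 0;
}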
