當前位置:首頁 » 密碼管理 » opencv訪問像素

opencv訪問像素

發布時間: 2022-09-23 22:46:03

❶ 使用OpenCV如何獲取圖像每個像素的灰度值

#include "cv.h"
#include "highgui.h"
#include <iostream>
using namespace std;

// Print the gray value of every pixel of an image loaded as single-channel.
int main(int argc, char** argv)
{
    // Flag 0 forces a grayscale (single-channel) load.
    IplImage* src = cvLoadImage("0.bmp", 0);
    if (!src) {                       // loading can fail; guard before dereferencing
        cerr << "cannot load 0.bmp" << endl;
        return 1;
    }
    int width = src->width;           // image width in pixels
    int height = src->height;         // image height in pixels
    for (int row = 0; row < height; row++)
    {
        // BUG FIX: rows are padded to widthStep bytes (4-byte alignment);
        // using src->width as the row stride reads the wrong pixels whenever
        // the width is not a multiple of the alignment.
        uchar* ptr = (uchar*)src->imageData + row * src->widthStep;
        for (int col = 0; col < width; col++)
        {
            int intensity = ptr[col];  // gray value of pixel (row, col)
            cout << intensity << " ";
        }
    }
    cvReleaseImage(&src);             // release the image to avoid a leak
    return 0;
}

❷ opencv如何讀取多邊形區域內的像素值

有幾個方法:

1,將輪廓數據通過drawContours畫到一張圖像上,遍歷圖像,逐一判斷點是否在多邊形內。使用pointPolygonTest函數。

2,將輪廓數據通過drawContours(參數中要選擇填充模式,顏色白色)畫到一張黑色圖片上,填充顏色為白色,使用countNonZero找到圖片中的非零像素數。注意邊界條件,可能最後的結果需要減去輪廓長度(如果輪廓和內部顏色一致的話,我理解的輪廓邊緣的點不算輪廓內)。

PS:DrawContours:在圖像上繪制外部和內部輪廓,函數DrawContours用於在圖像上繪制外部和內部輪廓。當thickness >= 0 時,繪制輪廓線;否則填充由輪廓包圍的部分。

❸ 如何用opencv訪問圖像的像素

#include"cv.h" #include"highgui.h" #include <iostream> #include <opencv2/opencv.hpp> using namespace std; using namespace cv; int main(int argc, char** argv) { IplImage *myimage = cvLoadImage("D:\\w.jpg",1);//通過圖片路徑載入圖片,參數1是彩色圖片,

❹ opencv 怎麼取到灰度圖像的像素值,C++介面的

opencv獲取灰度圖像的像素值的方法如下:

int main()
{
Mat img = imread("lena.jpg");
imshow("Lena Original", img);

for (int row = 0; row < img.rows; row++)
{
for (int col = 0; col < img.cols; col++)
{
//主要是這里的代碼
if(*(img.data + img.step[0] * row + img.step[1] * col + img.elemSize1() * 2) > 128)
{
//[row, col]像素的第 1 通道地址被 * 解析(blue通道)
*(img.data + img.step[0] * row + img.step[1] * col) = 255;
//[row, col]像素的第 2 通道地址被 * 解析(green通道), 關於elemSize1函數的更多描述請見 Fn1 里所列的博文鏈接
*(img.data + img.step[0] * row + img.step[1] * col + img.elemSize1()) = 255;
//[row, col]像素的第 3 通道地址被 * 解析(red通道)
*(img.data + img.step[0] * row + img.step[1] * col + img.elemSize1() * 2) = 255;
}
}
}
imshow("Lena Modified", img);
cvWaitKey();
return 0;
}

輸出:

❺ opencv如何計算圖像中物體的像素值

OpenCV中獲取圖像某一像素值
This is a basic example for the OpenCV.

First we must know the structure of IplImage:
IPL image:
IplImage
|-- int nChannels; // Number of color channels (1,2,3,4)
|-- int depth; // Pixel depth in bits:
| // IPL_DEPTH_8U, IPL_DEPTH_8S,
| // IPL_DEPTH_16U,IPL_DEPTH_16S,
| // IPL_DEPTH_32S,IPL_DEPTH_32F,
| // IPL_DEPTH_64F
|-- int width; // image width in pixels
|-- int height; // image height in pixels
|-- char* imageData; // pointer to aligned image data
| // Note that color images are stored in BGR order
|-- int dataOrder; // 0 - interleaved color channels,
| // 1 - separate color channels
| // cvCreateImage can only create interleaved images
|-- int origin; // 0 - top-left origin,
| // 1 - bottom-left origin (Windows bitmaps style)
|-- int widthStep; // size of aligned image row in bytes
|-- int imageSize; // image data size in bytes = height*widthStep
|-- struct _IplROI *roi;// image ROI. when not NULL specifies image
| // region to be processed.
|-- char *imageDataOrigin; // pointer to the unaligned origin of image data
| // (needed for correct image deallocation)
|
|-- int align; // Alignment of image rows: 4 or 8 byte alignment
| // OpenCV ignores this and uses widthStep instead
|-- char colorModel[4]; // Color model - ignored by OpenCV
//------------------------------------------------------------------------------int main(int argc, char* argv[])
...{
IplImage *img=cvLoadImage("c://fruitfs.bmp",1);
CvScalar s;
for(int i=0;i<img->height;i++)...{
for(int j=0;j<img->width;j++)...{
s=cvGet2D(img,i,j); // get the (i,j) pixel value
printf("B=%f, G=%f, R=%f ",s.val[0],s.val[1],s.val[2]);
s.val[0]=111;
s.val[1]=111;
s.val[2]=111;
cvSet2D(img,i,j,s);//set the (i,j) pixel value
}
}
cvNamedWindow("Image",1);
cvShowImage("Image",img);
cvWaitKey(0); //等待按鍵
cvDestroyWindow( "Image" );//銷毀窗口
cvReleaseImage( &img ); //釋放圖像
return 0;
}

其實還有更好的方法,例如將其封裝成類,調用起來更加方便,效率也很高。

❻ opencv訪問像素程序中這句話怎麼理解(uchar*)src->imageData + i*src->width

這句話把圖像數據首地址 imageData 轉成 uchar* 後加上 i*src->width,得到第 i 行像素的起始指針。注意嚴格來說行距應該用 src->widthStep(每行實際佔用的位元組數,含 4 位元組對齊填充);當圖像寬度不是對齊的倍數時用 width 會錯位。下面順帶給出幾種膚色檢測的參考代碼:

第一種:RGB color space
第二種:RG color space
第三種:Ycrcb之cr分量+otsu閾值化
第四種:YCrCb中133<=Cr<=173 77<=Cb<=127
第五種:HSV中 7<H<29
下一步需要濾波操作 因為檢測結果中有許多瑕疵
[cpp] view plain
#include "highgui.h"
#include "cv.h"

// skin region location using rgb limitation
// Skin-region detection with fixed RGB thresholds: pixels satisfying either
// the uniform-illumination or the lateral-illumination rule are copied from
// rgb into _dst; all other output pixels stay black.
void SkinRGB(IplImage* rgb, IplImage* _dst)
{
    assert(rgb->nChannels == 3 && _dst->nChannels == 3);

    static const int R = 2;
    static const int G = 1;
    static const int B = 0;

    // Work in a scratch image so _dst may alias rgb safely.
    IplImage* dst = cvCreateImage(cvGetSize(_dst), 8, 3);
    cvZero(dst);

    for (int y = 0; y < rgb->height; y++) {
        unsigned char* srcRow = (unsigned char*)rgb->imageData + y * rgb->widthStep;
        unsigned char* dstRow = (unsigned char*)dst->imageData + y * dst->widthStep;
        for (int x = 0; x < rgb->width; x++) {
            unsigned char* px = srcRow + 3 * x;
            // Rule 1: uniform illumination.
            bool uniform = px[R] > 95 && px[G] > 40 && px[B] > 20 &&
                           px[R] - px[B] > 15 && px[R] - px[G] > 15;
            // Rule 2: lateral illumination.
            bool lateral = px[R] > 200 && px[G] > 210 && px[B] > 170 &&
                           abs(px[R] - px[B]) <= 15 &&
                           px[R] > px[B] && px[G] > px[B];
            if (uniform || lateral) {
                memcpy(dstRow + 3 * x, px, 3);  // keep the original BGR triple
            }
        }
    }
    cvCopyImage(dst, _dst);
    cvReleaseImage(&dst);
}
// skin detection in rg space
// Skin detection in normalized rg chromaticity space: writes 255 into `gray`
// where the chromaticity of `rgb` falls between two quadratic boundaries and
// away from the white point, 0 elsewhere.
void cvSkinRG(IplImage* rgb, IplImage* gray)
{
    assert(rgb->nChannels == 3 && gray->nChannels == 1);

    const int R = 2;
    const int G = 1;
    const int B = 0;

    // Coefficients of the upper/lower quadratic boundaries of the skin locus.
    double Aup = -1.8423;
    double Bup = 1.5294;
    double Cup = 0.0422;
    double Adown = -0.7279;
    double Bdown = 0.6066;
    double Cdown = 0.1766;
    for (int h = 0; h < rgb->height; h++) {
        unsigned char* pGray = (unsigned char*)gray->imageData + h * gray->widthStep;
        unsigned char* pRGB = (unsigned char*)rgb->imageData + h * rgb->widthStep;
        for (int w = 0; w < rgb->width; w++)
        {
            int s = pRGB[R] + pRGB[G] + pRGB[B];
            // BUG FIX: a pure-black pixel gives s == 0, so the normalized
            // coordinates below would divide by zero (NaN). Classify it as
            // non-skin explicitly instead of relying on NaN comparisons.
            if (s == 0) {
                *pGray = 0;
            } else {
                double r = (double)pRGB[R] / s;
                double g = (double)pRGB[G] / s;
                double Gup = Aup * r * r + Bup * r + Cup;
                double Gdown = Adown * r * r + Bdown * r + Cdown;
                // Squared distance from the white point (1/3, 1/3) rejects gray pixels.
                double Wr = (r - 0.33) * (r - 0.33) + (g - 0.33) * (g - 0.33);
                *pGray = (g < Gup && g > Gdown && Wr > 0.004) ? 255 : 0;
            }
            pGray++;
            pRGB += 3;
        }
    }
}
// implementation of otsu algorithm
// author: onezeros#yahoo.cn
// reference: Rafael C. Gonzalez. Digital Image Processing Using MATLAB

// Return the Otsu threshold (0..255) for a normalized 256-bin histogram
// (bins must sum to ~1). Pure helper so the math is isolated from image I/O.
static int cvOtsuThresholdFromHist(const float histogram[256])
{
    // Mean pixel value of the whole image.
    float avgValue = 0;
    for (int i = 0; i < 256; i++) {
        avgValue += i * histogram[i];
    }

    int threshold = 0;    // BUG FIX: was uninitialized; a histogram where the
                          // variance never exceeds 0 would have returned garbage.
    float maxVariance = 0;
    float w = 0, u = 0;   // w: class-0 probability, u: class-0 mean mass
    for (int i = 0; i < 256; i++) {
        w += histogram[i];
        u += i * histogram[i];
        // BUG FIX: when w is 0 or 1 the between-class variance below divides
        // by zero; such cuts put every pixel in one class, so skip them.
        if (w <= 0 || w >= 1) {
            continue;
        }
        float t = avgValue * w - u;
        float variance = t * t / (w * (1 - w));
        if (variance > maxVariance) {
            maxVariance = variance;
            threshold = i;
        }
    }
    return threshold;
}

// Binarize 8-bit single-channel `src` into `dst` using Otsu's threshold.
void cvThresholdOtsu(IplImage* src, IplImage* dst)
{
    int height = src->height;
    int width = src->width;

    // Build the gray-level histogram (rows addressed via widthStep).
    float histogram[256] = {0};
    for (int i = 0; i < height; i++) {
        unsigned char* p = (unsigned char*)src->imageData + src->widthStep * i;
        for (int j = 0; j < width; j++) {
            histogram[*p++]++;
        }
    }
    // Normalize so the bins sum to 1.
    int size = height * width;
    for (int i = 0; i < 256; i++) {
        histogram[i] = histogram[i] / size;
    }

    int threshold = cvOtsuThresholdFromHist(histogram);
    cvThreshold(src, dst, threshold, 255, CV_THRESH_BINARY);
}

// Skin mask via Otsu thresholding of the Cr channel: convert src to YCrCb,
// isolate the Cr plane, binarize it with cvThresholdOtsu, and copy the
// resulting mask into dst.
void cvSkinOtsu(IplImage* src, IplImage* dst)
{
    assert(dst->nChannels == 1 && src->nChannels == 3);

    IplImage* yccImg = cvCreateImage(cvGetSize(src), 8, 3);
    IplImage* crPlane = cvCreateImage(cvGetSize(src), 8, 1);

    cvCvtColor(src, yccImg, CV_BGR2YCrCb);   // BGR -> YCrCb
    cvSplit(yccImg, 0, crPlane, 0, 0);       // keep only the Cr plane

    cvThresholdOtsu(crPlane, crPlane);       // in-place Otsu binarization
    cvCopyImage(crPlane, dst);

    cvReleaseImage(&crPlane);
    cvReleaseImage(&yccImg);
}

// Skin detection in YCrCb: a pixel is copied from src to dst when its Cr is
// in [133, 173] and its Cb is in [77, 127]; every other output pixel stays
// black. Channel layout after cvCvtColor is Y=0, Cr=1, Cb=2.
void cvSkinYUV(IplImage* src, IplImage* dst)
{
    IplImage* ycrcb = cvCreateImage(cvGetSize(src), 8, 3);
    cvCvtColor(src, ycrcb, CV_BGR2YCrCb);

    static const int Cb = 2;
    static const int Cr = 1;

    cvZero(dst);

    for (int h = 0; h < src->height; h++) {
        unsigned char* pycrcb = (unsigned char*)ycrcb->imageData + h * ycrcb->widthStep;
        unsigned char* psrc = (unsigned char*)src->imageData + h * src->widthStep;
        unsigned char* pdst = (unsigned char*)dst->imageData + h * dst->widthStep;
        for (int w = 0; w < src->width; w++) {
            if (pycrcb[Cr] >= 133 && pycrcb[Cr] <= 173 &&
                pycrcb[Cb] >= 77 && pycrcb[Cb] <= 127)
            {
                memcpy(pdst, psrc, 3);  // keep the original BGR triple
            }
            pycrcb += 3;
            psrc += 3;
            pdst += 3;
        }
    }
    // BUG FIX: the temporary YCrCb image was never released (memory leak).
    cvReleaseImage(&ycrcb);
}

// Skin detection in HSV: a pixel is copied from src to dst when its hue is
// in [7, 29]; every other output pixel stays black. Channel layout after
// cvCvtColor is H=0, S=1, V=2.
void cvSkinHSV(IplImage* src, IplImage* dst)
{
    IplImage* hsv = cvCreateImage(cvGetSize(src), 8, 3);
    cvCvtColor(src, hsv, CV_BGR2HSV);

    static const int H = 0;

    cvZero(dst);

    for (int h = 0; h < src->height; h++) {
        unsigned char* phsv = (unsigned char*)hsv->imageData + h * hsv->widthStep;
        unsigned char* psrc = (unsigned char*)src->imageData + h * src->widthStep;
        unsigned char* pdst = (unsigned char*)dst->imageData + h * dst->widthStep;
        for (int w = 0; w < src->width; w++) {
            if (phsv[H] >= 7 && phsv[H] <= 29)
            {
                memcpy(pdst, psrc, 3);  // keep the original BGR triple
            }
            phsv += 3;
            psrc += 3;
            pdst += 3;
        }
    }
    // BUG FIX: the temporary HSV image was never released (memory leak).
    cvReleaseImage(&hsv);
}

int main()
{

IplImage* img= cvLoadImage("D:/skin.jpg"); //隨便放一張jpg圖片在D盤或另行設置目錄
IplImage* dstRGB=cvCreateImage(cvGetSize(img),8,3);
IplImage* dstRG=cvCreateImage(cvGetSize(img),8,1);
IplImage* dst_crotsu=cvCreateImage(cvGetSize(img),8,1);
IplImage* dst_YUV=cvCreateImage(cvGetSize(img),8,3);
IplImage* dst_HSV=cvCreateImage(cvGetSize(img),8,3);

cvNamedWindow("inputimage", CV_WINDOW_AUTOSIZE);
cvShowImage("inputimage", img);
cvWaitKey(0);

SkinRGB(img,dstRGB);
cvNamedWindow("outputimage1", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage1", dstRGB);
cvWaitKey(0);
cvSkinRG(img,dstRG);
cvNamedWindow("outputimage2", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage2", dstRG);
cvWaitKey(0);
cvSkinOtsu(img,dst_crotsu);
cvNamedWindow("outputimage3", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage3", dst_crotsu);
cvWaitKey(0);
cvSkinYUV(img,dst_YUV);
cvNamedWindow("outputimage4", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage4", dst_YUV);
cvWaitKey(0);
cvSkinHSV(img,dst_HSV);
cvNamedWindow("outputimage5", CV_WINDOW_AUTOSIZE);
cvShowImage("outputimage5", dst_HSV);
cvWaitKey(0);
return 0;
}

❼ opencv 怎麼訪問cv

OpenCV2 訪問圖像的各個像素有各種方法

若div為8,則原來RGB每個通道的256種顏色減少為32種。

若div為64,則原來RGB每個通道的256種顏色減少為4種,此時三通道所有能表示的顏色有4×4×4 = 64

看一個函數

C++: uchar* Mat::ptr(int i=0)
i 是行號,返回的是該行數據的指針。
在OpenCV中,一張3通道圖像的一個像素點是按BGR的順序存儲的。
先來看看第一種訪問方案
// Color-reduce `image` into `result`: each channel value is snapped to the
// center of its div-sized bucket (e.g. div=64 leaves 4 levels per channel).
// `result` must already be allocated with the same size/type as `image`.
void colorRece1(cv::Mat& image, cv::Mat& result, int div = 64)
{
    int nrow = image.rows;
    // Treat each row as a flat array of interleaved channel bytes.
    int ncol = image.cols * image.channels();
    // BUG FIX: the loop bounds were lost when this snippet was pasted (the
    // '<' characters were eaten by the HTML); restored here.
    for (int i = 0; i < nrow; i++) {
        uchar* data = image.ptr(i);        // start of input row i
        uchar* data_out = result.ptr(i);   // start of output row i
        for (int j = 0; j < ncol; j++) {
            // Integer division floors to the bucket start; +div/2 centers it.
            data_out[j] = data[j] / div * div + div / 2;
        }
    }
}

❽ opencv求教cvsize,該怎麼處理

Opencv中訪問數據可以有5種類型,如下: 3、訪問圖像像素 (1) 假設你要訪問第k通道、第i行、第j列的像素。 (2) 間接訪問: (通用,但效率低,可訪問任意格式的圖像) 對於單通道位元組型圖像: IplImage* img=cvCreateImage(cvSize(640,480),IPL_DEPTH_8U,1);

❾ 怎麼訪問圖像中每個像素的值

#include "cv.h"
#include "highgui.h"
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;

// Load a color image, show it, then overwrite every pixel's BGR channels in
// place to demonstrate direct access through imageData/widthStep.
int main(int argc, char** argv)
{
    // Load by path; flag 1 = color image, 0 = grayscale.
    IplImage* myimage = cvLoadImage("D:\\w.jpg", 1);
    if (!myimage) {        // BUG FIX: cvShowImage would crash on a NULL image
        cout << "cannot load D:\\w.jpg" << endl;
        return 1;
    }
    cvShowImage("w", myimage);
    cvWaitKey(2000);       // display for 2000 ms
    // Pixel traversal: each row starts at imageData + widthStep*y, and pixel
    // x occupies 3 consecutive bytes in BGR order.
    for (int y = 0; y < myimage->height; y++)
    {
        uchar* row = (uchar*)(myimage->imageData + myimage->widthStep * y);
        for (int x = 0; x < myimage->width; x++)
        {
            row[x * 3] = 0;        // blue channel (0-255)
            row[x * 3 + 1] = 111;  // green channel (0-255)
            row[x * 3 + 2] = 111;  // red channel (0-255)
        }
    }
    cvShowImage("w", myimage);
    cvWaitKey();
    cvReleaseImage(&myimage);  // BUG FIX: the loaded image was never released
    return 0;
}
//彩色圖片的像素點值得讀取就是把三個通道的值取出來,灰度圖的讀取就是把每個像素點的單通道值取出來
這是將現有的圖片像素值重新賦值,你要讀出來就不賦值,直接printf三個通道的值就行了,不知道樓主懂了沒?

❿ opencv使用imageData讀取像素值問題

// Load the image and dump every byte of its pixel data, row by row.
IplImage* img=cvLoadImage(imageName);

// Note: widthStep (bytes per row, including alignment padding) can be larger
// than width*nChannels, which is why each row is addressed through it below.
cout<<img->width<<","<<img->height<<","<<img->widthStep;
for(int y=0;y<img->height;y++)
{
// Pointer to the first byte of row y in the pixel buffer.
unsigned char* p=(unsigned char*)(img->imageData+y*img->widthStep);
// Walk every channel byte of the row (channels are interleaved, BGR order).
for(int x=0;x<img->width*img->nChannels;x++)
{
printf("%d ",p[x]);
}

}
你為什麼要改為int型呢,存儲的是char,你使用int型導致定址方式改變,訪問了越界的內存。另外,你確定你的圖像是單通道圖像么?

熱點內容
編程畫櫻花 發布:2024-03-29 02:11:24 瀏覽:471
騰訊雲伺服器1mb老掉線 發布:2024-03-29 01:56:11 瀏覽:213
執行sql語句的存儲過程 發布:2024-03-29 01:52:37 瀏覽:695
婚紗攝影腳本 發布:2024-03-29 01:47:40 瀏覽:899
我的世界伺服器咋開外掛 發布:2024-03-29 01:07:45 瀏覽:455
sql寫報表 發布:2024-03-29 01:03:23 瀏覽:305
家用伺服器怎麼選 發布:2024-03-29 00:49:18 瀏覽:401
Ap6510dn如何配置 發布:2024-03-29 00:38:47 瀏覽:333
安卓和蘋果哪個更佔用內存 發布:2024-03-29 00:37:02 瀏覽:424
編譯錯誤算bug嗎 發布:2024-03-29 00:23:03 瀏覽:34