rx algorithms
① Help with the SGM (semi-global matching) algorithm
#include "stdafx.h"
#include <cstdio>
#include <cstring>
#include <iostream>
#include<cv.h>
#include<highgui.h>
#include <cmath>
#include <ctime>
#include <algorithm>
using namespace std;
const int Width = 1024;
const int Height = 1024;
int Lvalue[Width][Width];
uchar C(int x, int y, int d, IplImage * matchImage, IplImage * baseImage)
{
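// Matching cost C(x, y, d): Birchfield-Tomasi sampling-insensitive dissimilarity between
// the base-image pixel at (x, y) and the match-image pixel shifted by the disparity d.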
uchar * pMatchPixel = NULL;
uchar * pBasePixel = NULL;
uchar matchPixel = 0;
uchar basePixel =0;
uchar matchMax = 0;
uchar matchMin = 0;
uchar tempMatch1 = 0;
uchar tempMatch2 = 0;
uchar difPixel = 0;
if (x+d >= 1 && x+d <= matchImage->width - 2) // leave room for the -1/+1 neighbour reads below
{
pMatchPixel = (uchar *)matchImage->imageData + y*matchImage->widthStep + (x+d);
matchPixel = *pMatchPixel;
pBasePixel= (uchar *)baseImage->imageData + y*baseImage->widthStep + x;
basePixel = *pBasePixel;
// averages of the match-image pixel with its left and right neighbours (linear interpolation)
tempMatch1 = (*pMatchPixel +(*(pMatchPixel -1)))/2;
tempMatch2 = (*pMatchPixel +(*(pMatchPixel +1)))/2;
matchMax = max(max(tempMatch1,tempMatch2),matchPixel);
matchMin = min(min(tempMatch1,tempMatch2),matchPixel);
// matching cost C for disparity d
// Birchfield-Tomasi (BT) measure
difPixel = max(max(basePixel - matchMax, matchMin - basePixel),0);
// AD (absolute difference) measure
//difPixel = abs(basePixel - matchPixel);
return difPixel;
}
else
return 255;
}
int main()
{
IplImage * leftImage = cvLoadImage("headL.png",0);
IplImage * rightImage = cvLoadImage("headR.png",0);
int imageWidth = leftImage->width;
int imageHeight =leftImage->height;
int minLvalue = 1000;
int minL1 = 1000;
int minL2 = 1000;
int P1 = 2;
int P2 = 5;
int disparity= 0;
int minDis = 0;
int maxDis = 21;
int scale = 12;
unsigned char * pPixel = NULL;
#pragma region Horizontal DP (right image as reference, 0-degree direction)
IplImage * MyDPImage_0 = cvCreateImage(cvGetSize(leftImage),leftImage->depth,1);
cvZero(MyDPImage_0);
int t3 = clock();
for (int i = 0; i < imageHeight;i++)
{
for (int j = 0; j<imageWidth ;j++)
{
disparity = 0;
minL1 = 1000;
minL2 = 1000;
for (int d = minDis; d <= maxDis; d++)
{
// initialise with the value of the cost function
Lvalue[j][d] = C(j, i, d, leftImage, rightImage);
if (j > 0)
{
minL1 = min(minL1, Lvalue[j-1][d]);
}
}
for (int d = minDis; d <= maxDis; d++)
{
if (j > 0)
{
minL2 = 1000; // reset for each disparity: the recurrence takes a fresh minimum per d
minL2 = min(minL2, Lvalue[j-1][d]);
if (d < maxDis) minL2 = min(minL2, Lvalue[j-1][d+1] + P1);
if (d > minDis) minL2 = min(minL2, Lvalue[j-1][d-1] + P1);
minL2 = min(minL2, minL1 + P2);
Lvalue[j][d] = Lvalue[j][d] + (minL2 - minL1);
}
}
int minCost = Lvalue[j][0]; // minimum aggregated cost over all disparities
for (int d = minDis; d <= maxDis; ++d)
{
if (Lvalue[j][d] < minCost)
{
disparity = d;
minCost = Lvalue[j][d];
}
}
disparity=disparity*scale;
// write the disparity map
pPixel = (uchar *)MyDPImage_0->imageData + i*MyDPImage_0->widthStep + j;
*pPixel =disparity;
}
}
int t4 = clock();
cout<<"橫向DP共用時: "<<t4-t3<<"ms"<<endl;
cvNamedWindow("MyDPImage_0", 1);
cvShowImage("MyDPImage_0", MyDPImage_0);
cvSaveImage("MyDPImage_0.jpg", MyDPImage_0);
#pragma endregion
#pragma region Horizontal DP (left image as reference, 0-degree direction)
IplImage * MyDPImage_0_L = cvCreateImage(cvGetSize(leftImage),leftImage->depth,1);
cvZero(MyDPImage_0_L);
for (int i = 0; i < imageHeight;i++)
{
for (int j = 0; j<imageWidth;j++)
{
disparity = 0;
minL1 = 1000;
minL2 = 1000;
for (int d = minDis; d <= maxDis; d++)
{
// initialise with the value of the cost function
Lvalue[j][d] = C(j, i, -d, rightImage, leftImage);
if (j > 0)
{
minL1 = min(minL1, Lvalue[j-1][d]);
}
}
for (int d = minDis; d <= maxDis; d++)
{
if (j > 0)
{
minL2 = 1000; // reset for each disparity
minL2 = min(minL2, Lvalue[j-1][d]);
if (d < maxDis) minL2 = min(minL2, Lvalue[j-1][d+1] + P1);
if (d > minDis) minL2 = min(minL2, Lvalue[j-1][d-1] + P1);
minL2 = min(minL2, minL1 + P2);
Lvalue[j][d] = Lvalue[j][d] + minL2 - minL1;
}
}
int minCost = Lvalue[j][0]; // minimum aggregated cost over all disparities
for (int d = minDis; d <= maxDis; ++d)
{
if (Lvalue[j][d] < minCost)
{
disparity = d;
minCost = Lvalue[j][d];
}
}
disparity = disparity * scale;
// write the disparity map
pPixel = (uchar *)MyDPImage_0_L->imageData + i*MyDPImage_0_L->widthStep + j;
*pPixel = disparity;
}
}
cvNamedWindow("MyDPImage_0_L", 1);
cvShowImage("MyDPImage_0_L", MyDPImage_0_L);
cvSaveImage("MyDPImage_0_L.jpg", MyDPImage_0_L);
#pragma endregion
#pragma region Left-right consistency check
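// A disparity from the right-based map is kept only if the left-based map, sampled at the
// corresponding column j + d, agrees within one level; rejected pixels are zeroed here
// and then filled from the nearer valid neighbour further below.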
uchar * pLeftDepthPixel = NULL;
uchar * pRightDepthPixel = NULL;
uchar leftDepthPixel = 0;
uchar rightDepthPixel =0;
uchar difDepthPixel = 0;
IplImage * CheckImage_0 = cvCloneImage(MyDPImage_0);
cvZero(CheckImage_0);
for (int i = 0; i < imageHeight; i++)
{
for(int j = 0; j < imageWidth; j++)
{
pRightDepthPixel = (uchar *)MyDPImage_0->imageData + i*MyDPImage_0->widthStep + j;
rightDepthPixel = *pRightDepthPixel;
if(j + rightDepthPixel/scale < imageWidth)
{
pLeftDepthPixel = (uchar *)MyDPImage_0_L->imageData + i*MyDPImage_0_L->widthStep + j + rightDepthPixel/scale;
leftDepthPixel = *pLeftDepthPixel;
}
else
leftDepthPixel = 0;
difDepthPixel = abs((leftDepthPixel - rightDepthPixel)/scale);
pPixel = (uchar *)CheckImage_0->imageData + i * CheckImage_0->widthStep +j;
if (difDepthPixel <= 1)
{
*pPixel = rightDepthPixel;
}
else
{
// otherwise set the disparity of the current pixel to zero
*pPixel = 0;
}
}
}
int lp,rp;
int lx,rx;
for(int i=0;i<imageHeight;i++)
{
for(int j=0;j<imageWidth;j++)
{
int tem = *((uchar *)CheckImage_0->imageData+i*CheckImage_0->widthStep + j);
if(tem==0)
{
lp = rp = 0;
lx = j;
rx = j;
if(lx-1<0)
lp= *((uchar *)CheckImage_0->imageData+i*CheckImage_0->widthStep + lx);
while((lp==0)&&( lx-1 >= 0 ))
lp = *((uchar *)CheckImage_0->imageData+i*CheckImage_0->widthStep + (--lx));
if(rx+1>=imageWidth)
rp = *((uchar *)CheckImage_0->imageData+i*CheckImage_0->widthStep + rx);
while((rp==0)&&(rx+1<imageWidth))
rp = *((uchar *)CheckImage_0->imageData+i*CheckImage_0->widthStep +(++rx));
if (lp > rp)
{
*((uchar *)CheckImage_0->imageData+i*CheckImage_0->widthStep + j) = rp;
}
else
{
*((uchar *)CheckImage_0->imageData+i*CheckImage_0->widthStep + j) = lp;
}
}
}
}
cvSmooth(CheckImage_0,CheckImage_0,CV_MEDIAN,3,0,0);
cvNamedWindow("CheckImage_0", 1);
cvShowImage("CheckImage_0", CheckImage_0);
cvSaveImage("CheckImage_0.jpg", CheckImage_0);
#pragma endregion
cout << "完成!"<<endl;
cvWaitKey(0);
return 0;
}
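For reference, the inner d-loops above are meant to implement the usual single-path SGM aggregation step. A minimal standalone sketch of that recurrence follows; the names cost, Lprev and Lcur are illustrative and are not part of the code above.
#include <algorithm>
#include <climits>
// One SGM aggregation step along a path: Lprev[0..D-1] are the aggregated costs of the
// previous pixel on the path, cost[0..D-1] the raw matching costs of the current pixel.
// P1 penalises a disparity change of 1, P2 penalises larger jumps.
void sgmPathStep(const int* cost, const int* Lprev, int* Lcur, int D, int P1, int P2)
{
    int minPrev = INT_MAX;
    for (int d = 0; d < D; ++d)
        minPrev = std::min(minPrev, Lprev[d]);
    for (int d = 0; d < D; ++d)
    {
        int best = Lprev[d];                                      // same disparity
        if (d > 0)     best = std::min(best, Lprev[d - 1] + P1);  // change by -1
        if (d < D - 1) best = std::min(best, Lprev[d + 1] + P1);  // change by +1
        best = std::min(best, minPrev + P2);                      // larger change
        Lcur[d] = cost[d] + best - minPrev;                       // subtraction keeps L bounded
    }
}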
② What current-limiting resistor do 3 LEDs need on a 10 V supply?
A single ordinary white LED: current 20 mA, forward voltage 3.2 V.
Three in series is 9.6 V; 20 strings in parallel draw 400 mA.
The supply can meet this output requirement.
Current-limiting resistor Rx for each string: 3.2 V × 3 + Rx × 20 mA = 12 V,
which gives Rx = 120 Ω.
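A quick numeric check of that formula, as a sketch: note that the worked answer above assumes a 12 V supply even though the question title says 10, and with a 10 V supply the same formula gives (10 - 9.6) V / 20 mA = 20 Ω. Both cases are computed below.
#include <cstdio>
int main()
{
    const double vForward = 3.2;    // forward voltage of one white LED, V
    const double iLed     = 0.020;  // current per string, A
    const int    nSeries  = 3;      // LEDs per string
    double supplies[] = { 12.0, 10.0 };  // 12 V as in the answer, 10 V as in the question
    for (double vSupply : supplies)
    {
        double r = (vSupply - nSeries * vForward) / iLed;  // Rx = (Vsupply - n*Vf) / I
        std::printf("Vsupply = %.1f V -> Rx = %.0f ohm\n", vSupply, r);
    }
    return 0;
}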
③ MATLAB program for the RX algorithm (urgently needed)
function [ROUTES,PL,Tau]=ACASP(G,Tau,K,M,S,E,Alpha,Beta,Rho,Q)
%% ---------------------------------------------------------------
% ACASP.m
% Ant colony algorithm for dynamic path finding
% ChengAihua,PLA Information Engineering University,ZhengZhou,China
% Email:[email protected]
% All rights reserved
%% ---------------------------------------------------------------
% Input arguments
% G      terrain map as a 0/1 matrix; 1 marks an obstacle
% Tau    initial pheromone matrix (pheromone assumed left over from earlier foraging)
% K      number of iterations (how many waves of ants are sent out)
% M      number of ants in each wave
% S      start node of the shortest path
% E      goal node of the shortest path
% Alpha  weight of the pheromone term
% Beta   weight of the heuristic term
% Rho    pheromone evaporation coefficient
% Q      pheromone deposit strength
%
% Output arguments
% ROUTES crawling path of every ant in every generation
% PL     length of the crawling path of every ant in every generation
% Tau    the dynamically updated pheromone matrix
%% -------------------- Variable initialisation --------------------
%load
D=G2D(G);
N=size(D,1);% N is the problem size (number of grid cells)
MM=size(G,1);
a=1;% side length of one grid cell
Ex=a*(mod(E,MM)-0.5);% x-coordinate of the goal point
if Ex==-0.5
Ex=MM-0.5;
end
Ey=a*(MM+0.5-ceil(E/MM));% y-coordinate of the goal point
Eta=zeros(1,N);% heuristic information, taken as the reciprocal of the straight-line distance to the goal
% build the heuristic information matrix
for i=1:N
ix=a*(mod(i,MM)-0.5);% x-coordinate of cell i
if ix==-0.5
ix=MM-0.5;
end
iy=a*(MM+0.5-ceil(i/MM));
if i~=E
Eta(1,i)=1/((ix-Ex)^2+(iy-Ey)^2)^0.5;
else
Eta(1,i)=100;
end
end
ROUTES=cell(K,M);% cell array storing the crawling path of every ant in every generation
PL=zeros(K,M);% matrix storing the path length of every ant in every generation
%% ----------- Run K rounds of ant foraging, sending out M ants per round -----------
for k=1:K
disp(k);
for m=1:M
%% Step 1: state initialisation
W=S;% current node, initialised to the start node
Path=S;% crawled path so far
PLkm=0;% length of the crawled path
TABUkm=ones(1,N);% tabu list
TABUkm(S)=0;% the ant is already at the start node, so exclude it
DD=D;% working copy of the adjacency matrix
%% Step 2: nodes that can be visited next
DW=DD(W,:);
DW1=find(DW<inf);
for j=1:length(DW1)
if TABUkm(DW1(j))==0
DW(DW1(j))=inf;
end
end
LJD=find(DW<inf);
Len_LJD=length(LJD);% number of candidate nodes
%% Foraging loop: continue while the ant has not reached the food and is not stuck in a dead end
while W~=E&&Len_LJD>=1
%% Step 3: roulette-wheel selection of the next move
PP=zeros(1,Len_LJD);
for i=1:Len_LJD
PP(i)=(Tau(W,LJD(i))^Alpha)*(Eta(LJD(i))^Beta);
end
PP=PP/(sum(PP));% build the probability distribution
Pcum=cumsum(PP);
Select=find(Pcum>=rand);
to_visit=LJD(Select(1));% first candidate whose cumulative probability reaches the random draw
%% Step 4: update and record the state
Path=[Path,to_visit];% extend the path
PLkm=PLkm+DD(W,to_visit);% extend the path length
W=to_visit;% move the ant to the next node
for kk=1:N
if TABUkm(kk)==0
DD(W,kk)=inf;
DD(kk,W)=inf;
end
end
TABUkm(W)=0;% remove the visited node from the candidate set
DW=DD(W,:);% neighbours of the new current node
DW1=find(DW<inf);
for j=1:length(DW1)
if TABUkm(DW1(j))==0
DW(DW1(j))=inf;
end
end
LJD=find(DW<inf);
Len_LJD=length(LJD);% number of candidate nodes
end
%% Step 5: record the foraging path and path length of every ant in every generation
ROUTES{k,m}=Path;
if Path(end)==E
PL(k,m)=PLkm;
else
PL(k,m)=inf;
end
end
%% Step 6: update the pheromone
Delta_Tau=zeros(N,N);% initialise the pheromone increment
for m=1:M
if PL(k,m)<inf
ROUT=ROUTES{k,m};
TS=length(ROUT)-1;% number of hops
PL_km=PL(k,m);
for s=1:TS
x=ROUT(s);
y=ROUT(s+1);
Delta_Tau(x,y)=Delta_Tau(x,y)+Q/PL_km;
Delta_Tau(y,x)=Delta_Tau(y,x)+Q/PL_km;
end
end
end
Tau=(1-Rho).*Tau+Delta_Tau;% part of the pheromone evaporates and new pheromone is deposited
end
%% --------------------------- Plotting --------------------------------
plotif=1;% flag controlling whether to plot
if plotif==1
% plot the convergence curves
meanPL=zeros(1,K);
minPL=zeros(1,K);
for i=1:K
PLK=PL(i,:);
Nonzero=find(PLK<inf);% ants that actually reached the goal
PLKPLK=PLK(Nonzero);
meanPL(i)=mean(PLKPLK);
minPL(i)=min(PLKPLK);
end
figure(1)
plot(minPL);
hold on
plot(meanPL);
grid on
title('Convergence curves (mean and minimum path length)');
xlabel('Iteration');
ylabel('Path length');
% plot the crawling map
figure(2)
axis([0,MM,0,MM])
for i=1:MM
for j=1:MM
if G(i,j)==1
x1=j-1;y1=MM-i;
x2=j;y2=MM-i;
x3=j;y3=MM-i+1;
x4=j-1;y4=MM-i+1;
fill([x1,x2,x3,x4],[y1,y2,y3,y4],[0.2,0.2,0.2]);
hold on
else
x1=j-1;y1=MM-i;
x2=j;y2=MM-i;
x3=j;y3=MM-i+1;
x4=j-1;y4=MM-i+1;
fill([x1,x2,x3,x4],[y1,y2,y3,y4],[1,1,1]);
hold on
end
end
end
hold on
ROUT=ROUTES{K,M};
LENROUT=length(ROUT);
Rx=ROUT;
Ry=ROUT;
for ii=1:LENROUT
Rx(ii)=a*(mod(ROUT(ii),MM)-0.5);
if Rx(ii)==-0.5
Rx(ii)=MM-0.5;
end
Ry(ii)=a*(MM+0.5-ceil(ROUT(ii)/MM));
end
plot(Rx,Ry)
end
plotif2=1;% plot the best crawling path of each generation
if plotif2==1
figure(3)
axis([0,MM,0,MM])
for i=1:MM
for j=1:MM
if G(i,j)==1
x1=j-1;y1=MM-i;
x2=j;y2=MM-i;
x3=j;y3=MM-i+1;
x4=j-1;y4=MM-i+1;
fill([x1,x2,x3,x4],[y1,y2,y3,y4],[0.2,0.2,0.2]);
hold on
else
x1=j-1;y1=MM-i;
x2=j;y2=MM-i;
x3=j;y3=MM-i+1;
x4=j-1;y4=MM-i+1;
fill([x1,x2,x3,x4],[y1,y2,y3,y4],[1,1,1]);
hold on
end
end
end
for k=1:K
PLK=PL(k,:);
minPLK=min(PLK);
pos=find(PLK==minPLK);
m=pos(1);
ROUT=ROUTES{k,m};
LENROUT=length(ROUT);
Rx=ROUT;
Ry=ROUT;
for ii=1:LENROUT
Rx(ii)=a*(mod(ROUT(ii),MM)-0.5);
if Rx(ii)==-0.5
Rx(ii)=MM-0.5;
end
Ry(ii)=a*(MM+0.5-ceil(ROUT(ii)/MM));
end
plot(Rx,Ry)
hold on
end
end
Applying the above algorithm to robot path planning, the optimisation result is as shown in the figure below.
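For anyone porting Step 3, the roulette-wheel (fitness-proportionate) selection used above can be sketched as follows; this is an illustrative C++ fragment with hypothetical names, not part of the MATLAB program.
#include <random>
#include <vector>
// Pick an index in [0, weights.size()) with probability proportional to its weight.
// weights[i] would play the role of Tau(W,LJD(i))^Alpha * Eta(LJD(i))^Beta.
std::size_t rouletteSelect(const std::vector<double>& weights, std::mt19937& rng)
{
    double total = 0.0;
    for (double w : weights) total += w;
    std::uniform_real_distribution<double> uni(0.0, total);
    double r = uni(rng);
    double cum = 0.0;
    for (std::size_t i = 0; i < weights.size(); ++i)
    {
        cum += weights[i];
        if (r <= cum) return i;    // first index whose cumulative weight reaches the draw
    }
    return weights.size() - 1;     // guard against floating-point round-off
}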
④ Help with the SGM (semi-global matching) algorithm
The C++ implementation (for reference only; it compiles in Visual Studio) is the same listing already given in answer ① above, so it is not repeated here.
⑤ An algorithm for drawing an ellipse
#include<stdio.h>
#include<graphics.h>
#include<math.h>
#include<time.h>
#include<conio.h>
void ellipsepoint(int x,int y,int value,int rx,int ry)
{
putpixel((int)rx+x,(int)ry+y,value);
putpixel((int)rx-x,(int)ry+y,value);
putpixel((int)rx+x,(int)ry-y,value);
putpixel((int)rx-x,(int)ry-y,value);
}
void MidPointEllipse(int a,int b,int value,int rx,int ry)
{
long x=0;
long y=b;
long sa=a*a,sb=b*b;
long xp=(long)((float)sa/(float)sqrt((float)(sa+sb)));
long yp=(long)((float)sb/(float)sqrt((float)(sa+sb)));
long d=sb-sa*(b-0.25);
ellipsepoint(x,y,value,rx,ry); /* plot the starting point about the centre (rx,ry) */
while(x<xp)
{
if(d<0)
{ d+=sb*(2*x+3);
x++; }
else
{
d+=sb*(2*x+3)+sa*(-2*y+2);
x++;
y--;
}
ellipsepoint(x,y,value,rx,ry);
}
x=a;y=0;d=4*sa+sb-4*a*sb;
while(y<yp)
{
if(d<0)
{
d+=sa*(2*y+3);
y++;
}
else
{
d+=sa*(2*y+3)+sb*(2-2*x);
y++;
x--;
}
ellipsepoint(x,y,value,rx,ry);
}
}
int main()
{
int gdriver,gmode;
gdriver=VGA;
gmode=VGAHI;
registerbgidriver(EGAVGA_driver);
initgraph(&gdriver,&gmode,"");
MidPointEllipse(50,30,5,100,100);
getch();
closegraph();
return 0;
}
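The program above relies on the old Borland/Turbo C BGI graphics library (graphics.h, initgraph, putpixel), so it will not build with a modern compiler as-is. If you only want to check the rasterisation, a minimal sketch that collects the same four-way-symmetric points into a text canvas might look like this; it is an assumed standalone rewrite of the standard midpoint ellipse, not the original program.
#include <cstdio>
#include <cstring>
const int W = 201, H = 101;
static char canvas[H][W + 1];
// Plot the four symmetric points of the ellipse about its centre (cx, cy).
void plot4(long x, long y, int cx, int cy)
{
    long xs[4] = { cx + x, cx - x, cx + x, cx - x };
    long ys[4] = { cy + y, cy + y, cy - y, cy - y };
    for (int i = 0; i < 4; ++i)
        if (xs[i] >= 0 && xs[i] < W && ys[i] >= 0 && ys[i] < H)
            canvas[ys[i]][xs[i]] = '*';
}
int main()
{
    for (int r = 0; r < H; ++r) { std::memset(canvas[r], ' ', W); canvas[r][W] = '\0'; }
    const long a = 50, b = 30;       // semi-axes, as in MidPointEllipse(50, 30, ...)
    const int  cx = 100, cy = 50;    // centre, playing the role of (rx, ry)
    const long sa = a * a, sb = b * b;
    // Region 1: |slope| < 1, step in x.
    long x = 0, y = b;
    long dx = 0, dy = 2 * sa * y;
    double d1 = sb - sa * b + 0.25 * sa;
    plot4(x, y, cx, cy);
    while (dx < dy)
    {
        if (d1 < 0) { ++x; dx += 2 * sb; d1 += dx + sb; }
        else        { ++x; --y; dx += 2 * sb; dy -= 2 * sa; d1 += dx - dy + sb; }
        plot4(x, y, cx, cy);
    }
    // Region 2: |slope| >= 1, step in y.
    double d2 = sb * (x + 0.5) * (x + 0.5) + sa * (y - 1.0) * (y - 1.0) - (double)sa * sb;
    while (y > 0)
    {
        if (d2 > 0) { --y; dy -= 2 * sa; d2 += sa - dy; }
        else        { --y; ++x; dx += 2 * sb; dy -= 2 * sa; d2 += dx - dy + sa; }
        plot4(x, y, cx, cy);
    }
    for (int r = 0; r < H; ++r) std::puts(canvas[r]);
    return 0;
}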
⑥ Looking for a complete program for multi-objective search based on particle swarm optimisation... from the objective function to the end
%% This script demonstrates a multi-objective Pareto optimisation problem
% clear the environment
clc
clear
load data
%% Initial parameters
objnum=size(P,1); % number of items per class
weight=92; % total weight limit
% initialisation
Dim=5; % particle dimension
xSize=50; % swarm size
MaxIt=200; % number of iterations
c1=0.8; % algorithm parameter
c2=0.8; % algorithm parameter
wmax=1.2; % inertia weight (maximum)
wmin=0.1; % inertia weight (minimum)
x=unidrnd(4,xSize,Dim); % initialise the particles
v=zeros(xSize,Dim); % initialise the velocities
xbest=x; % personal best positions
gbest=x(1,:); % global best position of the swarm
% particle fitness values
px=zeros(1,xSize); % value objective of each particle
rx=zeros(1,xSize); % volume objective of each particle
cx=zeros(1,xSize); % weight, for the constraint
% initialise the best values
pxbest=zeros(1,xSize); % best value objective of each particle
rxbest=zeros(1,xSize); % best volume objective of each particle
cxbest=zeros(1,xSize); % recorded weight, for the constraint
% values from the previous evaluation
pxPrior=zeros(1,xSize);% value objective of each particle
rxPrior=zeros(1,xSize);% volume objective of each particle
cxPrior=zeros(1,xSize);% recorded weight, for the constraint
% compute the initial objective vectors
for i=1:xSize
for j=1:Dim % loop over the classes
px(i) = px(i)+P(x(i,j),j); % particle value
rx(i) = rx(i)+R(x(i,j),j); % particle volume
cx(i) = cx(i)+C(x(i,j),j); % particle weight
end
end
% initial personal bests
pxbest=px;rxbest=rx;cxbest=cx;
%% Initial screening of non-dominated solutions
flj=[];
fljx=[];
fljNum=0;
% tolerance for treating two reals as equal
tol=1e-7;
for i=1:xSize
flag=0; % domination flag
for j=1:xSize
if j~=i
if ((px(i)<px(j)) && (rx(i)>rx(j))) ||((abs(px(i)-px(j))<tol)...
&& (rx(i)>rx(j)))||((px(i)<px(j)) && (abs(rx(i)-rx(j))<tol)) || (cx(i)>weight)
flag=1;
break;
end
end
end
% check whether particle i was dominated
if flag==0
fljNum=fljNum+1;
% record the non-dominated solution
flj(fljNum,1)=px(i);flj(fljNum,2)=rx(i);flj(fljNum,3)=cx(i);
% position of the non-dominated solution
fljx(fljNum,:)=x(i,:);
end
end
%% Main iteration loop
for iter=1:MaxIt
% update the inertia weight
w=wmax-(wmax-wmin)*iter/MaxIt;
% pick a particle from the non-dominated set as the global best
s=size(fljx,1);
index=randi(s,1,1);
gbest=fljx(index,:);
%% Swarm update
for i=1:xSize
% velocity update
v(i,:)=w*v(i,:)+c1*rand(1,1)*(xbest(i,:)-x(i,:))+c2*rand(1,1)*(gbest-x(i,:));
% position update
x(i,:)=x(i,:)+v(i,:);
x(i,:) = rem(x(i,:),objnum)/double(objnum);
index1=find(x(i,:)<=0);
if ~isempty(index1)
x(i,index1)=rand(size(index1));
end
x(i,:)=ceil(4*x(i,:));
end
%% Compute particle fitness
pxPrior(:)=0;
rxPrior(:)=0;
cxPrior(:)=0;
for i=1:xSize
for j=1:Dim % loop over the classes
pxPrior(i) = pxPrior(i)+P(x(i,j),j); % value of particle i
rxPrior(i) = rxPrior(i)+R(x(i,j),j); % volume of particle i
cxPrior(i) = cxPrior(i)+C(x(i,j),j); % weight of particle i
end
end
%% Update each particle's personal best
for i=1:xSize
% if the new position dominates the old one, replace the old one
if ((px(i)<pxPrior(i)) && (rx(i)>rxPrior(i))) ||((abs(px(i)-pxPrior(i))<tol)...
&& (rx(i)>rxPrior(i)))||((px(i)<pxPrior(i)) && (abs(rx(i)-rxPrior(i))<tol)) || (cx(i)>weight)
xbest(i,:)=x(i,:);% the objective values are not recorded here
pxbest(i)=pxPrior(i);rxbest(i)=rxPrior(i);cxbest(i)=cxPrior(i);
end
% neither dominates the other: decide at random
if ~( ((px(i)<pxPrior(i)) && (rx(i)>rxPrior(i))) ||((abs(px(i)-pxPrior(i))<tol)...
&& (rx(i)>rxPrior(i)))||((px(i)<pxPrior(i)) && (abs(rx(i)-rxPrior(i))<tol)) || (cx(i)>weight) )...
&& ~( ((pxPrior(i)<px(i)) && (rxPrior(i)>rx(i))) ||((abs(pxPrior(i)-px(i))<tol) && (rxPrior(i)>rx(i)))...
||((pxPrior(i)<px(i)) && (abs(rxPrior(i)-rx(i))<tol)) || (cxPrior(i)>weight) )
if rand(1,1)<0.5
xbest(i,:)=x(i,:);
pxbest(i)=pxPrior(i);rxbest(i)=rxPrior(i);cxbest(i)=cxPrior(i);
end
end
end
%% Update the non-dominated (Pareto) set
px=pxPrior;
rx=rxPrior;
cx=cxPrior;
% update and upgrade the non-dominated set
s=size(flj,1);% current number of elements in the non-dominated set
% first merge the non-dominated set with xbest
pppx=zeros(1,s+xSize);
rrrx=zeros(1,s+xSize);
cccx=zeros(1,s+xSize);
pppx(1:xSize)=pxbest;pppx(xSize+1:end)=flj(:,1)';
rrrx(1:xSize)=rxbest;rrrx(xSize+1:end)=flj(:,2)';
cccx(1:xSize)=cxbest;cccx(xSize+1:end)=flj(:,3)';
xxbest=zeros(s+xSize,Dim);
xxbest(1:xSize,:)=xbest;
xxbest(xSize+1:end,:)=fljx;
% screen for non-dominated solutions
flj=[];
fljx=[];
k=0;
tol=1e-7;
for i=1:xSize+s
flag=0;% not dominated yet
% check whether this point is non-dominated
for j=1:xSize+s
if j~=i
if ((pppx(i)<pppx(j)) && (rrrx(i)>rrrx(j))) ||((abs(pppx(i)-pppx(j))<tol) ...
&& (rrrx(i)>rrrx(j)))||((pppx(i)<pppx(j)) && (abs(rrrx(i)-rrrx(j))<tol)) ...
|| (cccx(i)>weight) % dominated at least once, or over the weight limit
flag=1;
break;
end
end
end
% check whether it was dominated
if flag==0
k=k+1;
flj(k,1)=pppx(i);flj(k,2)=rrrx(i);flj(k,3)=cccx(i);% record the non-dominated solution
fljx(k,:)=xxbest(i,:);% position of the non-dominated solution
end
end
% remove duplicate particles
repflag=0; % duplicate flag
k=1; % number of distinct non-dominated particles
flj2=[]; % distinct non-dominated solutions
fljx2=[]; % positions of the distinct non-dominated particles
flj2(k,:)=flj(1,:);
fljx2(k,:)=fljx(1,:);
for j=2:size(flj,1)
repflag=0; % duplicate flag
for i=1:size(flj2,1)
result=(fljx(j,:)==fljx2(i,:));
if length(find(result==1))==Dim
repflag=1;% duplicate found
end
end
% particle is distinct, store it
if repflag==0
k=k+1;
flj2(k,:)=flj(j,:);
fljx2(k,:)=fljx(j,:);
end
end
% update the non-dominated set
flj=flj2;
fljx=fljx2;
end
% plot the distribution of the non-dominated solutions
plot(flj(:,1),flj(:,2),'o')
xlabel('P')
ylabel('R')
title('Distribution of the final non-dominated solutions in objective space')
disp('The three columns of the non-dominated set flj are P, R and C, in that order')
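The non-dominated screening used twice above reduces to a pairwise dominance test: a particle is discarded if another particle has a value (px) at least as high and a volume (rx) no larger, with at least one strict inequality, or if it violates the weight limit. A minimal C++ sketch of that test; the Solution struct and function names are illustrative, not part of the MATLAB script.
#include <cstddef>
#include <vector>
struct Solution { double value, volume, weight; };   // roles of px, rx, cx in the script
// true if b dominates a: value >= and volume <=, with at least one strict inequality,
// using the same tolerance the script uses for "equal" reals.
bool dominates(const Solution& b, const Solution& a, double tol = 1e-7)
{
    bool valueGE  = b.value  > a.value  - tol;
    bool volumeLE = b.volume < a.volume + tol;
    bool strict   = (b.value > a.value + tol) || (b.volume < a.volume - tol);
    return valueGE && volumeLE && strict;
}
// Keep only feasible, non-dominated solutions (an approximation of the Pareto front).
std::vector<Solution> paretoFilter(const std::vector<Solution>& pop, double weightLimit)
{
    std::vector<Solution> front;
    for (std::size_t i = 0; i < pop.size(); ++i)
    {
        if (pop[i].weight > weightLimit) continue;           // infeasible, like cx(i) > weight
        bool dominated = false;
        for (std::size_t j = 0; j < pop.size() && !dominated; ++j)
            if (j != i && pop[j].weight <= weightLimit && dominates(pop[j], pop[i]))
                dominated = true;
        if (!dominated) front.push_back(pop[i]);
    }
    return front;
}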
⑦ Is R × R × 0.617 = kilograms the standard formula for rebar weight?
It is the standard formula. To be precise, it gives the weight per metre: diameter squared (in cm) × 0.617 ≈ kg per metre, which is the same as diameter squared (in mm) × 0.00617 kg/m; both follow from a steel density of about 7.85 g/cm³. For example, a 12 mm (1.2 cm) bar weighs about 1.2 × 1.2 × 0.617 ≈ 0.89 kg per metre.
⑧ What does "rx global mask" mean?
rx-global-mask
Literally a "receive global mask" (rx is the common abbreviation for "receive").
Bilingual example sentences:
1. There's no Rx for unemployment.
(Here Rx means a prescription or remedy: there is no cure for unemployment.) (from an authoritative dictionary)
2. Secondly, RX, GMRF and SEM, three representational algorithms of anomaly detection are studied.
(That is: three representative anomaly-detection algorithms are studied, namely the RX algorithm, detection based on the Gaussian Markov random field (GMRF) model, and detection based on stochastic expectation-maximisation (SEM) classification.)
⑨ What algorithm can be used to decide whether an insurance customer is fraudulent?
1. Data type conversion: since "gender" actually represents male and female customers, convert the "gender" variable to a factor, with factor level "F" replacing 1 and "M" replacing 2; also convert the "fraudRisk" variable to a factor.
Answer: first import the data into R and check its dimensions:
> # import the data
> ccFraud <- read.csv("ccFraud.csv")
> # check the dimensions
> str(ccFraud)
'data.frame': 10000000 obs. of 9 variables:
 $ custID      : int 1 2 3 4 5 6 7 8 9 10 ...
 $ gender      : int 1 2 2 1 1 2 1 1 2 1 ...
 $ state       : int 35 2 2 15 46 44 3 10 32 23 ...
 $ cardholder  : int 1 1 1 1 1 2 1 1 1 1 ...
 $ balance     : int 3000 0 0 0 0 5546 2000 6016 2428 0 ...
 $ numTrans    : int 4 9 27 12 11 21 41 20 4 18 ...
 $ numIntlTrans: int 14 0 9 0 16 0 0 3 10 56 ...
 $ creditLine  : int 2 18 16 5 7 13 1 6 22 5 ...
 $ fraudRisk   : int 0 0 0 0 0 0 0 0 0 0 ...
The ccFraud data set has ten million rows and 9 columns, all integer variables. As the question requires, first convert the "gender" variable to a factor, with level "F" replacing 1 and "M" replacing 2. Code:
> ccFraud$gender <- factor(ifelse(ccFraud$gender==1,'F','M'))
> str(ccFraud)
'data.frame': 10000000 obs. of 9 variables:
 $ custID      : int 1 2 3 4 5 6 7 8 9 10 ...
 $ gender      : Factor w/ 2 levels "F","M": 1 2 2 1 1 2 1 1 2 1 ...
 $ state       : int 35 2 2 15 46 44 3 10 32 23 ...
 $ cardholder  : int 1 1 1 1 1 2 1 1 1 1 ...
 $ balance     : int 3000 0 0 0 0 5546 2000 6016 2428 0 ...
 $ numTrans    : int 4 9 27 12 11 21 41 20 4 18 ...
 $ numIntlTrans: int 14 0 9 0 16 0 0 3 10 56 ...
 $ creditLine  : int 2 18 16 5 7 13 1 6 22 5 ...
 $ fraudRisk   : int 0 0 0 0 0 0 0 0 0 0 ...
Then convert the "fraudRisk" variable to a factor as well:
> ccFraud$fraudRisk <- as.factor(ccFraud$fraudRisk)
> str(ccFraud)
'data.frame': 10000000 obs. of 9 variables:
 $ custID      : int 1 2 3 4 5 6 7 8 9 10 ...
 $ gender      : Factor w/ 2 levels "F","M": 1 2 2 1 1 2 1 1 2 1 ...
 $ state       : int 35 2 2 15 46 44 3 10 32 23 ...
 $ cardholder  : int 1 1 1 1 1 2 1 1 1 1 ...
 $ balance     : int 3000 0 0 0 0 5546 2000 6016 2428 0 ...
 $ numTrans    : int 4 9 27 12 11 21 41 20 4 18 ...
 $ numIntlTrans: int 14 0 9 0 16 0 0 3 10 56 ...
 $ creditLine  : int 2 18 16 5 7 13 1 6 22 5 ...
 $ fraudRisk   : Factor w/ 2 levels "0","1": 1 1 1 1 1 1 1 1 1 1 ...
2. Data exploration: look at the counts and proportions of 0 and 1 in the "fraudRisk" variable.
Answer: this one is straightforward; the table and prop.table functions do the job:
> table(ccFraud$fraudRisk)
        0       1
  9403986  596014
> prop.table(table(ccFraud$fraudRisk))
        0         1
0.9403986 0.0596014
3. Data partitioning: split the data with stratified sampling on the fraudRisk variable, 80% as the training set train and 20% as the test set test.
Answer: since stratified sampling on fraudRisk is required, we use createDataPartition from the caret package:
> library(caret)
Loading required package: lattice
Loading required package: ggplot2
> idx <- createDataPartition(ccFraud$fraudRisk, p=0.8, list=F)
> train <- ccFraud[idx,]
> test <- ccFraud[-idx,]
> prop.table(table(train$fraudRisk))
         0          1
0.94039851 0.05960149
> prop.table(table(test$fraudRisk))
         0          1
0.94039897 0.05960103
4. Model building: build predictive models with at least three common classification algorithms (e.g. k-nearest neighbours, decision trees, random forests).
Answer: because the data set is large and students reported slow runs, the models are fitted with the MicrosoftML package here. For a quick start with MRS see the earlier article: https://ask.hellobi.com/blog/xiejiabiao/8559
> # Model 1: a fast decision-tree model built with rxFastTrees() from the MicrosoftML package
> (a <- Sys.time()) # time before the run
[1] "2017-09-03 23:32:04 CST"
> treeModel <- rxFastTrees(fraudRisk ~ gender + cardholder + balance + numTrans +
+     numIntlTrans + creditLine, data = train)
Not adding a normalizer.
Making per-feature arrays
Changing data from row-wise to column-wise
Beginning processing data.
Rows Read: 8000001, Read Time: 0, Transform Time: 0
Beginning processing data.
Processed 8000001 instances
Binning and forming Feature objects
Reserved memory for tree learner: 79664 bytes
Starting to train ...
Not training a calibrator because it is not needed.
Elapsed time: 00:01:04.6222538
> (b <- Sys.time()) # time after the run
[1] "2017-09-03 23:33:09 CST"
> b-a # run time
Time difference of 1.086313 mins
> # Model 2: a fast random-forest model built with rxFastForest() from the MicrosoftML package
> (a <- Sys.time()) # time before the run
[1] "2017-09-03 23:33:31 CST"
> forestModel <- rxFastForest(fraudRisk ~ gender + cardholder + balance + numTrans +
+     numIntlTrans + creditLine, data = train)
Not adding a normalizer.
Making per-feature arrays
Changing data from row-wise to column-wise
Beginning processing data.
Rows Read: 8000001, Read Time: 0, Transform Time: 0
Beginning processing data.
Processed 8000001 instances
Binning and forming Feature objects
Reserved memory for tree learner: 79664 bytes
Starting to train ...
Training calibrator.
Beginning processing data.
Rows Read: 8000001, Read Time: 0, Transform Time: 0
Beginning processing data.
Elapsed time: 00:01:25.4585776
> (b <- Sys.time()) # time after the run
[1] "2017-09-03 23:34:57 CST"
> b-a # run time
Time difference of 1.433823 mins
> # Model 3: a fast logistic regression model built with rxLogisticRegression() from the MicrosoftML package
> (a <- Sys.time()) # time before the run
[1] "2017-09-03 23:34:57 CST"
> logitModel <- rxLogisticRegression(fraudRisk ~ gender + cardholder + balance + numTrans +
+     numIntlTrans + creditLine, data = train)
Automatically adding a MinMax normalization transform, use 'norm=Warn' or 'norm=No' to turn this behavior off.
Beginning processing data.
Rows Read: 8000001, Read Time: 0, Transform Time: 0
Beginning processing data.
Beginning processing data.
Rows Read: 8000001, Read Time: 0, Transform Time: 0
Beginning processing data.
Beginning processing data.
Rows Read: 8000001, Read Time: 0, Transform Time: 0
Beginning processing data.
LBFGS multi-threading will attempt to load dataset into memory. In case of out-of-memory issues, turn off multi-threading by setting trainThreads to 1.
Beginning optimization
num vars: 8
improvement criterion: Mean Improvement
L1 regularization selected 8 of 8 weights.
Not training a calibrator because it is not needed.
Elapsed time: 00:00:19.5887244
Elapsed time: 00:00:00.0383181
> (b <- Sys.time()) # time after the run
[1] "2017-09-03 23:35:17 CST"
> b-a # run time
Time difference of 20.27396 secs
The logistic regression model has the shortest run time at 20.3 seconds, followed by the decision tree at 1.08 minutes; the random forest is the slowest at about 1.4 minutes.
5. Model evaluation: use the models built above (at least the three from step 4) to predict on both the training and test sets, evaluate their performance, and choose the best model as the future business prediction model. (Hint: build confusion matrices.)
Answer: for each of the three models we predict on the train and test sets and evaluate the results.
# Predict with the decision-tree model and compute the error rate
> treePred_tr <- rxPredict(treeModel, data = train)
Beginning processing data.
Rows Read: 8000001, Read Time: 0, Transform Time: 0
Beginning processing data.
Elapsed time: 00:00:52.1015119
Finished writing 8000001 rows.
Writing completed.
> t <- table(train$fraudRisk, treePred_tr$PredictedLabel)
> t
          0        1
  0 7446742    76447
  1  253008   223804
> (paste0(round((sum(t)-sum(diag(t)))/sum(t),3)*100,"%")) # decision-tree error rate on the train set
[1] "4.1%"
> treePred_te <- rxPredict(treeModel, data = test)
Beginning processing data.
Rows Read: 1999999, Read Time: 0, Transform Time: 0
Beginning processing data.
Elapsed time: 00:00:13.4980323
Finished writing 1999999 rows.
Writing completed.
> t1 <- table(test$fraudRisk, treePred_te$PredictedLabel)
> t1
          0        1
  0 1861406    19391
  1   63176    56026
> (paste0(round((sum(t1)-sum(diag(t1)))/sum(t1),3)*100,"%")) # decision-tree error rate on the test set
[1] "4.1%"
> # Predict with the random-forest model and compute the error rate
> forestPred_tr <- rxPredict(forestModel, data = train)
Beginning processing data.
Rows Read: 8000001, Read Time: 0.001, Transform Time: 0
Beginning processing data.
Elapsed time: 00:00:56.2862657
Finished writing 8000001 rows.
Writing completed.
> t <- table(train$fraudRisk, forestPred_tr$PredictedLabel)
> t
          0        1
  0 7508808    14381
  1  373777   103035
> (paste0(round((sum(t)-sum(diag(t)))/sum(t),3)*100,"%")) # random-forest error rate on the train set
[1] "4.9%"
> forestPred_te <- rxPredict(forestModel, data = test)
Beginning processing data.
Rows Read: 1999999, Read Time: 0.001, Transform Time: 0
Beginning processing data.
Elapsed time: 00:00:14.0430130
Finished writing 1999999 rows.
Writing completed.
> t1 <- table(test$fraudRisk, forestPred_te$PredictedLabel)
> t1
          0        1
  0 1877117     3680
  1   93419    25783
> (paste0(round((sum(t1)-sum(diag(t1)))/sum(t),3)*100,"%")) # random-forest error rate on the test set
[1] "1.2%"
> # Predict with the logistic regression model and compute the error rate
> logitPred_tr <- rxPredict(logitModel, data = train)
Beginning processing data.
Rows Read: 8000001, Read Time: 0.001, Transform Time: 0
Beginning processing data.
Elapsed time: 00:00:08.1674394
Finished writing 8000001 rows.
Writing completed.
> t <- table(train$fraudRisk, logitPred_tr$PredictedLabel)
> t
          0        1
  0 7444156    79033
  1  250679   226133
> (paste0(round((sum(t)-sum(diag(t)))/sum(t),3)*100,"%")) # logistic regression error rate on the train set
[1] "4.1%"
> logitPred_te <- rxPredict(logitModel, data = test)
Beginning processing data.
Rows Read: 1999999, Read Time: 0, Transform Time: 0
Beginning processing data.
Elapsed time: 00:00:02.0736547
Finished writing 1999999 rows.
Writing completed.
> t1 <- table(test$fraudRisk, logitPred_te$PredictedLabel)
> t1
          0        1
  0 1860885    19912
  1   62428    56774
> (paste0(round((sum(t1)-sum(diag(t1)))/sum(t),3)*100,"%")) # logistic regression error rate on the test set
[1] "1%"
Judging by the prediction error rates on the training and test sets, logistic regression is the best choice for this data set.
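The error rates above come straight from each confusion matrix as (total - trace) / total. A minimal sketch of that bookkeeping, with the decision tree's test-set counts from the transcript plugged in as example values:
#include <cstdio>
int main()
{
    // 2x2 confusion matrix, rows = actual class, columns = predicted class.
    // Counts taken from the decision tree's test-set table t1 above.
    long long m[2][2] = { { 1861406, 19391 },
                          { 63176,   56026 } };
    long long total = 0, correct = 0;
    for (int i = 0; i < 2; ++i)
        for (int j = 0; j < 2; ++j)
        {
            total += m[i][j];
            if (i == j) correct += m[i][j];   // diagonal = correctly classified
        }
    double errorRate = double(total - correct) / double(total);
    std::printf("error rate = %.3f\n", errorRate);   // about 0.041, i.e. 4.1%
    return 0;
}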