c语言-lm_LM算法的more1978

c语言-lm_LM算法的more1978（下面这段为文章开头被压缩损坏的代码片段，完整且格式正确的代码见正文）：`#pragma once` / `#include <stdio.h>` / `#include "opencv2\core\core.hpp"` / `#pragma comment(lib, "opencv_core248d.lib")` / `const int MAXTIME = 50;` / `using namespace cv;` / `FileStorage fs;` / `Mat jacobin(const Mat& pk /*[a,b]*/, …`

大家好,又见面了,我是你们的朋友全栈君。如果您正在找激活码,请点击查看最新教程,关注关注公众号 “全栈程序员社区” 获取激活教程,可能之前旧版本教程已经失效.最新Idea2022.1教程亲测有效,一键激活。

Jetbrains全系列IDE使用 1年只要46元 售后保障 童叟无欺

这是一个数据拟合的例子。这里没有采用面向对象的设计方法，是为了让读者能更好地理解 LM 算法的流程——简约而不简单。算法的详细推导过程不多介绍。程序中用到了 OpenCV 库中的矩阵类 Mat。

例:c语言-lm_LM算法的more1978

#pragma  once
#include <stdio.h>
#include <string>
#include "opencv2\core\core.hpp"

#pragma comment(lib,"opencv_core248d.lib")

const int  MAXTIME = 50;

using namespace cv;
FileStorage fs;

Mat jacobin(const Mat& pk/*[a,b]*/, const Mat& x); //f = a*exp(-b*x)
Mat yEstimate(const Mat& p, const Mat& x);
inline void outData(FileStorage& fs, Mat & m, char* filename)
{
	fs.open(filename, FileStorage::FORMAT_XML | FileStorage::WRITE);
	char *temp = new char[10]; 
	strcpy_s(temp, 10,filename); 
	*strchr(temp, '.') = '\0';
	fs << temp << m; 
	fs.release();
	delete[] temp;
}

// Levenberg-Marquardt fit of the model f(x) = a*exp(-b*x) (More 1978
// damping scheme: accepted steps shrink lamda, rejected steps grow it).
//
//  p0    : initial parameter guess [a, b] (pN entries; not modified)
//  pN    : number of parameters (2 for this model)
//  x, y  : xN abscissae and observations
//  lamda : initial damping factor
//  step  : multiplicative factor used to raise/lower the damping
//  ep    : convergence threshold on the SQUARED step norm dp.dp
//
// The fitted parameters are written to "p.xml" through the global
// FileStorage `fs`. Fixes vs. the original:
//  - the accepted step is applied BEFORE the convergence test, so the
//    improved parameters pMM (not the stale pM) are saved;
//  - a failed solve() now increases the damping instead of re-solving
//    the identical singular system until MAXTIME;
//  - if the iteration budget runs out, the best estimate so far is
//    still written out instead of being silently dropped.
void LM(double* p0, int pN, double* x, int xN, double* y, double lamda, double step, double ep = 0.0001)
{
	int updateJ = 1;
	double ek = 0.0, ekk = 0.0; // current / trial sum of squared residuals
	Mat_<double> xM(xN, 1, x), yM(xN, 1, y), pM(pN, 1, p0), JM, yEM, yEMM, dM, gM, dMM, dpM;
	for (int iters = 0; iters < MAXTIME; iters++)
	{
		if (updateJ == 1)
		{
			// Re-linearize only after an accepted step.
			JM = jacobin(pM, xM);
			yEM = yEstimate(pM, xM);
			dM = yM - yEM;   // residual vector r = y - f(p, x)
			gM = JM.t()*dM;  // gradient J^T r
			if (iters == 0)
				ek = dM.dot(dM);
		}
		// Damped normal equations: (J^T J + lamda*I) dp = J^T r
		Mat_<double> NM = JM.t()*JM + lamda*(Mat::eye(pN, pN, CV_64F));
		if (solve(NM, gM, dpM))
		{
			Mat_<double> pMM = pM + dpM; // trial parameters
			yEMM = yEstimate(pMM, xM);
			dMM = yM - yEMM;
			ekk = dMM.dot(dMM);
			if (ekk < ek) // trial step decreased the error: accept it
			{
				printf("the %d iterator result\n", iters);
				// BUGFIX: accept the step before the convergence test so
				// the improved parameters are the ones written to p.xml.
				pM = pMM;
				ek = ekk;
				if (dpM.dot(dpM) < ep)
				{
					outData(fs, pM, "p.xml");
					return;
				}
				lamda = lamda / step; // trust the linear model more
				updateJ = 1;
			}
			else
			{
				lamda = lamda*step; // step rejected: increase damping
				updateJ = 0;        // keep the current linearization
			}
		}
		else
		{
			// Normal equations could not be solved: dump J for diagnosis
			// and retry with stronger damping (BUGFIX: the original left
			// lamda unchanged and re-solved the same system forever).
			outData(fs, JM, "badJ.xml");
			lamda = lamda*step;
			updateJ = 0;
		}
	}
	// BUGFIX: iteration budget exhausted -- still save the best estimate.
	outData(fs, pM, "p.xml");
}

// Jacobian of f(x) = a*exp(-b*x) w.r.t. p = [a, b], one row per sample.
//   column 0: df/da =        exp(-b*x)
//   column 1: df/db = -a * x*exp(-b*x)
// Fix vs. the original: exp(-b*x) was evaluated twice (once for each
// column) even though both columns share the same exponential; it is
// now computed once and reused.
Mat jacobin(const Mat& pk/*[a,b]*/, const Mat& x)
{
	const double a = pk.at<double>(0);
	const double b = pk.at<double>(1);
	Mat_<double> J(x.rows, pk.rows), da;
	exp(-b*x, da);                  // exp(-b*x), reused for both columns
	Mat_<double> db = -a*x.mul(da); // chain rule: -a * x .* exp(-b*x)
	da.copyTo(J(Rect(0, 0, 1, J.rows)));
	db.copyTo(J(Rect(1, 0, 1, J.rows)));
	return J;
}
// Evaluates the model y = a * exp(-b * x) element-wise, with p = [a, b].
Mat yEstimate(const Mat& p, const Mat& x)
{
	const double a = p.at<double>(0);
	const double b = p.at<double>(1);
	Mat_<double> decay(x.rows, x.cols);
	exp(-b*x, decay);   // decay = exp(-b*x)
	return a*decay;     // scale by the amplitude
}

#include "LMM.h"

// Demo driver: fits y = a*exp(-b*x) to nine sample points with LM.
int main()
{
	// Abscissae and observations for the exponential-decay fit.
	double xs[]    = { 0.25, 0.5, 1, 1.5, 2, 3, 4, 6, 8 };
	double ys[]    = { 19.21, 18.15, 15.36, 14.10, 12.89, 9.32, 7.45, 5.24, 3.01 };
	double guess[] = { 1, 1 }; // starting point for [a, b]
	const int nSamples = static_cast<int>(sizeof(xs) / sizeof(xs[0]));
	// lamda0 = 0.01, damping step factor = 10
	LM(guess, 2, xs, nSamples, ys, 0.01, 10);
	return 0;
}

版权声明:本文内容由互联网用户自发贡献,该文观点仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请联系我们举报,一经查实,本站将立刻删除。

发布者:全栈程序员-站长,转载请注明出处:https://javaforall.net/188641.html原文链接:https://javaforall.net

(0)
全栈程序员-站长的头像全栈程序员-站长


相关推荐

发表回复

您的邮箱地址不会被公开。 必填项已用 * 标注

关注全栈程序员社区公众号