In a previous installment (最优化理论与自动驾驶(二):求解算法, "Optimization Theory and Autonomous Driving (II): Solution Algorithms") we presented the basic solution algorithms of optimization theory, including the Gauss-Newton method, the gradient descent method, Newton's method, and the Levenberg-Marquardt (LM) method. In engineering practice we usually develop in C++, so this article supplements those methods with C++ implementations. In applications we can either write a simple solver ourselves or use a third-party library. Three approaches are demonstrated: 1) using the C++ std::vector container directly; 2) using the Eigen library for the iterative computation; 3) using solvers already packaged in Eigen. For engineering work, Eigen is recommended for matrix operations because it is heavily optimized under the hood, including SIMD vectorization and lazy evaluation via expression templates.
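As a brief illustration of what lazy evaluation means here (a minimal, self-contained sketch that is not part of the fitting example below): an Eigen arithmetic expression builds an expression template that is only evaluated when it is assigned, so chained element-wise operations fuse into a single loop, which Eigen can then vectorize with SIMD instructions.
#include <Eigen/Dense>
int main() {
Eigen::VectorXd a = Eigen::VectorXd::Random(1000);
Eigen::VectorXd b = Eigen::VectorXd::Random(1000);
Eigen::VectorXd c = Eigen::VectorXd::Random(1000);
// a + b + c is not computed immediately as two separate vector additions;
// it is an expression object that is evaluated coefficient by coefficient,
// in one pass, only when it is assigned to d.
Eigen::VectorXd d = a + b + c;
return 0;
}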
The C++ example code is given below.
Taking the exponential decay model y = a * exp(b * x) as the example, we obtain least-squares estimates of the parameters a and b with each of the methods.
1. Gradient descent
1.1 Using the C++ vector container
#include <iostream>
#include <vector>
#include <cmath>
#include <limits>
// Exponential decay model: y = a * exp(b * x)
double model(const std::vector<double>& params, double x) {
double a = params[0];
double b = params[1];
return a * std::exp(b * x);
}
// Residuals of the model against the data
std::vector<double> residuals(const std::vector<double>& params, const std::vector<double>& x, const std::vector<double>& y) {
std::vector<double> res(x.size());
for (size_t i = 0; i < x.size(); ++i) {
res[i] = model(params, x[i]) - y[i];
}
return res;
}
// Objective function (half the sum of squared residuals)
double objective_function(const std::vector<double>& params, const std::vector<double>& x, const std::vector<double>& y) {
std::vector<double> res = residuals(params, x, y);
double sum = 0.0;
for (double r : res) {
sum += r * r;
}
return 0.5 * sum;
}
// Gradient of the objective function
std::vector<double> compute_gradient(const std::vector<double>& params, const std::vector<double>& x, const std::vector<double>& y) {
double a = params[0];
double b = params[1];
std::vector<double> res = residuals(params, x, y);
// Accumulate the two gradient components
double grad_a = 0.0;
double grad_b = 0.0;
for (size_t i = 0; i < x.size(); ++i) {
grad_a += res[i] * std::exp(b * x[i]); // partial derivative with respect to a
grad_b += res[i] * a * x[i] * std::exp(b * x[i]); // partial derivative with respect to b
}
return {grad_a, grad_b};
}
// Gradient descent
std::vector<double> gradient_descent(const std::vector<double>& x, const std::vector<double>& y, const std::vector<double>& initial_params,
double learning_rate = 0.01, int max_iter = 10000, double tol = 1e-6) {
std::vector<double> params = initial_params;
for (int i = 0; i < max_iter; ++i) {
// Compute the gradient
std::vector<double> gradient = compute_gradient(params, x, y);
// Update the parameters
std::vector<double> params_new = {params[0] - learning_rate * gradient[0], params[1] - learning_rate * gradient[1]};
// Check the convergence criterion
double diff = std::sqrt(std::pow(params_new[0] - params[0], 2) + std::pow(params_new[1] - params[1], 2));
if (diff < tol) {
std::cout << "Converged after " << i + 1 << " iterations." << std::endl;
return params_new;
}
params = params_new;
}
std::cout << "Max iterations exceeded. No solution found." << std::endl;
return params;
}
int main() {
// Sample data: y = 2 * exp(-x)
std::vector<double> x_data = {0, 1, 2, 3, 4, 5};
std::vector<double> y_data;
for (double x : x_data) {
y_data.push_back(2 * std::exp(-1 * x));
}
// Initial parameter guess (a, b)
std::vector<double> initial_guess = {1.0, -1.0};
// Run gradient descent
std::vector<double> solution = gradient_descent(x_data, y_data, initial_guess);
// Print the result
std::cout << "Fitted parameters: a = " << solution[0] << ", b = " << solution[1] << std::endl;
return 0;
}
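The fixed learning rate above must be tuned by hand and can diverge if it is set too large. A common refinement is a backtracking (Armijo) line search that shrinks the step until the objective decreases sufficiently. Below is a minimal sketch under the assumption that objective_function and compute_gradient from the listing above are available; the helper name backtracking_step is ours and not part of the original code.
// Backtracking (Armijo) line search: a hypothetical helper that could replace
// the fixed learning_rate update in gradient_descent above.
std::vector<double> backtracking_step(const std::vector<double>& params,
const std::vector<double>& gradient,
const std::vector<double>& x, const std::vector<double>& y,
double t0 = 1.0, double beta = 0.5, double c = 1e-4) {
double f0 = objective_function(params, x, y);
double grad_sq = gradient[0] * gradient[0] + gradient[1] * gradient[1];
double t = t0;
while (t > 1e-12) {
std::vector<double> trial = {params[0] - t * gradient[0], params[1] - t * gradient[1]};
// Accept the step once the Armijo sufficient-decrease condition holds
if (objective_function(trial, x, y) <= f0 - c * t * grad_sq) {
return trial;
}
t *= beta; // otherwise shrink the step and try again
}
return params; // no acceptable step found; keep the current parameters
}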
1.2 Using the Eigen library
#include <iostream>
#include <Eigen/Dense>
#include <cmath>
using Eigen::VectorXd;
// Exponential decay model: y = a * exp(b * x)
VectorXd model(const VectorXd& params, const VectorXd& x) {
double a = params(0);
double b = params(1);
return a * (x.array() * b).exp().matrix(); // use Eigen array and matrix operations
}
// Residuals of the model against the data
VectorXd residuals(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
return model(params, x) - y;
}
// Objective function (half the sum of squared residuals)
double objective_function(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
VectorXd res = residuals(params, x, y);
return 0.5 * res.squaredNorm(); // squaredNorm() gives the sum of squares
}
// Gradient of the objective function
VectorXd compute_gradient(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
double a = params(0);
double b = params(1);
VectorXd res = residuals(params, x, y);
// Gradient components
double grad_a = (res.array() * (b * x.array()).exp()).sum(); // partial derivative with respect to a
double grad_b = (res.array() * a * x.array() * (b * x.array()).exp()).sum(); // partial derivative with respect to b
VectorXd gradient(2);
gradient << grad_a, grad_b;
return gradient;
}
// Gradient descent
VectorXd gradient_descent(const VectorXd& x, const VectorXd& y, const VectorXd& initial_params,
double learning_rate = 0.01, int max_iter = 10000, double tol = 1e-6) {
VectorXd params = initial_params;
for (int i = 0; i < max_iter; ++i) {
// Compute the gradient
VectorXd gradient = compute_gradient(params, x, y);
// Update the parameters
VectorXd params_new = params - learning_rate * gradient;
// Check the convergence criterion
if ((params_new - params).norm() < tol) {
std::cout << "Converged after " << i + 1 << " iterations." << std::endl;
return params_new;
}
params = params_new;
}
std::cout << "Max iterations exceeded. No solution found." << std::endl;
return params;
}
int main() {
// Sample data: y = 2 * exp(-x)
VectorXd x_data(6);
x_data << 0, 1, 2, 3, 4, 5;
VectorXd y_data = 2 * (-1 * x_data.array()).exp();
// Initial parameter guess (a, b)
VectorXd initial_guess(2);
initial_guess << 1.0, -1.0;
// Run gradient descent
VectorXd solution = gradient_descent(x_data, y_data, initial_guess);
// Print the result
std::cout << "Fitted parameters: a = " << solution(0) << ", b = " << solution(1) << std::endl;
return 0;
}
1.3 Using Eigen's QR decomposition
#include <iostream>
#include <Eigen/Dense>
#include <cmath>
using Eigen::VectorXd;
using Eigen::MatrixXd;
int main() {
// Sample data: y = 2 * exp(-x)
VectorXd x_data(6);
x_data << 0, 1, 2, 3, 4, 5;
VectorXd y_data(6);
for (int i = 0; i < 6; ++i) {
y_data(i) = 2 * std::exp(-1 * x_data(i));
}
// Take the logarithm of y_data to obtain the linear relation ln(y) = ln(a) + b * x
VectorXd log_y_data = y_data.array().log();
// Build the linear system in matrix form: A * params = log(y)
// A is the design matrix [1, x_data]
MatrixXd A(x_data.size(), 2);
A.col(0) = VectorXd::Ones(x_data.size()); // first column is all ones
A.col(1) = x_data; // second column is x_data
// Solve for the parameters [ln(a), b] by linear least squares (column-pivoted Householder QR)
VectorXd params = A.colPivHouseholderQr().solve(log_y_data);
// Extract ln(a) and b
double log_a = params(0);
double b = params(1);
double a = std::exp(log_a); // recover a
std::cout << "Fitted parameters: a = " << a << ", b = " << b << std::endl;
return 0;
}
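Note that the log-transform fits a linear least-squares problem in ln(y), which is not exactly the same objective as the nonlinear least-squares fit in the other sections (the residuals are weighted differently), but it gives a convenient closed-form estimate, for example as an initial guess for the iterative methods. Besides QR, Eigen also provides an SVD-based least-squares solver; a minimal sketch that reuses A and log_y_data from the listing above:
// Alternative: solve the same linear least-squares problem with an SVD,
// the most robust (though slower) of Eigen's dense least-squares solvers.
VectorXd params_svd = A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(log_y_data);
double a_svd = std::exp(params_svd(0)); // recover a
double b_svd = params_svd(1);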
2. Newton's method
#include <iostream>
#include <Eigen/Dense>
#include <cmath>
using Eigen::VectorXd;
using Eigen::MatrixXd;
// Exponential decay model: y = a * exp(b * x)
VectorXd model(const VectorXd& params, const VectorXd& x) {
double a = params(0);
double b = params(1);
return a * (x.array() * b).exp().matrix(); // use Eigen array and matrix operations
}
// Residuals of the model against the data
VectorXd residuals(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
return model(params, x) - y;
}
// Gradient and Hessian of the objective function
std::pair<VectorXd, MatrixXd> gradient_and_hessian(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
double a = params(0);
double b = params(1);
VectorXd res = residuals(params, x, y);
// Jacobian matrix J of the residuals
MatrixXd J(x.size(), 2);
J.col(0) = (b * x.array()).exp(); // partial derivative with respect to a
J.col(1) = a * x.array() * (b * x.array()).exp(); // partial derivative with respect to b
// Gradient: g = J^T * res
VectorXd gradient = J.transpose() * res;
// Hessian: H = J^T * J + second-order term (the second-order term is dropped here,
// so this is the Gauss-Newton approximation of the Hessian)
MatrixXd Hessian = J.transpose() * J;
return {gradient, Hessian};
}
// Newton's method
VectorXd newton_method(const VectorXd& x, const VectorXd& y, const VectorXd& initial_params, int max_iter = 100, double tol = 1e-6) {
VectorXd params = initial_params;
for (int i = 0; i < max_iter; ++i) {
auto [gradient, Hessian] = gradient_and_hessian(params, x, y);
// Check whether the Hessian is numerically invertible
if (std::abs(Hessian.determinant()) < 1e-12) {
std::cerr << "Hessian matrix is singular." << std::endl;
return VectorXd();
}
// Update step: params_new = params - H^(-1) * gradient
VectorXd delta_params = Hessian.inverse() * gradient;
VectorXd params_new = params - delta_params;
// Check the convergence criterion
if (delta_params.norm() < tol) {
std::cout << "Converged after " << i + 1 << " iterations." << std::endl;
return params_new;
}
params = params_new;
}
std::cerr << "Max iterations exceeded. No solution found." << std::endl;
return params;
}
int main() {
// Sample data: y = 2 * exp(-x)
VectorXd x_data(6);
x_data << 0, 1, 2, 3, 4, 5;
VectorXd y_data(6);
y_data = 2 * (-1 * x_data.array()).exp();
// Initial parameter guess (a, b)
VectorXd initial_guess(2);
initial_guess << 1.0, -1.0;
// Run Newton's method
VectorXd solution = newton_method(x_data, y_data, initial_guess);
// Print the result
if (solution.size() > 0) {
std::cout << "Fitted parameters: a = " << solution(0) << ", b = " << solution(1) << std::endl;
} else {
std::cout << "No solution found." << std::endl;
}
return 0;
}
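As noted in the comments, the Hessian used above drops the second-order term, so each update is effectively a Gauss-Newton step. For this particular model the omitted term can be written out by hand; the sketch below (the name gradient_and_full_hessian is ours) shows what the full Newton Hessian would look like.
// Full Hessian of f(a, b) = 0.5 * sum_i r_i^2 with r_i = a * exp(b * x_i) - y_i:
// H = J^T * J + sum_i r_i * H_i, where H_i is the Hessian of r_i with entries
// d2r/da2 = 0, d2r/dadb = x * exp(b * x), d2r/db2 = a * x^2 * exp(b * x).
std::pair<VectorXd, MatrixXd> gradient_and_full_hessian(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
double a = params(0);
double b = params(1);
VectorXd res = residuals(params, x, y);
MatrixXd J(x.size(), 2);
J.col(0) = (b * x.array()).exp();
J.col(1) = a * x.array() * (b * x.array()).exp();
VectorXd gradient = J.transpose() * res;
MatrixXd H = J.transpose() * J;
// Second-order correction term
double h_ab = (res.array() * x.array() * (b * x.array()).exp()).sum();
double h_bb = (res.array() * a * x.array().square() * (b * x.array()).exp()).sum();
H(0, 1) += h_ab;
H(1, 0) += h_ab;
H(1, 1) += h_bb;
return {gradient, H};
}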
3. Gauss-Newton method
#include <iostream>
#include <Eigen/Dense>
#include <cmath>
using Eigen::VectorXd;
using Eigen::MatrixXd;
// Exponential decay model: y = a * exp(b * x)
VectorXd model(const VectorXd& params, const VectorXd& x) {
double a = params(0);
double b = params(1);
return a * (b * x.array()).exp();
}
// Residuals of the model against the data
VectorXd residuals(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
return model(params, x) - y;
}
// Jacobian matrix of the residuals
MatrixXd jacobian(const VectorXd& params, const VectorXd& x) {
double a = params(0);
double b = params(1);
MatrixXd J(x.size(), 2);
J.col(0) = (b * x.array()).exp(); // partial derivative with respect to a
J.col(1) = a * x.array() * (b * x.array()).exp(); // partial derivative with respect to b
return J;
}
// Gauss-Newton method
VectorXd gauss_newton(const VectorXd& x, const VectorXd& y, const VectorXd& initial_params, int max_iter = 1000, double tol = 1e-6) {
VectorXd params = initial_params;
for (int i = 0; i < max_iter; ++i) {
VectorXd res = residuals(params, x, y);
MatrixXd J = jacobian(params, x);
// Compute the update step by solving the normal equations: (J^T * J) * delta_params = J^T * res
VectorXd delta_params = (J.transpose() * J).ldlt().solve(J.transpose() * res);
// Update the parameters
VectorXd params_new = params - delta_params;
// Check the convergence criterion
if (delta_params.norm() < tol) {
std::cout << "Converged after " << i + 1 << " iterations." << std::endl;
return params_new;
}
params = params_new;
}
std::cout << "Max iterations exceeded. No solution found." << std::endl;
return params;
}
int main() {
// Sample data: y = 2 * exp(-x)
VectorXd x_data(6);
x_data << 0, 1, 2, 3, 4, 5;
VectorXd y_data(6);
y_data = 2 * (-1 * x_data.array()).exp();
// Initial parameter guess (a, b)
VectorXd initial_guess(2);
initial_guess << 1.0, -1.0;
// Run the Gauss-Newton method
VectorXd solution = gauss_newton(x_data, y_data, initial_guess);
// Print the result
std::cout << "Fitted parameters: a = " << solution(0) << ", b = " << solution(1) << std::endl;
return 0;
}
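Forming J^T * J explicitly squares the condition number of the problem. An equivalent, numerically safer way to obtain the Gauss-Newton step is to solve the linear least-squares subproblem on J directly with a QR factorization; a minimal sketch of the line that could replace the ldlt-based update above:
// Solve the linear least-squares subproblem min ||J * delta_params - res||
// directly, without forming the normal equations J^T * J.
VectorXd delta_params = J.colPivHouseholderQr().solve(res);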
4. Levenberg-Marquardt (LM) method
#include <iostream>
#include <Eigen/Dense>
#include <cmath>
using Eigen::VectorXd;
using Eigen::MatrixXd;
// Exponential decay model: y = a * exp(b * x)
VectorXd model(const VectorXd& params, const VectorXd& x) {
double a = params(0);
double b = params(1);
return a * (b * x.array()).exp();
}
// Residuals of the model against the data
VectorXd residuals(const VectorXd& params, const VectorXd& x, const VectorXd& y) {
return model(params, x) - y;
}
// Jacobian matrix of the residuals
MatrixXd jacobian(const VectorXd& params, const VectorXd& x) {
double a = params(0);
double b = params(1);
MatrixXd J(x.size(), 2);
J.col(0) = (b * x.array()).exp(); // partial derivative with respect to a
J.col(1) = a * x.array() * (b * x.array()).exp(); // partial derivative with respect to b
return J;
}
// Levenberg-Marquardt algorithm
VectorXd levenberg_marquardt(const VectorXd& x, const VectorXd& y, const VectorXd& initial_params, int max_iter = 1000, double tol = 1e-6, double lambda_init = 0.01) {
VectorXd params = initial_params;
double lamb = lambda_init;
for (int i = 0; i < max_iter; ++i) {
VectorXd res = residuals(params, x, y);
MatrixXd J = jacobian(params, x);
// Gauss-Newton approximation of the Hessian: H = J^T * J
MatrixXd H = J.transpose() * J;
// Add lambda times the identity matrix to damp the step
MatrixXd H_lm = H + lamb * MatrixXd::Identity(params.size(), params.size());
// Gradient
VectorXd gradient = J.transpose() * res;
// Solve for the parameter update
VectorXd delta_params = H_lm.ldlt().solve(gradient);
// Tentative parameter update
VectorXd params_new = params - delta_params;
// Residuals at the tentative parameters
VectorXd res_new = residuals(params_new, x, y);
// If the new sum of squared residuals is smaller, accept the step and decrease lambda
if (res_new.squaredNorm() < res.squaredNorm()) {
params = params_new;
lamb /= 10;
} else {
// Otherwise reject the step and increase lambda
lamb *= 10;
}
// Stop when the proposed step becomes sufficiently small
if (delta_params.norm() < tol) {
std::cout << "Converged after " << i + 1 << " iterations." << std::endl;
return params;
}
}
std::cout << "Max iterations exceeded. No solution found." << std::endl;
return params;
}
int main() {
// Sample data: y = 2 * exp(-x)
VectorXd x_data(6);
x_data << 0, 1, 2, 3, 4, 5;
VectorXd y_data(6);
y_data = 2 * (-1 * x_data.array()).exp();
// Initial parameter guess (a, b)
VectorXd initial_guess(2);
initial_guess << 1.0, -1.0;
// Run the Levenberg-Marquardt method
VectorXd solution = levenberg_marquardt(x_data, y_data, initial_guess);
// Print the result
std::cout << "Fitted parameters: a = " << solution(0) << ", b = " << solution(1) << std::endl;
return 0;
}
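Whichever solver is used, a quick way to catch mistakes in a hand-written Jacobian is to compare it against a finite-difference approximation. Below is a minimal central-difference sketch (the helper name numerical_jacobian is ours) that reuses residuals from the listing above.
// Central-difference approximation of the Jacobian of the residuals,
// useful as a sanity check on the analytic jacobian() above.
MatrixXd numerical_jacobian(const VectorXd& params, const VectorXd& x, const VectorXd& y, double eps = 1e-6) {
MatrixXd J(x.size(), params.size());
for (int j = 0; j < params.size(); ++j) {
VectorXd p_plus = params;
VectorXd p_minus = params;
p_plus(j) += eps;
p_minus(j) -= eps;
J.col(j) = (residuals(p_plus, x, y) - residuals(p_minus, x, y)) / (2.0 * eps);
}
return J;
}
// Usage: (jacobian(params, x) - numerical_jacobian(params, x, y)).norm() should be close to zero.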