Plotting Machine Learning Error Charts


Below are the plotting helper class and the model-training snippets used to visualize regression error for several models: predicted vs. true values on the left panel, residuals vs. predicted values on the right.
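The code is presented as standalone snippets; a minimal set of imports they rely on is sketched here (assuming scikit-learn, seaborn, and xgboost are installed, and that the raw data has already been loaded into a DataFrame named df):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from xgboost import XGBRegressor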

Plotting class

# Define the ModelComparisonPlot class
class ModelComparisonPlot:
    def __init__(self, model_name):
        self.model_name = model_name
    
    def plot_comparison(self, y_val, y_pred, mse, mae, r2):
        # Create a figure with two subplots
        fig, axes = plt.subplots(1, 2, figsize=(11, 5))

        # Plot the predicted vs true values
        sns.regplot(x=y_val, y=y_pred, color='blue', scatter_kws={'alpha':0.5}, ax=axes[0])
        axes[0].plot([y_val.min(), y_val.max()], [y_val.min(), y_val.max()], 'k--', lw=2)
        axes[0].set_xlabel('True values', fontsize=12)
        axes[0].set_ylabel('Predicted values', fontsize=12)
        axes[0].set_title('Predicted vs true values')
        axes[0].grid(color='lightgray', linestyle='--', linewidth=0.5)

        # Plot the residuals vs predicted values
        residuals = y_val - y_pred
        sns.residplot(x=y_pred, y=residuals, color='blue', scatter_kws={'alpha':0.5}, ax=axes[1])
        axes[1].plot([y_val.min(), y_val.max()], [0, 0], 'k--', lw=2)
        axes[1].set_xlabel('Predicted values', fontsize=12)
        axes[1].set_ylabel('Residuals', fontsize=12)
        axes[1].set_title('Residual plot', fontsize=15)
        axes[1].grid(color='lightgray', linestyle='--', linewidth=0.5)
        # Add a title to the figure, including the evaluation metrics passed in
        fig.suptitle('Comparison of Predicted vs True Values and Residual Plot\n'
                     '{}  (MSE={:.3f}, MAE={:.3f}, R2={:.3f})'.format(self.model_name, mse, mae, r2),
                     fontsize=15)

        # Adjust the spacing between subplots
        plt.subplots_adjust(wspace=0.4)

        # Display the figure with the title
        plt.show()
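A quick usage sketch for the class (hypothetical names: any fitted regressor's validation targets y_val, its predictions y_pred, and the metrics computed for it would work):

# Hypothetical usage of the plotting class defined above
plotter = ModelComparisonPlot('SomeRegressor')
plotter.plot_comparison(y_val, y_pred, mse, mae, r2)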

One-hot encoding

# Assuming '实体' (Entity) is a categorical variable; drop_first avoids a redundant dummy column
X_encoded = pd.get_dummies(df, columns=['实体'], drop_first=True)
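For intuition, a tiny hypothetical example of what get_dummies with drop_first=True produces (the demo column values are made up):

demo = pd.DataFrame({'实体': ['A', 'B', 'C', 'A']})
print(pd.get_dummies(demo, columns=['实体'], drop_first=True, dtype=int))
# The first category ('A') is dropped, leaving indicator columns 实体_B and 实体_C;
# rows belonging to 'A' are encoded as all zeros.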

Defining features and target

# Impute missing values before deriving features and target
X_encoded = X_encoded.fillna(0)  # Replace with your preferred imputation method

# Assuming '人均国内生产总值' (GDP per capita) is the target variable
X = X_encoded.drop('人均国内生产总值', axis=1)
y = X_encoded['人均国内生产总值']

Splitting into training and test sets

# Assuming X and y are features and labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

Machine learning code

ExtraTreesRegressor


# Create an Extra Trees Regressor model
model_ETR = ExtraTreesRegressor(
    max_depth=None,
    max_features=None,
    min_samples_leaf=1,
    min_samples_split=2,
    n_estimators=300
)

# Fit the model
model_ETR.fit(X_train, y_train)

# Make predictions
y_pred = model_ETR.predict(X_test)

# Evaluate the model
mse = mean_squared_error(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)

# Print evaluation metrics
print(f"Model: {type(model_ETR).__name__}, mse: {mse}")
print(f"Model: {type(model_ETR).__name__}, mae: {mae}")
print(f"Model: {type(model_ETR).__name__}, r2: {r2}")
model_ETR_plot = ModelComparisonPlot('ExtraTreesRegressor')

# Set Seaborn style
sns.set(style="whitegrid", rc={"axes.facecolor": "white", "grid.color": "#D3D3D3"})

# Create a figure with two subplots
fig, axes = plt.subplots(1, 2, figsize=(11, 5), facecolor="#F5F5F5")
# Plot the predicted vs true values
sns.regplot(x=y_test, y=y_pred, color='red', scatter_kws={'alpha': 0.5}, ax=axes[0])
axes[0].plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
axes[0].set_xlabel('True values', fontsize=12, color="red")
axes[0].set_ylabel('Predicted values', fontsize=12, color="red")
axes[0].set_title('Predicted vs true values', color="red")
axes[0].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Plot the residuals vs predicted values
residuals = y_test - y_pred
sns.residplot(x=y_pred, y=residuals, color='green', scatter_kws={'alpha': 0.5}, ax=axes[1])
axes[1].plot([y_test.min(), y_test.max()], [0, 0], 'k--', lw=2)
axes[1].set_xlabel('Predicted values', fontsize=12, color="green")
axes[1].set_ylabel('Residuals', fontsize=12, color="green")
axes[1].set_title('Residual plot', fontsize=15, color="green")
axes[1].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Add a title to the figure
fig.suptitle('Comparison of Predicted vs True Values and Residual Plot\n{}'.format(model_ETR_plot.model_name), fontsize=15, color="black")
# Adjust the spacing between subplots
plt.subplots_adjust(wspace=0.4)

# Display the figure with the title
plt.show()


DecisionTreeRegressor

# Create a decision tree regression model
dt_model = DecisionTreeRegressor()
# Fit the model
dt_model.fit(X_train, y_train)
# Make predictions
predictions = dt_model.predict(X_test)
# Evaluate the model
mse = mean_squared_error(y_test, predictions)
mae = mean_absolute_error(y_test, predictions)
r2 = r2_score(y_test, predictions)
# Print evaluation metrics
print(f"Model: {type(dt_model).__name__}, mse: {mse}")
print(f"Model: {type(dt_model).__name__}, mae: {mae}")
print(f"Model: {type(dt_model).__name__}, r2: {r2}")
dt_model_plot = ModelComparisonPlot('DecisionTreeRegressor')

# Set Seaborn style
sns.set(style="whitegrid", rc={"axes.facecolor": "white", "grid.color": "#D3D3D3"})

# Create a figure with two subplots
fig, axes = plt.subplots(1, 2, figsize=(11, 5), facecolor="#F5F5F5")
# Plot the predicted vs true values
sns.regplot(x=y_test, y=predictions, color='red', scatter_kws={'alpha': 0.5}, ax=axes[0])
axes[0].plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
axes[0].set_xlabel('True values', fontsize=12, color="red")
axes[0].set_ylabel('Predicted values', fontsize=12, color="red")
axes[0].set_title('Predicted vs true values', color="red")
axes[0].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Plot the residuals vs predicted values
residuals = y_test - predictions
sns.residplot(x=predictions, y=residuals, color='green', scatter_kws={'alpha': 0.5}, ax=axes[1])
axes[1].plot([y_test.min(), y_test.max()], [0, 0], 'k--', lw=2)
axes[1].set_xlabel('Predicted values', fontsize=12, color="green")
axes[1].set_ylabel('Residuals', fontsize=12, color="green")
axes[1].set_title('Residual plot', fontsize=15, color="green")
axes[1].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Add a title to the figure
fig.suptitle('Comparison of Predicted vs True Values and Residual Plot\n{}'.format(dt_model_plot.model_name), fontsize=15, color="black")
# Adjust the spacing between subplots
plt.subplots_adjust(wspace=0.4)

# Display the figure with the title
plt.show()

LinearRegression

# Create a linear regression model
linear_model = LinearRegression()

# Fit the model
linear_model.fit(X_train, y_train)

# Make predictions
predictions_linear = linear_model.predict(X_test)

# Evaluate the model
mse_linear = mean_squared_error(y_test, predictions_linear)
mae_linear = mean_absolute_error(y_test, predictions_linear)
r2_linear = r2_score(y_test, predictions_linear)

# Print evaluation metrics
print(f"Model: {type(linear_model).__name__}, mse: {mse_linear}")
print(f"Model: {type(linear_model).__name__}, mae: {mae_linear}")
print(f"Model: {type(linear_model).__name__}, r2: {r2_linear}")
linear_model_plot = ModelComparisonPlot('LinearRegression')

# Set Seaborn style
sns.set(style="whitegrid", rc={"axes.facecolor": "white", "grid.color": "#D3D3D3"})

# Create a figure with two subplots
fig, axes = plt.subplots(1, 2, figsize=(11, 5), facecolor="#F5F5F5")
# Plot the predicted vs true values
sns.regplot(x=y_test, y=predictions_linear, color='red', scatter_kws={'alpha': 0.5}, ax=axes[0])
axes[0].plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
axes[0].set_xlabel('True values', fontsize=12, color="red")
axes[0].set_ylabel('Predicted values', fontsize=12, color="red")
axes[0].set_title('Predicted vs true values', color="red")
axes[0].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Plot the residuals vs predicted values
residuals_linear = y_test - predictions_linear
sns.residplot(x=predictions_linear, y=residuals_linear, color='green', scatter_kws={'alpha': 0.5}, ax=axes[1])
axes[1].plot([y_test.min(), y_test.max()], [0, 0], 'k--', lw=2)
axes[1].set_xlabel('Predicted values', fontsize=12, color="green")
axes[1].set_ylabel('Residuals', fontsize=12, color="green")
axes[1].set_title('Residual plot', fontsize=15, color="green")
axes[1].grid(color='lightgray', linestyle='--', linewidth=0.5)
# Add a title to the figure
fig.suptitle('Comparison of Predicted vs True Values and Residual Plot\n{}'.format(linear_model_plot.model_name), fontsize=15, color="black")

# Adjust the spacing between subplots
plt.subplots_adjust(wspace=0.4)

# Display the figure with the title
plt.show()

KNeighborsRegressor

# Create a KNN regression model
knn_model = KNeighborsRegressor()

# Fit the model
knn_model.fit(X_train, y_train)

# Make predictions
predictions_knn = knn_model.predict(X_test)

# Evaluate the model
mse_knn = mean_squared_error(y_test, predictions_knn)
mae_knn = mean_absolute_error(y_test, predictions_knn)
r2_knn = r2_score(y_test, predictions_knn)

# Print evaluation metrics
print(f"Model: {type(knn_model).__name__}, mse: {mse_knn}")
print(f"Model: {type(knn_model).__name__}, mae: {mae_knn}")
print(f"Model: {type(knn_model).__name__}, r2: {r2_knn}")
knn_model_plot = ModelComparisonPlot('KNeighborsRegressor')

# Set Seaborn style
sns.set(style="whitegrid", rc={"axes.facecolor": "white", "grid.color": "#D3D3D3"})

# Create a figure with two subplots
fig, axes = plt.subplots(1, 2, figsize=(11, 5), facecolor="#F5F5F5")
# Plot the predicted vs true values
sns.regplot(x=y_test, y=predictions_knn, color='red', scatter_kws={'alpha': 0.5}, ax=axes[0])
axes[0].plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
axes[0].set_xlabel('True values', fontsize=12, color="red")
axes[0].set_ylabel('Predicted values', fontsize=12, color="red")
axes[0].set_title('Predicted vs true values', color="red")
axes[0].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Plot the residuals vs predicted values
residuals_knn = y_test - predictions_knn
sns.residplot(x=predictions_knn, y=residuals_knn, color='green', scatter_kws={'alpha': 0.5}, ax=axes[1])
axes[1].plot([y_test.min(), y_test.max()], [0, 0], 'k--', lw=2)
axes[1].set_xlabel('Predicted values', fontsize=12, color="green")
axes[1].set_ylabel('Residuals', fontsize=12, color="green")
axes[1].set_title('Residual plot', fontsize=15, color="green")
axes[1].grid(color='lightgray', linestyle='--', linewidth=0.5)
# Add a title to the figure
fig.suptitle('Comparison of Predicted vs True Values and Residual Plot\n{}'.format(knn_model_plot.model_name), fontsize=15, color="black")

# Adjust the spacing between subplots
plt.subplots_adjust(wspace=0.4)

# Display the figure with the title
plt.show()

XGBoost

# Convert 'Density' column to numeric
X_train['Density'] = pd.to_numeric(X_train['Density'], errors='coerce')
X_test['Density'] = pd.to_numeric(X_test['Density'], errors='coerce')

# Drop rows with missing values after conversion, keeping the labels aligned with the features
train_mask = X_train.notna().all(axis=1)
test_mask = X_test.notna().all(axis=1)
X_train, y_train = X_train[train_mask], y_train[train_mask]
X_test, y_test = X_test[test_mask], y_test[test_mask]

# Create an XGBoost regression model
xgb_model = XGBRegressor()

# Fit the model
xgb_model.fit(X_train, y_train)

# Make predictions
predictions_xgb = xgb_model.predict(X_test)

# Evaluate the model
mse_xgb = mean_squared_error(y_test, predictions_xgb)
mae_xgb = mean_absolute_error(y_test, predictions_xgb)
r2_xgb = r2_score(y_test, predictions_xgb)

# Print evaluation metrics
print(f"Model: {type(xgb_model).__name__}, mse: {mse_xgb}")
print(f"Model: {type(xgb_model).__name__}, mae: {mae_xgb}")
print(f"Model: {type(xgb_model).__name__}, r2: {r2_xgb}")
xgb_model_plot = ModelComparisonPlot('XGBRegressor')

# Set Seaborn style
sns.set(style="whitegrid", rc={"axes.facecolor": "white", "grid.color": "#D3D3D3"})

# Create a figure with two subplots
fig, axes = plt.subplots(1, 2, figsize=(11, 5), facecolor="#F5F5F5")
# Plot the predicted vs true values
sns.regplot(x=y_test, y=predictions_xgb, color='red', scatter_kws={'alpha': 0.5}, ax=axes[0])
axes[0].plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'k--', lw=2)
axes[0].set_xlabel('True values', fontsize=12, color="red")
axes[0].set_ylabel('Predicted values', fontsize=12, color="red")
axes[0].set_title('Predicted vs true values', color="red")
axes[0].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Plot the residuals vs predicted values
residuals_xgb = y_test - predictions_xgb
sns.residplot(x=predictions_xgb, y=residuals_xgb, color='green', scatter_kws={'alpha': 0.5}, ax=axes[1])
axes[1].plot([y_test.min(), y_test.max()], [0, 0], 'k--', lw=2)
axes[1].set_xlabel('Predicted values', fontsize=12, color="green")
axes[1].set_ylabel('Residuals', fontsize=12, color="green")
axes[1].set_title('Residual plot', fontsize=15, color="green")
axes[1].grid(color='lightgray', linestyle='--', linewidth=0.5)
# Add a title to the figure
fig.suptitle('Comparison of Predicted vs True Values and Residual Plot\n{}'.format(xgb_model_plot.model_name), fontsize=15, color="black")

# Adjust the spacing between subplots
plt.subplots_adjust(wspace=0.4)

# Display the figure with the title
plt.show()

Gaussian Naive Bayes

# Convert continuous labels to binary categories (above/below the training-set mean)
y_train_binary = (y_train > y_train.mean()).astype(int)
y_test_binary = (y_test > y_train.mean()).astype(int)

# Create a Naive Bayes model
nb_model = GaussianNB()

# Fit the model
nb_model.fit(X_train, y_train_binary)

# Make predictions (stored under their own name so the code below does not overwrite them)
predictions_nb = nb_model.predict(X_test)
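Because GaussianNB is used here as a classifier on the binarized target, a classification metric is the natural way to score it; the block below is a small sketch using scikit-learn's accuracy_score (not part of the original evaluation):

from sklearn.metrics import accuracy_score

# Fraction of test samples whose above/below-mean class was predicted correctly
acc_nb = accuracy_score(y_test_binary, predictions_nb)
print(f"Model: {type(nb_model).__name__}, accuracy: {acc_nb:.3f}")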
Cross-validation model selection

# Rebuild the features and target for the comparison below, this time predicting GDP growth
X_encoded['Density'] = pd.to_numeric(X_encoded['Density'], errors='coerce')
X_encoded = X_encoded.fillna(0)

# Drop the target column '国内生产总值增长率' (GDP growth) from the features
X = X_encoded.drop('国内生产总值增长率', axis=1)
y = X_encoded['国内生产总值增长率']

# Assuming X and y are your features and labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# List of algorithms to check
algorithms = [
    LinearRegression(),
    DecisionTreeRegressor(),
    KNeighborsRegressor(),
    XGBRegressor()
]

best_mse = float('inf')
best_model = None
# Loop through each algorithm
# (cross_val_score below clones and refits each model per fold, so no separate fit/predict step is needed here)
for model in algorithms:

    # Evaluate the model using cross-validation with mean squared error
    mse_scores = -cross_val_score(model, X, y, cv=5, scoring='neg_mean_squared_error')
    mean_mse = np.mean(mse_scores)
    
    # Print the cross-validation mean squared error
    print(f"{model.__class__.__name__} - Cross-Validation MSE: {mean_mse}")

    # Update the best model if the current model has lower mean squared error
    if mean_mse < best_mse:
        best_mse = mean_mse
        best_model = model

# Print the best model and its mean squared error
print("\nBest Model:")
print(best_model)
print("Best Cross-Validation MSE:", best_mse)
# Set Seaborn style
sns.set(style="whitegrid", rc={"axes.facecolor": "white", "grid.color": "#D3D3D3"})

# Create a figure with two subplots
fig, axes = plt.subplots(1, 2, figsize=(11, 5), facecolor="#F5F5F5")
# Plot the predicted vs true values
sns.regplot(x=y_test_binary, y=predictions_nb, color='red', scatter_kws={'alpha': 0.5}, ax=axes[0])
axes[0].plot([y_test_binary.min(), y_test_binary.max()], [y_test_binary.min(), y_test_binary.max()], 'k--', lw=2)
axes[0].set_xlabel('True values', fontsize=12, color="red")
axes[0].set_ylabel('Predicted values', fontsize=12, color="red")
axes[0].set_title('Predicted vs true values', color="red")
axes[0].grid(color='lightgray', linestyle='--', linewidth=0.5)

# Plot the residuals vs predicted values
residuals_nb = y_test_binary - predictions_nb
sns.residplot(x=predictions_nb, y=residuals_nb, color='green', scatter_kws={'alpha': 0.5}, ax=axes[1])
axes[1].plot([y_test_binary.min(), y_test_binary.max()], [0, 0], 'k--', lw=2)
axes[1].set_xlabel('Predicted values', fontsize=12, color="green")
axes[1].set_ylabel('Residuals', fontsize=12, color="green")
axes[1].set_title('Residual plot', fontsize=15, color="green")
axes[1].grid(color='lightgray', linestyle='--', linewidth=0.5)
# Add a title to the figure
fig.suptitle('Comparison of Predicted vs True Values and Residual Plot\nGaussianNB', fontsize=15, color="black")

# Adjust the spacing between subplots
plt.subplots_adjust(wspace=0.4)

# Display the figure with the title
plt.show()
