1 Introduction to the KNN Algorithm
- Core idea of KNN: if the majority of the K most similar samples (nearest neighbors in feature space) of a sample belong to a certain class, that sample is assigned to the same class. A minimal from-scratch sketch follows this list.
- If K is too small, the prediction relies on a very small neighborhood of training instances.
  - It is easily influenced by outliers.
  - A smaller K makes the overall model more complex and prone to overfitting.
- If K is too large, the prediction relies on a large neighborhood of training instances.
  - It is affected by class imbalance (the majority class tends to dominate the vote).
  - A larger K makes the overall model simpler and prone to underfitting.
- Tuning K: use cross-validation and grid search; prefer odd values of K and avoid multiples of the number of classes (e.g. 5 or 7).
- KNN can be used for both classification and regression problems.
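A minimal from-scratch sketch of this idea (for illustration only, with made-up data; the scikit-learn API shown in the next section is what you would use in practice): distances to all training samples are computed, the K closest labels are collected, and the majority label wins.

import numpy as np
from collections import Counter

def knn_predict(X_train, y_train, x_new, k=3):
    # Euclidean distance from the new sample to every training sample
    distances = np.linalg.norm(X_train - x_new, axis=1)
    # indices of the k closest training samples
    nearest = np.argsort(distances)[:k]
    # majority vote among the labels of the k nearest neighbors
    return Counter(y_train[nearest]).most_common(1)[0][0]

X_train = np.array([[1, 1], [1, 2], [5, 5], [6, 5]])
y_train = np.array([0, 0, 1, 1])
print(knn_predict(X_train, y_train, np.array([2, 1]), k=3))  # -> 0 (two of the three nearest neighbors have label 0)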
2 KNN Algorithm API Usage
# Classification: with n_neighbors=1, the nearest neighbor of [4] is [3] (label 1), so the prediction is 1
from sklearn.neighbors import KNeighborsClassifier

def dm01_knnapi_classifier():
    estimator = KNeighborsClassifier(n_neighbors=1)
    X = [[0], [1], [2], [3]]
    y = [0, 0, 1, 1]
    estimator.fit(X, y)
    myret = estimator.predict([[4]])
    print('myret->', myret)

dm01_knnapi_classifier()
# Regression: the predicted value is the mean of the targets of the n_neighbors nearest neighbors
from sklearn.neighbors import KNeighborsRegressor

def dm02_knnapi_regressor():
    estimator = KNeighborsRegressor(n_neighbors=2)
    X = [[0, 0, 1],
         [1, 1, 0],
         [3, 10, 10],
         [4, 11, 12]]
    y = [0.1, 0.2, 0.3, 0.4]
    estimator.fit(X, y)
    # the two nearest neighbors of [3, 11, 10] have targets 0.3 and 0.4, so the prediction is 0.35
    myret = estimator.predict([[3, 11, 10]])
    print('myret->', myret)

dm02_knnapi_regressor()
3 Distance Metrics
3.1 Euclidean distance (L2; straight-line distance, related to MSE/RMSE-style error)
3.2 Manhattan distance (L1; related to MAE-style error)
- Also called "city block distance", after Manhattan's street grid: movement is only along horizontal and vertical segments.
3.3 Chebyshev distance: the maximum absolute difference over all dimensions.
3.4 Minkowski distance: the general form with a parameter p; p=1 gives Manhattan, p=2 gives Euclidean, and p→∞ gives Chebyshev.
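A minimal NumPy sketch (with made-up vectors) computing the four metrics; the values in the comments follow directly from the definitions above.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 0.0, 3.0])
diff = np.abs(a - b)                      # [3, 2, 0]

euclidean = np.sqrt(np.sum(diff ** 2))    # sqrt(9 + 4 + 0) ≈ 3.61
manhattan = np.sum(diff)                  # 3 + 2 + 0 = 5
chebyshev = np.max(diff)                  # largest component difference = 3
p = 3
minkowski = np.sum(diff ** p) ** (1 / p)  # general form; p=2 reproduces the Euclidean result

print(euclidean, manhattan, chebyshev, minkowski)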
4 Feature Preprocessing
- Min-max normalization is sensitive to outliers: a single extreme value changes a column's min/max and therefore the whole scaling.
- Standardization is usually preferred, since many real-world features are roughly Gaussian-distributed and the mean/standard deviation are less affected by a few outliers. Both transformations are summarized below.
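For reference, the two transformations applied to each feature column are (standard definitions, consistent with scikit-learn's MinMaxScaler and StandardScaler):

Normalization (min-max):   x' = (x - min) / (max - min), mapping each column to [0, 1]
Standardization (z-score): x' = (x - mean) / std, giving each column zero mean and unit variance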
# Normalization (min-max scaling)
from sklearn.preprocessing import MinMaxScaler

def dm01_MinMaxScaler():
    data = [
        [90, 2, 10, 40],
        [60, 4, 15, 45],
        [75, 3, 13, 46]
    ]
    # create the scaler
    transformer = MinMaxScaler()
    # fit computes the per-column min and max; transform applies the scaling
    # (equivalent shortcut: data = transformer.fit_transform(data))
    transformer.fit(data)
    data = transformer.transform(data)
    print(data)

dm01_MinMaxScaler()
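As a quick check on the first column: min = 60 and max = 90, so 90 maps to 1.0, 60 maps to 0.0, and 75 maps to (75 - 60) / 30 = 0.5.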
# Standardization (z-score scaling)
from sklearn.preprocessing import StandardScaler

def dm03_StandardScale():
    data = [
        [90, 2, 10, 40],
        [60, 4, 15, 45],
        [75, 3, 13, 46]
    ]
    # create the scaler
    transformer = StandardScaler()
    # fit_transform computes the per-column mean and variance, then applies the scaling
    data = transformer.fit_transform(data)
    # print the standardized result
    print(data)
    # print the mean and variance of each column
    print('transformer.mean_ -->', transformer.mean_)
    print('transformer.var_ -->', transformer.var_)

dm03_StandardScale()
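For the first column: mean = (90 + 60 + 75) / 3 = 75, variance = (15² + 15² + 0²) / 3 = 150 (StandardScaler uses the population variance), std ≈ 12.25, so 90 maps to about 1.22, 60 to about -1.22, and 75 to 0.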
5 [Case Study] Iris Classification
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier

# load the dataset
dataset = load_iris()
# split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.2, random_state=0
)
# standardize the features: fit on the training set only,
# then apply the same statistics to the test set to avoid data leakage
transfer = StandardScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)
# build the model
knn = KNeighborsClassifier()
# tune the hyperparameter with grid search + 5-fold cross-validation
knn = GridSearchCV(estimator=knn, param_grid={'n_neighbors': [1, 3, 5, 7]}, cv=5)
knn.fit(x_train, y_train)
print('knn.best_estimator_ --->', knn.best_estimator_)
# evaluate on the test set
knn_score = knn.score(x_test, y_test)
print('knn_score --->', knn_score)
# predict a new sample (it must be scaled with the same fitted scaler)
new_data = [[0.5, 1.2, 2.4, 5.2]]
new_data = transfer.transform(new_data)
print('predicted class --->', knn.predict(new_data))
print('predicted class probabilities --->', knn.predict_proba(new_data))
6 Hyperparameter Selection (Cross-Validation and Grid Search)
The Iris example above already performs hyperparameter selection: GridSearchCV evaluates every value listed in param_grid with 5-fold cross-validation (cv=5) and keeps the n_neighbors value with the best mean validation score.
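After fitting, the search results can be inspected. A minimal sketch, continuing from the knn GridSearchCV object fitted in the Iris example above:

# best hyperparameter value and its mean cross-validation accuracy
print(knn.best_params_)   # e.g. {'n_neighbors': 5}
print(knn.best_score_)
# mean validation score for every candidate value of n_neighbors
for params, score in zip(knn.cv_results_['params'], knn.cv_results_['mean_test_score']):
    print(params, score)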
7 [Case Study] Handwritten Digit Recognition
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier
import joblib
import matplotlib.pyplot as plt

# load the data (one row per image: the first column is the label, the remaining columns are pixel values)
dataset = pd.read_csv('手写数字识别.csv')
# separate features and labels, then split into training and test sets
x = dataset.iloc[:, 1:].values
y = dataset.iloc[:, 0].values
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, stratify=y, random_state=0
)
# normalize pixel values to [0, 1]
transfer = MinMaxScaler()
x_train = transfer.fit_transform(x_train)
x_test = transfer.transform(x_test)
# build the model
knn = KNeighborsClassifier()
# tune the hyperparameter with grid search + 5-fold cross-validation
knn = GridSearchCV(estimator=knn, param_grid={'n_neighbors': [3, 5, 7]}, cv=5)
knn.fit(x_train, y_train)
print('knn.best_estimator_', knn.best_estimator_)
# evaluate on the test set
acc = knn.score(x_test, y_test)
print('acc', acc)
# save the model
joblib.dump(knn, 'knn.pth')
# load the model
knn = joblib.load('knn.pth')
# read the image: plt.imread returns PNG pixel values already scaled to [0, 1]
# (a 28x28 grayscale image matching the training format is assumed)
img = plt.imread('demo.png')
# flatten the image into a single feature row and predict
img_data = img.reshape(1, -1)
img_pred = knn.predict(img_data)
print('img_pred', img_pred)
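The snippet above assumes demo.png already matches the training format. For an arbitrary image, preprocessing along the following lines could be used (a sketch, assuming Pillow is installed and the training data consists of 28x28 grayscale images with light digits on a dark background; the file name and the inversion step are illustrative assumptions):

import numpy as np
from PIL import Image

# open the image, convert to grayscale, and resize to the 28x28 training resolution
img = Image.open('demo.png').convert('L').resize((28, 28))
# scale pixel values to [0, 1]
arr = np.array(img, dtype=np.float32) / 255.0
# invert if the source image has dark digits on a light background (assumption)
arr = 1.0 - arr
# flatten to a single row of 784 features and predict with the loaded model
img_pred = knn.predict(arr.reshape(1, -1))
print('img_pred', img_pred)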