Working with Multiple Machine Learning Models


Feature Processing

import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.impute import SimpleImputer
from sklearn.pipeline import FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelBinarizer

# Load the data
# Data source: https://github.com/bophancong/Handson_ml2-master/tree/master/datasets/housing
housing = pd.read_csv("housing.csv")
# Create an income category for stratified sampling
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"] = housing["income_cat"].where(housing["income_cat"] < 5, 5.0)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)

# Stratified sampling: split the dataset by income_cat
for train_index, test_index in split.split(housing, housing["income_cat"]):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]

# Drop income_cat now that the split is done (set_ avoids shadowing the built-in set)
for set_ in (strat_train_set, strat_test_set):
    set_.drop(["income_cat"], axis=1, inplace=True)

################### We now have a training set and a test set ###################

# Process the training set
# Split features and target values
train_features = strat_train_set.drop('median_house_value', axis=1)
train_target = strat_train_set['median_house_value'].copy()
# Same for the test set
test_features = strat_test_set.drop('median_house_value', axis=1)
test_target = strat_test_set['median_house_value'].copy()


# Count the categorical and continuous features (remember to exclude the id and the target)
cat_features = list(train_features.select_dtypes(include=['object']))
print('Categorical: {} features'.format(len(cat_features)))
cont_features = [cont for cont in list(train_features.select_dtypes(include=['float64','int64'])) if cont not in ['median_house_value','id']]
print('Continuous: {} features'.format(len(cont_features)))
# Build new features from the four columns below
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6  # total_rooms, total_bedrooms, population, households
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    def __init__(self, add_bedrooms_per_room=True):
        self.add_bedrooms_per_room = add_bedrooms_per_room

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
        population_per_household = X[:, population_ix] / X[:, household_ix]
        if self.add_bedrooms_per_room:
            bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
            return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
        else:
            return np.c_[X, rooms_per_household, population_per_household]

# Column selector: pick the given columns from a DataFrame, return a NumPy array
class DataFrameSelector(BaseEstimator, TransformerMixin):
    def __init__(self, attribute_names):
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.attribute_names].values

# Wrapper so LabelBinarizer can run inside a Pipeline
# (its plain fit/transform signature is otherwise incompatible)
class MyLabelBinarizer(BaseEstimator, TransformerMixin):
    def __init__(self, *args, **kwargs):
        self.encoder = LabelBinarizer(*args, **kwargs)
    def fit(self, x, y=None):
        self.encoder.fit(x)
        return self
    def transform(self, x, y=None):
        return self.encoder.transform(x)
train_num = train_features.drop('ocean_proximity', axis=1)
num_attribs = list(train_num)
cat_attribs = ['ocean_proximity']

num_pipeline = Pipeline([('selector', DataFrameSelector(num_attribs)),
                         ('imputer', SimpleImputer(strategy='median')),
                         ('attribs_adder', CombinedAttributesAdder()),
                         ('std_scaler', StandardScaler()),
                        ])

cat_pipeline = Pipeline([('selector', DataFrameSelector(cat_attribs)),
                         ('label_binarizer', MyLabelBinarizer()),
                        ])

full_pipeline = FeatureUnion(transformer_list=[('num_pipeline', num_pipeline),
                                               ('cat_pipeline', cat_pipeline),
                                              ])

final_train_features = full_pipeline.fit_transform(train_features)
final_train_target = train_target
# The same fitted pipeline transforms test_features
final_test_features = full_pipeline.transform(test_features)
final_test_target = test_target
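
A quick sanity check on the pipeline output (a sketch; the row counts depend on the split, and the column count assumes the standard dataset: 8 numeric + 3 engineered + 5 one-hot = 16):

# Verify that the train and test matrices have the same number of columns
print(final_train_features.shape)  # e.g. (16512, 16)
print(final_test_features.shape)   # e.g. (4128, 16)
assert final_train_features.shape[1] == final_test_features.shape[1]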

At this point, the features and target values of both the training set and the test set are ready.
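Side note: with scikit-learn 0.20 and later, ColumnTransformer plus OneHotEncoder can replace the DataFrameSelector/FeatureUnion/MyLabelBinarizer workaround above. A minimal sketch under that version assumption (full_pipeline_ct is a name introduced here, not from the original):

# Sketch (assumes scikit-learn >= 0.20): ColumnTransformer instead of
# DataFrameSelector + FeatureUnion + MyLabelBinarizer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

full_pipeline_ct = ColumnTransformer([
    ('num', Pipeline([('imputer', SimpleImputer(strategy='median')),
                      ('attribs_adder', CombinedAttributesAdder()),
                      ('std_scaler', StandardScaler())]), num_attribs),
    ('cat', OneHotEncoder(), cat_attribs),
])
# final_train_features_ct = full_pipeline_ct.fit_transform(train_features)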

Try several machine learning methods and find the best one

Linear Regression

# Regression models
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
# Linear regression
lin_reg = LinearRegression()
lin_reg.fit(final_train_features, final_train_target)
# Compute the RMSE on the training set
lr_pred_train_target = lin_reg.predict(final_train_features)
lin_mse = mean_squared_error(final_train_target, lr_pred_train_target)
lin_rmse = np.sqrt(lin_mse)
# Print lin_rmse
print(lin_rmse)

Decision Tree

# Decision tree
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(final_train_features, final_train_target)
dtr_pred_train_target = tree_reg.predict(final_train_features)
tree_mse = mean_squared_error(final_train_target, dtr_pred_train_target)
tree_rmse = np.sqrt(tree_mse)
# Print tree_rmse
print(tree_rmse)

Random Forest

# Random forest
from sklearn.ensemble import RandomForestRegressor

forest_reg = RandomForestRegressor()
forest_reg.fit(final_train_features, final_train_target)
rf_pred_train_target = forest_reg.predict(final_train_features)
forest_mse = mean_squared_error(final_train_target, rf_pred_train_target)
forest_rmse = np.sqrt(forest_mse)
# Print the training RMSE
print(forest_rmse)

Cross-Validating Each Model

# Use cross-validation for a more reliable evaluation
from sklearn.model_selection import cross_val_score

# Helper that prints cross-validation results
def display_scores(scores):
    print("Scores:", scores)
    print("Mean:", scores.mean())
    print("Std:", scores.std())

# 计算决策树模型的交叉验证结果
dtr_scores = cross_val_score(tree_reg, final_train_features, final_train_target,
                        scoring='neg_mean_squared_error', cv=10)
tree_rmse_scores = np.sqrt(-dtr_scores)
print("---------Decision Tree Regression")
displayScores(tree_rmse_scores)

# Cross-validation results for the linear model
lr_scores = cross_val_score(lin_reg, final_train_features, final_train_target,
                            scoring='neg_mean_squared_error', cv=10)
lin_rmse_scores = np.sqrt(-lr_scores)
print("---------Linear Regression")
display_scores(lin_rmse_scores)

# Random forest
rf_scores = cross_val_score(forest_reg, final_train_features, final_train_target,
                            scoring='neg_mean_squared_error', cv=10)
forest_rmse_scores = np.sqrt(-rf_scores)
print("---------Random Forest")
display_scores(forest_rmse_scores)
  • Plain linear regression: the model clearly underfits; the features do not carry enough information for a good prediction, or the model itself is not powerful enough.
  • Plain decision tree: the near-zero training error suggests the model is overfitting.
  • Cross-validation confirms it: the decision tree overfits so badly that it performs even worse than linear regression.
  • For now the random forest works best; below we fine-tune the random forest model (a quick diagnostic sketch follows this list).
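
A small diagnostic sketch (uses only the RMSE values computed above): a training RMSE far below the cross-validation RMSE signals overfitting, while both being high signals underfitting.

# Compare training RMSE against cross-validation RMSE for each model
for name, train_rmse, cv_rmse in [
    ('linear', lin_rmse, lin_rmse_scores.mean()),
    ('tree', tree_rmse, tree_rmse_scores.mean()),
    ('forest', forest_rmse, forest_rmse_scores.mean()),
]:
    print(f"{name}: train RMSE = {train_rmse:.0f}, CV RMSE = {cv_rmse:.0f}")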

Saving and Loading Models

# Save the model
import joblib

output_path = 'model/'
if not os.path.isdir(output_path):
    os.makedirs(output_path)
joblib.dump(forest_reg, output_path + 'forest_reg.pkl')

# Release the in-memory model to prove the reload works
import gc
del forest_reg
gc.collect()

# Load the saved model and test it
forest_reg = joblib.load(output_path + 'forest_reg.pkl')
forest_reg.predict(final_train_features)
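
A hedged sanity check on the save/load round trip: the reloaded model should reproduce the predictions computed before saving.

# Sketch: the reloaded model's predictions should match rf_pred_train_target exactly
reloaded_pred = forest_reg.predict(final_train_features)
print(np.allclose(reloaded_pred, rf_pred_train_target))  # expected: True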

Tuning the Best Machine Learning Method

  1. Grid search
  2. Randomized search
  3. Ensemble methods

Grid Search

from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
param_grid = [
    {'n_estimators': [3,10,30], 'max_features': [2,4,6,8]},
    {'bootstrap': [False], 'n_estimators': [3,10], 'max_features': [2,3,4]},
]

forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, 
                           scoring='neg_mean_squared_error')
grid_search.fit(final_train_features, final_train_target)

# Print the best parameters and estimator
print(grid_search.best_params_)
print(grid_search.best_estimator_)


# Evaluate the score of each parameter combination
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres['mean_test_score'], cvres['params']):
    print(np.sqrt(-mean_score), params)
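
The best cross-validation score is also available directly on the search object; since scores are negative MSE, converting back to an RMSE looks like this:

# RMSE of the best parameter combination found by the grid search
print(np.sqrt(-grid_search.best_score_))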

Randomized Search

from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
distributions = dict(n_estimators=[3,10,30], max_features=[2,4,6,8])

forest_reg_rs = RandomForestRegressor()
grid_search_rs = RandomizedSearchCV(forest_reg_rs, distributions, 
                                 random_state=0, cv=5,
                                 scoring='neg_mean_squared_error')
grid_search_rs.fit(final_train_features, final_train_target)

# Print the best parameters and estimator
print(grid_search_rs.best_params_)
print(grid_search_rs.best_estimator_)

# Evaluate the scores
cvres_rs = grid_search_rs.cv_results_
for mean_score_rs, params_rs in zip(cvres_rs['mean_test_score'], cvres_rs['params']):
    print(np.sqrt(-mean_score_rs), params_rs)

Ensemble Methods

Not covered yet; a minimal sketch follows.
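
As one possible direction (an illustration, not the author's method): average the predictions of the already-fitted linear and forest models and score the blend.

# Ensemble sketch (an assumption, not from the original): simple prediction averaging
ensemble_pred = (lin_reg.predict(final_train_features) +
                 forest_reg.predict(final_train_features)) / 2
ensemble_rmse = np.sqrt(mean_squared_error(final_train_target, ensemble_pred))
print(ensemble_rmse)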

Analyzing the Best Models and Their Errors

  • Based on feature importance, drop useless features (a sketch follows the importance listing below).
  • Examine the systematic errors, understand what causes them, and fix the problem (add or remove features, clean outliers, and so on).
# Print feature importances
feature_importances = grid_search_rs.best_estimator_.feature_importances_

# Build the feature names:
# numeric features come straight from num_attribs;
# categorical feature names come from the fitted LabelBinarizer classes
label_binarizer = MyLabelBinarizer()
label_binarizer.fit(train_features['ocean_proximity'])
cat_feature_names = label_binarizer.encoder.classes_

# Full feature-name list: numeric + 3 engineered + one-hot categories
feature_names = num_attribs + ['rooms_per_household', 'population_per_household', 'bedrooms_per_room'] + list(cat_feature_names)

# The importances array already lines up with feature_names one-to-one
# (each one-hot column gets its own importance), so a length check suffices
assert len(feature_importances) == len(feature_names)

# Combine importances and names into a DataFrame, sorted descending
feature_importances_df = pd.DataFrame({
    'feature': feature_names,
    'importance': feature_importances
}).sort_values('importance', ascending=False)

# Print each feature's importance
print(feature_importances_df)
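
Following the first bullet above, a sketch that keeps only features above an importance cutoff; the 0.01 threshold is an arbitrary assumption, not a recommended value.

# Sketch: drop low-importance features using an assumed cutoff
threshold = 0.01  # hypothetical value, tune as needed
keep_mask = feature_importances >= threshold
print("keeping:", [n for n, k in zip(feature_names, keep_mask) if k])
reduced_train_features = final_train_features[:, keep_mask]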

Test Set Evaluation

from sklearn.metrics import mean_squared_error
final_model = grid_search.best_estimator_
final_predictions = final_model.predict(final_test_features)
final_mse = mean_squared_error(final_test_target, final_predictions)
final_rmse = np.sqrt(final_mse)
print(final_rmse)
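
A point estimate alone says little about uncertainty; a minimal sketch (requires scipy, which is an extra assumption here) of a 95% confidence interval for the test RMSE, via a t-interval on the per-sample squared errors:

# 95% confidence interval for the test RMSE (requires scipy)
from scipy import stats
confidence = 0.95
squared_errors = (final_predictions - final_test_target) ** 2
ci = np.sqrt(stats.t.interval(confidence, len(squared_errors) - 1,
                              loc=squared_errors.mean(),
                              scale=stats.sem(squared_errors)))
print(ci)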

Code Summary

import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.impute import SimpleImputer
from sklearn.pipeline import FeatureUnion
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelBinarizer

# Load the data
# Data source: https://github.com/bophancong/Handson_ml2-master/tree/master/datasets/housing
housing = pd.read_csv("housing.csv")
# Create an income category for stratified sampling
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"] = housing["income_cat"].where(housing["income_cat"] < 5, 5.0)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)

# Stratified sampling: split the dataset by income_cat
for train_index, test_index in split.split(housing, housing["income_cat"]):
    strat_train_set = housing.loc[train_index]
    strat_test_set = housing.loc[test_index]

# Drop income_cat now that the split is done (set_ avoids shadowing the built-in set)
for set_ in (strat_train_set, strat_test_set):
    set_.drop(["income_cat"], axis=1, inplace=True)

################### We now have a training set and a test set ###################

# Process the training set
# Split features and target values
train_features = strat_train_set.drop('median_house_value', axis=1)
train_target = strat_train_set['median_house_value'].copy()
# Same for the test set
test_features = strat_test_set.drop('median_house_value', axis=1)
test_target = strat_test_set['median_house_value'].copy()


# Build new features from the four columns below
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6  # total_rooms, total_bedrooms, population, households
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    def __init__(self, add_bedrooms_per_room=True):
        self.add_bedrooms_per_room = add_bedrooms_per_room

    def fit(self, X, y=None):
        return self

    def transform(self, X, y=None):
        rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
        population_per_household = X[:, population_ix] / X[:, household_ix]
        if self.add_bedrooms_per_room:
            bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
            return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
        else:
            return np.c_[X, rooms_per_household, population_per_household]

# Column selector: pick the given columns from a DataFrame, return a NumPy array
class DataFrameSelector(BaseEstimator, TransformerMixin):
    def __init__(self, attribute_names):
        self.attribute_names = attribute_names
    def fit(self, X, y=None):
        return self
    def transform(self, X):
        return X[self.attribute_names].values

# Label encoding: wrapper so LabelBinarizer can run inside a Pipeline
class MyLabelBinarizer(BaseEstimator, TransformerMixin):
    def __init__(self, *args, **kwargs):
        self.encoder = LabelBinarizer(*args, **kwargs)
    def fit(self, x, y=None):
        self.encoder.fit(x)
        return self
    def transform(self, x, y=None):
        return self.encoder.transform(x)
train_num = train_features.drop('ocean_proximity', axis=1)
num_attribs = list(train_num)
cat_attribs = ['ocean_proximity']

num_pipeline = Pipeline([('selector', DataFrameSelector(num_attribs)),
                         ('imputer', SimpleImputer(strategy='median')),
                         ('attribs_adder', CombinedAttributesAdder()),
                         ('std_scaler', StandardScaler()),
                        ])

cat_pipeline = Pipeline([('selector', DataFrameSelector(cat_attribs)),
                         ('label_binarizer', MyLabelBinarizer()),
                        ])

full_pipeline = FeatureUnion(transformer_list=[('num_pipeline', num_pipeline),
                                               ('cat_pipeline', cat_pipeline),
                                              ])

final_train_features = full_pipeline.fit_transform(train_features)
final_train_target = train_target
# The same fitted pipeline transforms test_features
final_test_features = full_pipeline.transform(test_features)
final_test_target = test_target


# Randomized search
from sklearn.model_selection import RandomizedSearchCV
from sklearn.ensemble import RandomForestRegressor
distributions = dict(n_estimators=[3,10,30], max_features=[2,4,6,8])

forest_reg_rs = RandomForestRegressor()
grid_search_rs = RandomizedSearchCV(forest_reg_rs, distributions, 
                                 random_state=0, cv=5,
                                 scoring='neg_mean_squared_error')
grid_search_rs.fit(final_train_features, final_train_target)

# Print feature importances
feature_importances = grid_search_rs.best_estimator_.feature_importances_

# Build the feature names:
# numeric features come straight from num_attribs;
# categorical feature names come from the fitted LabelBinarizer classes
label_binarizer = MyLabelBinarizer()
label_binarizer.fit(train_features['ocean_proximity'])
cat_feature_names = label_binarizer.encoder.classes_

# Full feature-name list: numeric + 3 engineered + one-hot categories
feature_names = num_attribs + ['rooms_per_household', 'population_per_household', 'bedrooms_per_room'] + list(cat_feature_names)

# The importances array already lines up with feature_names one-to-one
# (each one-hot column gets its own importance), so a length check suffices
assert len(feature_importances) == len(feature_names)

# Combine importances and names into a DataFrame, sorted descending
feature_importances_df = pd.DataFrame({
    'feature': feature_names,
    'importance': feature_importances
}).sort_values('importance', ascending=False)

# Print each feature's importance
print(feature_importances_df)


from sklearn.metrics import mean_squared_error
final_model = grid_search_rs.best_estimator_
final_predictions = final_model.predict(final_test_features)
final_mse = mean_squared_error(final_test_target, final_predictions)
final_rmse = np.sqrt(final_mse)
print(final_rmse)

References:

  1. Machine-Learning/ML_0_20201224_前言.ipynb at master · myhaa/Machine-Learning · GitHub
  2. Machine-Learning/ML_2_20201225_一个完整的机器学习项目.ipynb at master · myhaa/Machine-Learning · GitHub

 
