微博情绪分类

news2024/11/20 10:27:43

引自:https://blog.csdn.net/no1xiaoqianqian/article/details/130593783

友好借鉴,总体抄袭。

所需要的文件如下:https://download.csdn.net/download/m0_37567738/88340795






import os
import torch
import torch.nn as nn
import numpy as np


class TextRNN(nn.Module):
    """Bidirectional two-layer LSTM text classifier.

    Token ids -> embedding -> BiLSTM -> linear head applied to the
    output at the last time step.
    """

    def __init__(self, Config):
        super(TextRNN, self).__init__()
        self.hidden_size = 128   # LSTM hidden units per direction
        self.num_layers = 2      # stacked LSTM layers
        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)
        self.lstm = nn.LSTM(Config.embed_dim, self.hidden_size, self.num_layers,
                            bidirectional=True, batch_first=True, dropout=Config.dropout)
        # *2 because forward and backward hidden states are concatenated
        self.fc = nn.Linear(self.hidden_size * 2, Config.num_classes)

    def forward(self, x):
        # x: [batch, seq_len] of token ids
        embedded = self.embedding(x)        # [batch, seq_len, embed_dim]
        lstm_out, _ = self.lstm(embedded)   # [batch, seq_len, 2*hidden_size]
        last_step = lstm_out[:, -1, :]      # output at the final time step
        return self.fc(last_step)           # [batch, num_classes]

    
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy


class Transformer(nn.Module):
    """Transformer-encoder classifier.

    Embedding + sinusoidal positional encoding, a stack of identical
    encoder layers, then a linear head over the flattened sequence.
    """

    def __init__(self, Config):
        super(Transformer, self).__init__()

        # Architecture hyper-parameters kept local to this class.
        self.hidden = 1024       # feed-forward inner width
        self.last_hidden = 512   # unused here; kept for reference
        self.num_head = 5        # attention heads (must divide dim_model)
        self.num_encoder = 2     # number of stacked encoder layers
        self.dim_model = 300     # NOTE(review): must equal Config.embed_dim — confirm

        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)

        self.postion_embedding = Positional_Encoding(Config.embed_dim, Config.all_seq_len, Config.dropout, Config.device)
        self.encoder = Encoder(self.dim_model, self.num_head, self.hidden, Config.dropout)
        # Deep copies so every layer owns independent weights.
        self.encoders = nn.ModuleList(
            [copy.deepcopy(self.encoder) for _ in range(self.num_encoder)])

        self.fc1 = nn.Linear(Config.all_seq_len * self.dim_model, Config.num_classes)

    def forward(self, x):
        hidden = self.postion_embedding(self.embedding(x))
        for layer in self.encoders:
            hidden = layer(hidden)
        flat = hidden.view(hidden.size(0), -1)  # [batch, seq_len * dim_model]
        return self.fc1(flat)


class Encoder(nn.Module):
    """One Transformer encoder layer: multi-head self-attention followed
    by a position-wise feed-forward sub-layer (each sub-layer applies its
    own residual connection and LayerNorm internally).
    """

    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden, dropout)

    def forward(self, x):
        return self.feed_forward(self.attention(x))


class Positional_Encoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017).

    PE[pos, 2i]   = sin(pos / 10000^(2i/embed))
    PE[pos, 2i+1] = cos(pos / 10000^(2i/embed))

    Fixes over the original: the table is computed entirely in torch
    (the original applied numpy ufuncs to a torch tensor), is registered
    as a non-persistent buffer so it follows `module.to(device)` and stays
    out of the state_dict, and is no longer re-wrapped in a fresh
    `nn.Parameter` on every forward call.

    Args:
        embed: embedding dimension.
        pad_size: maximum sequence length the table covers.
        dropout: dropout probability applied after adding the encoding.
        device: device the encoding is moved to in forward().
    """

    def __init__(self, embed, pad_size, dropout, device):
        super(Positional_Encoding, self).__init__()
        self.device = device
        position = torch.arange(pad_size, dtype=torch.float32).unsqueeze(1)  # [pad_size, 1]
        # 10000^((i // 2) * 2 / embed) for each dimension index i — the same
        # exponent for each sin/cos pair, matching the original formula.
        exponent = torch.arange(embed, dtype=torch.float32) // 2 * 2.0 / embed
        pe = position / torch.pow(10000.0, exponent)                         # [pad_size, embed]
        pe[:, 0::2] = torch.sin(pe[:, 0::2])
        pe[:, 1::2] = torch.cos(pe[:, 1::2])
        # Non-persistent: moves with the module but is not saved/loaded,
        # preserving the original checkpoint format.
        self.register_buffer('pe', pe, persistent=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        # x: [batch, seq_len, embed]; pe broadcasts over the batch dim.
        out = x + self.pe.to(self.device)
        return self.dropout(out)


class Scaled_Dot_Product_Attention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T * scale) V."""

    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        """Compute the attention context.

        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: optional scaling factor, typically 1/sqrt(dim_K)
        Returns:
            Context tensor of shape [batch_size, len_Q, dim_V].
        """
        scores = Q.bmm(K.transpose(1, 2))
        if scale:
            scores = scores * scale
        # (masking hook removed in the original — see its commented-out code)
        weights = F.softmax(scores, dim=-1)
        return weights.bmm(V)


class Multi_Head_Attention(nn.Module):
    """Multi-head self-attention with residual connection and LayerNorm.

    Args:
        dim_model: model (embedding) dimension; must be divisible by num_head.
        num_head: number of attention heads.
        dropout: dropout probability applied to the projected output.
    """

    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        # x: [batch, seq_len, dim_model]
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)

        # BUG FIX: the original used a plain
        # .view(batch_size * num_head, -1, dim_head), which interleaves
        # sequence positions with heads instead of splitting the feature
        # dimension into heads.  Split heads first, then fold them into
        # the batch dimension.
        def split_heads(t):
            # [B, L, H*dh] -> [B, L, H, dh] -> [B, H, L, dh] -> [B*H, L, dh]
            return (t.view(batch_size, -1, self.num_head, self.dim_head)
                     .transpose(1, 2)
                     .reshape(batch_size * self.num_head, -1, self.dim_head))

        Q, K, V = split_heads(Q), split_heads(K), split_heads(V)

        scale = K.size(-1) ** -0.5  # 1/sqrt(d_k), as in Vaswani et al.
        context = self.attention(Q, K, V, scale)

        # Undo the head split: [B*H, L, dh] -> [B, H, L, dh] -> [B, L, H*dh]
        context = (context.view(batch_size, self.num_head, -1, self.dim_head)
                          .transpose(1, 2)
                          .reshape(batch_size, -1, self.dim_head * self.num_head))

        out = self.fc(context)
        out = self.dropout(out)
        out = out + x  # residual connection
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):
    """Position-wise feed-forward sub-layer: two linear maps with a ReLU
    in between, followed by dropout, a residual connection and LayerNorm.
    """

    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        # Expand -> nonlinearity -> project back to the model dimension.
        projected = self.fc2(F.relu(self.fc1(x)))
        projected = self.dropout(projected)
        # Residual connection, then layer normalisation.
        return self.layer_norm(projected + x)
    
    
    
import torch.nn as nn
import torch
import torch.nn.functional as F


class TextCNN(nn.Module):
    """TextCNN classifier (Kim-style): parallel single-channel convolutions
    with several kernel heights over the embedded sequence, max-pooled over
    time, concatenated, and passed through a linear head.
    """

    def __init__(self, Config):
        super(TextCNN, self).__init__()

        self.filter_sizes = (2, 3, 4)  # kernel heights, in tokens
        self.num_filters = 64          # output channels per kernel size

        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)
        # Each kernel spans the full embedding width, so it slides only
        # along the sequence dimension.
        self.convs = nn.ModuleList(
            nn.Conv2d(1, self.num_filters, (height, Config.embed_dim))
            for height in self.filter_sizes)
        self.dropout = nn.Dropout(Config.dropout)
        self.fc = nn.Linear(self.num_filters * len(self.filter_sizes), Config.num_classes)

    def conv_and_pool(self, x, conv):
        # x: [batch, 1, seq_len, embed_dim] -> [batch, num_filters]
        feature_map = F.relu(conv(x)).squeeze(3)
        pooled = F.max_pool1d(feature_map, feature_map.size(2))
        return pooled.squeeze(2)

    def forward(self, x):
        embedded = self.embedding(x).unsqueeze(1)  # add channel dimension
        features = [self.conv_and_pool(embedded, conv) for conv in self.convs]
        out = self.dropout(torch.cat(features, 1))
        return self.fc(out)
    

    
    
import matplotlib.pyplot as plt
import numpy as np


def draw_loss_pic(train_loss, test_loss, y):
    """Plot train/test curves of one metric over epochs and show the figure.

    Args:
        train_loss: per-epoch values on the training set.
        test_loss: per-epoch values on the test set.
        y: metric name used for the legend labels and the y-axis
           (e.g. "loss" or "acc").
    """
    epochs = np.linspace(0, len(train_loss), len(train_loss))
    for values, prefix in ((train_loss, "train_"), (test_loss, "test_")):
        plt.plot(epochs, values, label=prefix + y, linewidth=1.5)
    plt.xlabel("epoch")
    plt.ylabel(y)
    plt.legend()
    plt.show()
    
    

    
import torch


class Config():
    # Locations of the raw data files and cached artifacts.
    train_data_path = '../data/virus_train.txt'
    test_data_path = '../data/virus_eval_labeled.txt'
    vocab_path = '../data/vocab.pkl'
    split_word_all_path = '../data/split_word_all.txt'
    model_file_name_path = '../data/vec_model.txt'
    id_vec_path = '../data/id_vec.pkl'

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    word_level = True   # tokenize at character level (otherwise jieba word-level)

    embedding_pretrained = False   # whether to load pretrained word2vec embeddings

    # Label string -> class id.  NOTE(review): 'neural' looks like a typo
    # for 'neutral', but it must match the labels in the data files —
    # verify before renaming.
    label_fields = {'neural': 0, 'happy': 1, 'angry': 2, 'sad': 3, 'fear': 4, 'surprise': 5}
    all_seq_len = 64  # fixed sequence length: longer texts truncated, shorter padded

    batch_size = 128
    learning_rate = 0.0001
    epoches = 50
    dropout = 0.5
    num_classes = 6

    embed_dim = 300
    n_vocab = 0  # filled in at runtime once the vocabulary is built

    
    
import re
import os
import json
#import jieba
import pickle as pkl
import numpy as np
import gensim.models.word2vec as w2v
import torch
#from src.Config import Config
import torch.utils.data as Data

# Module-level shortcuts into the shared Config.
train_data_path = Config.train_data_path
test_data_path = Config.test_data_path
vocab_path = Config.vocab_path

label_fields = Config.label_fields
all_seq_len = Config.all_seq_len

UNK, PAD = '<UNK>', '<PAD>'  # out-of-vocabulary token and padding token


def build_vocab(content_list, tokenizer):
    """Build the token -> id vocabulary and dump the tokenized corpus.

    Side effect: writes one space-separated tokenized line per content
    item to Config.split_word_all_path (later consumed by train_vec()).

    Args:
        content_list: cleaned text strings.
        tokenizer: callable mapping a string to an iterable of tokens.
    Returns:
        dict mapping each token (plus UNK and PAD, appended last) to a
        unique integer id in first-seen order.
    """
    vocab_dic = {}
    # `with` guarantees the file is closed even on error (the original
    # leaked the handle if tokenization raised), and the loop no longer
    # shadows the builtin `str`.
    with open(Config.split_word_all_path, 'w', encoding='utf-8') as file_split_word:
        for content in content_list:
            word_lines = []
            for word in tokenizer(content):
                vocab_dic[word] = vocab_dic.get(word, 0) + 1
                word_lines.append(word)
            file_split_word.write(" ".join(word_lines) + "\n")

    # Append the special tokens, then re-map every key (dict insertion
    # order is preserved) to a dense id; frequency counts are discarded.
    vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    vocab_dic = {word: idx for idx, word in enumerate(vocab_dic)}
    return vocab_dic


def build_id_vec(vocab_dic, model):
    """Map every vocabulary id to its word2vec vector.

    UNK is assigned an all-zero vector and PAD an all-one vector before
    lookup.  Returns a dict {token_id: 300-d numpy vector}.
    """
    model.wv.add_vector(UNK, np.zeros(300))
    model.wv.add_vector(PAD, np.ones(300))
    id2vec = {}
    for token in vocab_dic:
        token_id = vocab_dic.get(token, vocab_dic.get(UNK))
        id2vec[token_id] = model.wv.get_vector(token)
    return id2vec


def train_vec():
    """Train a 300-d word2vec model on the tokenized corpus file and save
    it to Config.model_file_name_path."""
    corpus = w2v.LineSentence(Config.split_word_all_path)
    model = w2v.Word2Vec(corpus, vector_size=300, window=20, min_count=0)
    model.save(Config.model_file_name_path)


def load_data(root):
    """Load a labeled JSON dataset and convert it to padded id sequences.

    Builds (and pickles) the vocabulary on first use; optionally trains
    word2vec embeddings when Config.embedding_pretrained is set.

    Args:
        root: path to a JSON file containing a list of
              {'content': str, 'label': str} records.
    Returns:
        (content_token_list, label_list, n_vocab) where every token list
        has exactly `all_seq_len` ids (PAD-padded or truncated).
    """
    content_list = []
    content_token_list = []
    label_list = []
    if Config.word_level:
        tokenizer = lambda x: [y for y in x]               # character-level
    else:
        tokenizer = lambda x: jieba.cut(x, cut_all=False)  # word-level

    # `with` closes the handle (the original leaked it).
    with open(root, 'r', encoding='utf-8') as file:
        datas = json.load(file)

    # Strip everything except CJK characters and a few fullwidth
    # punctuation marks.  Use the compiled pattern's own .sub().
    pattern = re.compile(r'[^\u4e00-\u9fa5|,|。|!|?]')
    for data in datas:
        content_list.append(pattern.sub('', data['content']))
        label_list.append(label_fields[data['label']])

    if os.path.exists(vocab_path):
        with open(vocab_path, 'rb') as f:
            vocab = pkl.load(f)
    else:
        vocab = build_vocab(content_list, tokenizer)
        with open(vocab_path, 'wb') as f:
            pkl.dump(vocab, f)
        if Config.embedding_pretrained:
            train_vec()
            model = w2v.Word2Vec.load(Config.model_file_name_path)
            id_vec = build_id_vec(vocab, model)
            with open(Config.id_vec_path, 'wb') as f:
                pkl.dump(id_vec, f)

    unk_id = vocab.get(UNK)
    for content in content_list:
        token = list(tokenizer(content))
        # Pad short sequences with PAD tokens; truncate long ones.
        if len(token) < all_seq_len:
            token.extend([PAD] * (all_seq_len - len(token)))
        else:
            token = token[:all_seq_len]
        content_token_list.append([vocab.get(word, unk_id) for word in token])

    return content_token_list, label_list, len(vocab)


class WeiBboDataset(Data.Dataset):
    """Wraps padded token-id sequences and integer labels as a Dataset."""

    def __init__(self, content_token_list, label_list):
        super(WeiBboDataset, self).__init__()
        self.content_token_list = content_token_list
        self.label_list = label_list

    def __getitem__(self, index):
        # Tokens become an integer tensor; the label is returned as a
        # float tensor (the training loop casts it back to long).
        tokens = torch.tensor(self.content_token_list[index])
        label = torch.tensor(float(self.label_list[index]))
        return tokens, label

    def __len__(self):
        return len(self.label_list)


def get_data(batch_size):
    """Build the train/test DataLoaders.

    Returns (train_dataloader, test_dataloader, n_vocab); n_vocab comes
    from the training split (both splits share the cached vocabulary).
    """
    train_tokens, train_labels, n_vocab = load_data(train_data_path)
    test_tokens, test_labels, _ = load_data(test_data_path)

    train_dataloader = Data.DataLoader(
        dataset=WeiBboDataset(train_tokens, train_labels),
        batch_size=batch_size, shuffle=True)
    test_dataloader = Data.DataLoader(
        dataset=WeiBboDataset(test_tokens, test_labels),
        batch_size=batch_size, shuffle=False)
    return train_dataloader, test_dataloader, n_vocab


if __name__ == '__main__':
    # Smoke test: build the dataloaders with a small batch size.
    get_data(32)

    
    
import os
import torch
import torch.nn as nn
from torch.autograd import Variable
#from utils.draw_loss_pic import draw_loss_pic

# Workaround for the "duplicate OpenMP runtime" abort — presumably needed
# because torch and matplotlib each bundle libiomp; TODO confirm still required.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


def train(net, loss, optimizer, train_loader, test_loader, epoches, device):
    """Train `net` for `epoches` epochs, evaluating on the test set after
    each epoch, then plot the loss and accuracy curves.

    Args:
        net: model mapping long token-id batches to class logits.
        loss: criterion (e.g. CrossEntropyLoss).
        optimizer: optimizer over net.parameters().
        train_loader / test_loader: iterables of (data, target) batches.
        epoches: number of passes over the training data.
        device: torch device for model inputs and targets.
    """
    train_loss, train_acc = [], []
    test_loss, test_acc = [], []
    for epoch in range(epoches):
        net.train()
        epoch_loss = 0.0
        n_correct = 0
        n_seen = 0
        for data, target in train_loader:
            data = data.to(device).long()
            target = target.to(device).long()
            optimizer.zero_grad()
            output = net(data)
            batch_loss = loss(output, target)
            batch_loss.backward()
            optimizer.step()
            epoch_loss += batch_loss.item()
            n_seen += len(target)
            predicted = output.data.max(1, keepdim=True)[1].view_as(target)
            n_correct += (predicted == target).sum()

        print('epoch %d, train_loss %f, train_acc: %f' % (epoch + 1, epoch_loss / n_seen, float(n_correct.data.item()) / n_seen))
        train_loss.append(epoch_loss / n_seen)
        train_acc.append(float(n_correct.data.item()) / n_seen)

        epoch_test_loss, epoch_test_acc = test(net, test_loader, device, loss)
        test_loss.append(epoch_test_loss)
        test_acc.append(epoch_test_acc)

    draw_loss_pic(train_loss, test_loss, "loss")
    draw_loss_pic(train_acc, test_acc, "acc")


def test(net, test_loader, device, loss):
    net.eval()
    total_loss = 0.0
    correct = 0
    sample_num = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        data = data.to(device)
        target = target.to(device).long()
        output = net(data)
        ls = loss(output, target)
        total_loss += ls.item()
        sample_num += len(target)
        max_output = output.data.max(1, keepdim=True)[1].view_as(target)
        correct += (max_output == target).sum()

    print('test_loss %f, test_acc: %f' % (
        total_loss / sample_num, float(correct.data.item()) / sample_num))
    return total_loss / sample_num, float(correct.data.item()) / sample_num



import torch
import torch.nn as nn
import torch.optim as optim
import pickle as pkl
#from src.models.textCNN import TextCNN
#from src.models.textRNN import TextRNN
#from src.models.Transformer import Transformer
#from src.Config import Config
#from src.get_data import get_data
#from src.train import train

if __name__ == '__main__':
    config = Config()
    batch_size = config.batch_size
    learning_rate = config.learning_rate

    train_dataloader, test_dataloader, n_vocab = get_data(batch_size)
    config.n_vocab = n_vocab

    # Pick one of the three architectures.
    # model = TextCNN(config).to(Config.device)
    model = TextRNN(config).to(Config.device)
    # model = Transformer(config).to(Config.device)

    # BUG FIX: only load the pretrained word2vec vectors when they are
    # actually used.  The original opened and unpickled Config.id_vec_path
    # unconditionally (and leaked the handle), which crashes when
    # embedding_pretrained is False and the file was never generated.
    if config.embedding_pretrained:
        with open(Config.id_vec_path, 'rb') as f:
            id_vec = pkl.load(f)
        id_vec = torch.tensor(list(id_vec.values())).to(Config.device)
        model.embedding = nn.Embedding.from_pretrained(id_vec)

    loss = nn.CrossEntropyLoss().to(Config.device)
    optimizer = optim.Adam(params=model.parameters(), lr=learning_rate)

    train(model, loss, optimizer, train_dataloader, test_dataloader, Config.epoches, Config.device)


运行结果(准确率和错误率):

正确率达到85%。
在这里插入图片描述

在这里插入图片描述

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/1011026.html

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!

相关文章

pyqt与opencv-qt冲突解决办法

问题:pyqt显示不出界面 问题分析: 根据报错可以看出程序找到了libxcb.so,但是由于某些原因并不能够调用该驱动,这是因为pyqt5与opencv里的qt产生了冲突,这说明opencv内部的插件与pyqt5所使用的插件不兼容,因…

Elasticsearch 快速开始

Elasticsearch 是一个分布式的 RESTful 风格的搜索和数据分析引擎。 查询: Elasticsearch 允许执行和合并多种类型的搜索 — 结构化、非结构化、地理位置、度量指标 — 搜索方式随心而变。分析: 找到与查询最匹配的十个文档是一回事。但是如果面对的是…

zemax场曲与消场曲

场曲&#xff0c;像场弯曲&#xff0c;指的是平面物体通过透镜系统后&#xff0c;所有平面物点聚焦后的像面和理想平面不重合。 呈现一个弯曲的像面 单透镜为例&#xff1a; 此时聚焦显然不在一个平面上&#xff1a; 点列图可以观察到场曲的存在&#xff1a; 我们引入实际图…

R拒绝访问的解决方案

Win11系统 安装rms的时候报错&#xff1a; Error in loadNamespace(j <- i[[1L]], c(lib.loc, .libPaths()), versionCheck vI[[j]]) : namespace Matrix 1.5-4.1 is already loaded, but > 1.6.0 is required## 安装rms的时候报错&#xff0c;显示Matrix的版本太低…

Linux日志管理-logrotate(crontab定时任务、Ceph日志转储)

文章目录 一、logrotate概述二、logrotate基本用法三、logrotate运行机制logrotate参数 四、logrotate是怎么做到滚动日志时不影响程序正常的日志输出呢&#xff1f;Linux文件操作机制方案一方案二 五、logrotate实战--Ceph日志转储参考 一、logrotate概述 logrotate是一个用于…

Java 华为真题-选修课

需求&#xff1a; 现有两门选修课&#xff0c;每门选修课都有一部分学生选修&#xff0c;每个学生都有选修课的成绩&#xff0c;需要你找出同时选修了两门选修课的学生&#xff0c;先按照班级进行划分&#xff0c;班级编号小的先输出&#xff0c;每个班级按照两门选修课成绩和的…

下载CentOS ISO镜像 (一)

总目录 https://preparedata.blog.csdn.net/article/details/132877836 文章目录 总目录一、下载CentOS 镜像 一、下载CentOS 镜像 官网下载&#xff1a;https://www.centos.org/download/ Centos Linux 和 CentOS Stream 的区别&#xff1a;https://www.centos.org/cl-vs-cs…

设计模式(2) - 创建型模式

创建型模式指的是 创建对象 或是 获取实例 的方式。 1、工厂模式 平时写一些简单的代码可能会直接用 new 创建出一个对象&#xff0c;但是实际在阅读一些功能比较多、规模比较庞大的工程时&#xff0c;可能会发现有多个类继承于同一个基类的情况&#xff0c;它们拥有同样的接口…

删除安装Google Chrome浏览器时捆绑安装的Google 文档、表格、幻灯片、Gmail、Google 云端硬盘、YouTube网址链接(Mac)

删除安装Google Chrome浏览器时捆绑安装的Google 文档、表格、幻灯片、Gmail、Google 云端硬盘、YouTube网址链接(Mac) Mac mini操作系统&#xff0c;安装完 Google Chrome 浏览器以后&#xff0c;单击 启动台 桌面左下角的“显示应用程序”&#xff0c;我们发现捆绑安装了 Goo…

ArcGis10.8安装教程!

1、找到arcgis10.8中文安装包和Crack破解文件夹 2、运行"ArcGIS.exe"程序&#xff0c;进入安装向导&#xff1b;默认路径点下一步 3、注意&#xff0c;需要Python 2.7、Numpy、Matplotlib的支持 4、建议取消此处的勾选&#xff0c;开始进行安装 5、安装完成 6、…

【npm】npm私有库的使用-绑定

注册npm账户 输入基本信息 验证 收一次性验证码 登录 本地绑定 全局绑定了其他的私有库 若要在专门发包的项目中&#xff0c;发包到自己的私有库&#xff0c;需要在项目文件夹中创建一个.npmrc文件 创建文件 可以直接在项目目录下输入touch .npmrc创建文件 文件内容 regi…

C++之保存编译全部中间文件(二百一十五)

简介&#xff1a; CSDN博客专家&#xff0c;专注Android/Linux系统&#xff0c;分享多mic语音方案、音视频、编解码等技术&#xff0c;与大家一起成长&#xff01; 优质专栏&#xff1a;Audio工程师进阶系列【原创干货持续更新中……】&#x1f680; 人生格言&#xff1a; 人生…

JavaScript学习笔记03

JavaScript笔记03 流程控制 if 判断 和 Java 中if语句的使用方法相同。例&#xff1a; <!DOCTYPE html> <html lang"en"> <head><meta charset"UTF-8"><title>Title</title><script>"use strict"…

OPCAE扫盲

目录 1 基本概念 1.1 服务器/客户端 1.2 区域 1.3 报警/条件 1.4 事件 2 条件概念 2.1 子条件 2.2 OPCConditions属性 2.3 Condition质量 2.4 OPCSubConditions属性 2.5 Condition定义 2.6 严重性 2.7 Condition启用/禁用 2.8 Area启用/禁用 2.9 Condition状态集…

域控操作一:更换域用户桌面背景

1,创建背景图片文件夹并设置共享文件夹&#xff1a; 创建文件夹&#xff0c;将图片放进去&#xff0c;设置共享&#xff0c;权限改为Everyone 2&#xff0c;打开域控服务器设置组策略 在需要的组织单位OU内创建GPO设置名字为统一桌面背景 用户配置–管理模板–桌面–桌面 Act…

期权怎样的加仓才是合理的加仓?

期权加仓的手法是期权投资中常见的一种操作的手段,一般是在行情有大涨趋势的时候,投资者通过追加仓位来扩大收益和缩小持仓成本的策略&#xff0c;下文为大家介绍期权怎样的加仓才是合理的加仓&#xff1f;本文来自&#xff1a;期权酱 一、期权交易怎么加仓最合适&#xff1f;期…

[H5动画制作系列 ] Text及Button 的基础原理Demo

准备工作: 舞台上方是个动态文本框,名称为:myText,舞台下方是一个按钮元件(myButton)的实例,名称是:myButton1,当点击按钮时,能够在文本框上和控制台(console)输出:当前帧号以及全局i的变量值。建立两个图层,一个图层布局按钮和文本框,另一个图层专门部署代码。 操作步骤: 步…

WebGL 正确处理对象前后的关系——隐藏面消除(深度测试)/ 深度冲突

目录 前言 验证WebGL处理对象前后关系的规则——后绘制的图形覆盖先绘制的图形 隐藏面消除&#xff08;深度测试&#xff09; 开启隐藏面消除功能&#xff0c;需要遵循以下两步&#xff1a; 1.开启隐藏面消除功能。 gl.enable&#xff08;&#xff09;函数规范 2.在绘制…

计算机毕业设计 基于SSM的电影推荐系统的设计与实现 Java实战项目 附源码+文档+视频讲解

博主介绍&#xff1a;✌从事软件开发10年之余&#xff0c;专注于Java技术领域、Python人工智能及数据挖掘、小程序项目开发和Android项目开发等。CSDN、掘金、华为云、InfoQ、阿里云等平台优质作者✌ &#x1f345;文末获取源码联系&#x1f345; &#x1f447;&#x1f3fb; 精…

zabbix 钉钉微信企微告警(动作操作消息内容模板)

一、环境配置 1、配置zabbix服务端 2、配置监控主机&监控项&监控模板 zabbix配置安装_this page is used to test the proper operation of _疯飙的蜗牛的博客-CSDN博客 二、触发器 触发器的本质就是一个条件判断&#xff0c;对于不同的监控数据来说&#xff0c;我…