COVID-19 Weibo Emotion Classification with TextCNN, LSTM, and Transformer


Task Overview

The weibo emotion classification task aims to identify the emotion expressed in a weibo post: the input is a single weibo and the output is its emotion category. In this task, each weibo is assigned one of six classes: positive, angry, sad, fear, surprise, or no emotion.

  • Data source
    The posts in this dataset (the "virus weibo" dataset) were collected during the COVID-19 outbreak by filtering weibos with epidemic-related keywords, so their content is related to the pandemic.
  • Labels
    Each weibo is annotated with one of six classes: neural (no emotion), happy (positive), angry (anger), sad (sadness), fear (fear), surprise (surprise).
  • Size
    The training set contains 6,606 weibos and the test set contains 5,000 weibos.
  • Format
    The data is stored as JSON with three fields: id, content, and label (a short loading sketch follows this list).
    Example: {"id": 11, "content": "武汉加油!中国加油!安徽加油!", "label": "happy"}
  • Download
    Link: https://pan.baidu.com/s/13_czouycHR8mK0pHzuH7gw
    Extraction code: t81p
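As a quick sanity check of the format, a short script along these lines (assuming the two files have been downloaded into ../data/ as configured below) can load the JSON and print the label distribution:

import json
from collections import Counter

with open('../data/virus_train.txt', 'r', encoding='utf-8') as f:
    datas = json.load(f)

print(len(datas))                          # number of weibos in the training set
print(datas[0])                            # {'id': ..., 'content': ..., 'label': ...}
print(Counter(d['label'] for d in datas))  # distribution over the six labels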

Experiment Design

Project structure

The project layout is straightforward:
(figure: project file structure)

Main script

import torch
import torch.nn as nn
import torch.optim as optim
import pickle as pkl
from src.models.textCNN import TextCNN
from src.models.textRNN import TextRNN
from src.models.Transformer import Transformer
from src.Config import Config
from src.get_data import get_data
from src.train import train

if __name__ == '__main__':
    config = Config()
    batch_size = config.batch_size
    learning_rate = config.learning_rate

    train_dataloader, test_dataloader, n_vocab = get_data(batch_size)
    config.n_vocab = n_vocab

    # model = TextCNN(config).to(Config.device)
    model = TextRNN(config).to(Config.device)
    # model = Transformer(config).to(Config.device)

    # Load the pretrained word2vec vectors (id -> vector) produced during data preparation;
    # id_vec.pkl only exists when the pretrained-embedding pipeline has been run
    if config.embedding_pretrained:
        id_vec = open(Config.id_vec_path, 'rb')
        id_vec = pkl.load(id_vec)
        id_vec = torch.tensor(list(id_vec.values())).to(Config.device)
        model.embedding = nn.Embedding.from_pretrained(id_vec)

    loss = nn.CrossEntropyLoss().to(Config.device)
    optimizer = optim.Adam(params=model.parameters(), lr=learning_rate)

    train(model, loss, optimizer, train_dataloader, test_dataloader, Config.epoches, Config.device)

Configuration

import torch


class Config():
    train_data_path = '../data/virus_train.txt'
    test_data_path = '../data/virus_eval_labeled.txt'
    vocab_path = '../data/vocab.pkl'
    split_word_all_path = '../data/split_word_all.txt'
    model_file_name_path = '../data/vec_model.txt'
    id_vec_path = '../data/id_vec.pkl'

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    word_level = True   # True: character-level tokenization; False: jieba word-level tokenization

    embedding_pretrained = False   # whether to use pretrained word2vec embeddings

    label_fields = {'neural': 0, 'happy': 1, 'angry': 2, 'sad': 3, 'fear': 4, 'surprise': 5}
    all_seq_len = 64  # fixed sequence length: longer texts are truncated, shorter ones padded

    batch_size = 128
    learning_rate = 0.0001
    epoches = 50
    dropout = 0.5
    num_classes = 6

    embed_dim = 300
    n_vocab = 0
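The word_level flag above selects between character-level and jieba word-level tokenization (the actual tokenizer lambdas live in load_data below). A minimal illustration of the difference, assuming jieba is installed:

import jieba

text = '武汉加油!中国加油!'
char_tokens = [y for y in text]                     # word_level = True: split into characters
word_tokens = list(jieba.cut(text, cut_all=False))  # word_level = False: jieba word segmentation
print(char_tokens)  # ['武', '汉', '加', '油', '!', ...]
print(word_tokens)  # e.g. ['武汉', '加油', '!', ...]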

Data Preparation

import re
import os
import json
import jieba
import pickle as pkl
import numpy as np
import gensim.models.word2vec as w2v
import torch
from src.Config import Config
import torch.utils.data as Data

train_data_path = Config.train_data_path
test_data_path = Config.test_data_path
vocab_path = Config.vocab_path

label_fields = Config.label_fields
all_seq_len = Config.all_seq_len

UNK, PAD = '<UNK>', '<PAD>'  # unknown token and padding token

# Build the vocabulary and dump the tokenized corpus for word2vec training
def build_vocab(content_list, tokenizer):
    file_split_word = open(Config.split_word_all_path, 'w', encoding='utf-8')
    vocab_dic = {}
    for content in content_list:
        word_lines = []
        for word in tokenizer(content):
            vocab_dic[word] = vocab_dic.get(word, 0) + 1
            word_lines.append(word)

        line = " ".join(word_lines) + "\n"
        file_split_word.write(line)

    file_split_word.close()
    vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    vocab_dic = {word: idx for idx, word in enumerate(vocab_dic)}  # word -> index
    return vocab_dic

# Map each vocabulary id to its word2vec vector (UNK -> zeros, PAD -> ones)
def build_id_vec(vocab_dic, model):
    model.wv.add_vector(UNK, np.zeros(300))
    model.wv.add_vector(PAD, np.ones(300))
    id2vec = {}
    for word in vocab_dic.keys():
        id = vocab_dic.get(word, vocab_dic.get(UNK))
        vec = model.wv.get_vector(word)
        id2vec.update({id: vec})
    return id2vec

# Train word2vec vectors on the tokenized corpus
def train_vec():
    model_file_name = Config.model_file_name_path
    sentences = w2v.LineSentence(Config.split_word_all_path)
    model = w2v.Word2Vec(sentences, vector_size=300, window=20, min_count=0)
    model.save(model_file_name)

# Load and clean the data, build the vocabulary, and convert texts to id sequences
def load_data(root):
    content_list = []
    content_token_list = []
    label_list = []
    # Choose the tokenizer: character-level or jieba word-level
    if Config.word_level:
        tokenizer = lambda x: [y for y in x]
    else:
        tokenizer = lambda x: jieba.cut(x, cut_all=False)

    file = open(root, 'r', encoding='utf-8')

    datas = json.load(file)
    # Several cleaning patterns were tried; each keeps Chinese characters plus some punctuation
    # pattern = re.compile(r'[^\u4e00-\u9fa5|,|。|!|?|\[|\]]')
    pattern = re.compile(r'[^\u4e00-\u9fa5|,|。|!|?]')
    # pattern = re.compile(r'[^\u4e00-\u9fa5|,|。]')       # seq_len=32 CNN: 67%-68%  RNN: 61%-62%  Transformer: 63%-64%
    # pattern = re.compile(r'[^\u4e00-\u9fa5|,|。|!]')       # CNN: 65%-66%
    for data in datas:
        content_after_clean = re.sub(pattern, '', data['content'])
        content_list.append(content_after_clean)
        label_list.append(label_fields[data['label']])

    if os.path.exists(vocab_path):
        vocab = pkl.load(open(vocab_path, 'rb'))
    else:
        vocab = build_vocab(content_list, tokenizer)
        pkl.dump(vocab, open(vocab_path, 'wb'))
        if Config.embedding_pretrained:
            train_vec()
            model = w2v.Word2Vec.load(Config.model_file_name_path)
            id_vec = build_id_vec(vocab, model)
            pkl.dump(id_vec, open(Config.id_vec_path, 'wb'))

    for content in content_list:
        word_line = []
        token = list(tokenizer(content))
        seq_len = len(token)
        if seq_len < all_seq_len:
            token.extend([PAD] * (all_seq_len - seq_len))
        else:
            token = token[:all_seq_len]

        for word in token:
            word_line.append(vocab.get(word, vocab.get(UNK)))

        content_token_list.append(word_line)

    n_vocab = len(vocab)

    return content_token_list, label_list, n_vocab

# Wrap the id sequences and labels in a PyTorch Dataset
class WeiBboDataset(Data.Dataset):
    def __init__(self, content_token_list, label_list):
        super(WeiBboDataset, self).__init__()
        self.content_token_list = content_token_list
        self.label_list = label_list

    def __getitem__(self, index):
        label = float(self.label_list[index])
        return torch.tensor(self.content_token_list[index]), torch.tensor(label)

    def __len__(self):
        return len(self.label_list)

# Entry point: build the train/test DataLoaders
def get_data(batch_size):
    train_content_token_list, train_label_list, n_vocab = load_data(train_data_path)
    test_content_token_list, test_label_list, _ = load_data(test_data_path)

    train_dataset = WeiBboDataset(train_content_token_list, train_label_list)
    test_dataset = WeiBboDataset(test_content_token_list, test_label_list)

    train_dataloader = Data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
    test_dataloader = Data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
    return train_dataloader, test_dataloader, n_vocab


if __name__ == '__main__':
    get_data(32)
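Before moving on to the models, the pipeline can be smoke-tested by pulling one batch and checking its shapes (a sketch; the batch size of 32 is arbitrary and the data files must already be in ../data/):

from src.get_data import get_data

train_loader, test_loader, n_vocab = get_data(32)
data, target = next(iter(train_loader))
print(n_vocab)       # vocabulary size, including <UNK> and <PAD>
print(data.shape)    # torch.Size([32, 64]) -- [batch_size, all_seq_len]
print(target.shape)  # torch.Size([32])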

Model Architectures

TextCNN

import torch.nn as nn
import torch
import torch.nn.functional as F


class TextCNN(nn.Module):
    def __init__(self, Config):
        super(TextCNN, self).__init__()

        self.filter_sizes = (2, 3, 4)  # convolution kernel heights
        self.num_filters = 64  # number of kernels (output channels) per size

        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)
        self.convs = nn.ModuleList(
            [nn.Conv2d(1, self.num_filters, (k, Config.embed_dim)) for k in self.filter_sizes])
        self.dropout = nn.Dropout(Config.dropout)
        self.fc = nn.Linear(self.num_filters * len(self.filter_sizes), Config.num_classes)

    def conv_and_pool(self, x, conv):
        x = F.relu(conv(x))                        # [batch, num_filters, seq_len - k + 1, 1]
        x = x.squeeze(3)                           # [batch, num_filters, seq_len - k + 1]
        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # max over time -> [batch, num_filters]
        return x

    def forward(self, x):
        out = self.embedding(x)  # [batch, seq_len, embed_dim]
        out = out.unsqueeze(1)   # [batch, 1, seq_len, embed_dim] for Conv2d
        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)
        out = self.dropout(out)
        out = self.fc(out)       # [batch, num_classes]
        return out
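A minimal forward-pass check of the tensor shapes (a sketch; the vocabulary size of 5,000 and batch of 2 are hypothetical):

import torch
from src.Config import Config
from src.models.textCNN import TextCNN

config = Config()
config.n_vocab = 5000  # hypothetical vocabulary size
model = TextCNN(config).to(Config.device)

dummy = torch.randint(0, config.n_vocab, (2, Config.all_seq_len)).to(Config.device)
out = model(dummy)     # embedding -> three parallel convolutions -> max-over-time pooling -> concat -> fc
print(out.shape)       # expected: torch.Size([2, 6])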

LSTM

import torch
import torch.nn as nn


class TextRNN(nn.Module):
    def __init__(self, Config):
        super(TextRNN, self).__init__()
        self.hidden_size = 128  # LSTM hidden size
        self.num_layers = 2  # number of stacked LSTM layers
        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)
        self.lstm = nn.LSTM(Config.embed_dim, self.hidden_size, self.num_layers,
                            bidirectional=True, batch_first=True, dropout=Config.dropout)
        self.fc = nn.Linear(self.hidden_size * 2, Config.num_classes)

    def forward(self, x):
        out = self.embedding(x)  # [batch_size, seq_len, embed_dim] = [128, 64, 300]
        out, _ = self.lstm(out)  # [batch_size, seq_len, hidden_size * 2]
        out = self.fc(out[:, -1, :])  # hidden state at the last time step (note: for short texts this position is <PAD>)
        return out

Transformer

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import copy


class Transformer(nn.Module):
    def __init__(self, Config):
        super(Transformer, self).__init__()

        self.hidden = 1024       # feed-forward hidden size
        self.last_hidden = 512   # unused here (the second FC layer is commented out below)
        self.num_head = 5        # number of attention heads (300 % 5 == 0)
        self.num_encoder = 2     # number of encoder blocks
        self.dim_model = 300     # model dimension, equal to embed_dim

        self.embedding = nn.Embedding(Config.n_vocab, Config.embed_dim)

        self.postion_embedding = Positional_Encoding(Config.embed_dim, Config.all_seq_len, Config.dropout, Config.device)
        self.encoder = Encoder(self.dim_model, self.num_head, self.hidden, Config.dropout)
        self.encoders = nn.ModuleList([
            copy.deepcopy(self.encoder)
            # Encoder(config.dim_model, config.num_head, config.hidden, config.dropout)
            for _ in range(self.num_encoder)])

        self.fc1 = nn.Linear(Config.all_seq_len * self.dim_model, Config.num_classes)
        # self.fc2 = nn.Linear(config.last_hidden, config.num_classes)
        # self.fc1 = nn.Linear(config.dim_model, config.num_classes)

    def forward(self, x):
        out = self.embedding(x)
        out = self.postion_embedding(out)
        for encoder in self.encoders:
            out = encoder(out)
        out = out.view(out.size(0), -1)
        # out = torch.mean(out, 1)
        out = self.fc1(out)
        return out


class Encoder(nn.Module):
    def __init__(self, dim_model, num_head, hidden, dropout):
        super(Encoder, self).__init__()
        self.attention = Multi_Head_Attention(dim_model, num_head, dropout)
        self.feed_forward = Position_wise_Feed_Forward(dim_model, hidden, dropout)

    def forward(self, x):
        out = self.attention(x)
        out = self.feed_forward(out)
        return out


class Positional_Encoding(nn.Module):
    def __init__(self, embed, pad_size, dropout, device):
        super(Positional_Encoding, self).__init__()
        self.device = device
        self.pe = torch.tensor([[pos / (10000.0 ** (i // 2 * 2.0 / embed)) for i in range(embed)] for pos in range(pad_size)])
        self.pe[:, 0::2] = np.sin(self.pe[:, 0::2])
        self.pe[:, 1::2] = np.cos(self.pe[:, 1::2])
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        out = x + nn.Parameter(self.pe, requires_grad=False).to(self.device)
        out = self.dropout(out)
        return out


class Scaled_Dot_Product_Attention(nn.Module):
    '''Scaled Dot-Product Attention '''
    def __init__(self):
        super(Scaled_Dot_Product_Attention, self).__init__()

    def forward(self, Q, K, V, scale=None):
        '''
        Args:
            Q: [batch_size, len_Q, dim_Q]
            K: [batch_size, len_K, dim_K]
            V: [batch_size, len_V, dim_V]
            scale: scaling factor, 1 / sqrt(dim_K) as in the paper
        Return:
            the context tensor after self-attention
        '''
        attention = torch.matmul(Q, K.permute(0, 2, 1))
        if scale:
            attention = attention * scale
        # if mask:  # TODO change this
        #     attention = attention.masked_fill_(mask == 0, -1e9)
        attention = F.softmax(attention, dim=-1)
        context = torch.matmul(attention, V)
        return context


class Multi_Head_Attention(nn.Module):
    def __init__(self, dim_model, num_head, dropout=0.0):
        super(Multi_Head_Attention, self).__init__()
        self.num_head = num_head
        assert dim_model % num_head == 0
        self.dim_head = dim_model // self.num_head
        self.fc_Q = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_K = nn.Linear(dim_model, num_head * self.dim_head)
        self.fc_V = nn.Linear(dim_model, num_head * self.dim_head)
        self.attention = Scaled_Dot_Product_Attention()
        self.fc = nn.Linear(num_head * self.dim_head, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        batch_size = x.size(0)
        Q = self.fc_Q(x)
        K = self.fc_K(x)
        V = self.fc_V(x)
        Q = Q.view(batch_size * self.num_head, -1, self.dim_head)
        K = K.view(batch_size * self.num_head, -1, self.dim_head)
        V = V.view(batch_size * self.num_head, -1, self.dim_head)
        # if mask:  # TODO
        #     mask = mask.repeat(self.num_head, 1, 1)  # TODO change this
        scale = K.size(-1) ** -0.5  # scaling factor 1 / sqrt(dim_head)
        context = self.attention(Q, K, V, scale)

        context = context.view(batch_size, -1, self.dim_head * self.num_head)
        out = self.fc(context)
        out = self.dropout(out)
        out = out + x  # residual connection
        out = self.layer_norm(out)
        return out


class Position_wise_Feed_Forward(nn.Module):
    def __init__(self, dim_model, hidden, dropout=0.0):
        super(Position_wise_Feed_Forward, self).__init__()
        self.fc1 = nn.Linear(dim_model, hidden)
        self.fc2 = nn.Linear(hidden, dim_model)
        self.dropout = nn.Dropout(dropout)
        self.layer_norm = nn.LayerNorm(dim_model)

    def forward(self, x):
        out = self.fc1(x)
        out = F.relu(out)
        out = self.fc2(out)
        out = self.dropout(out)
        out = out + x  # residual connection
        out = self.layer_norm(out)
        return out
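With dim_model = 300 and num_head = 5, each head has dim_head = 60, which satisfies the assert in Multi_Head_Attention. The same kind of forward-pass check as above applies (a sketch with a hypothetical vocabulary size):

import torch
from src.Config import Config
from src.models.Transformer import Transformer

config = Config()
config.n_vocab = 5000  # hypothetical vocabulary size
model = Transformer(config).to(Config.device)

dummy = torch.randint(0, config.n_vocab, (2, Config.all_seq_len)).to(Config.device)
out = model(dummy)     # embedding -> positional encoding -> 2 encoder blocks -> flatten -> fc1
print(out.shape)       # expected: torch.Size([2, 6])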

Training and Testing

import os
import torch
import torch.nn as nn
from utils.draw_loss_pic import draw_loss_pic

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"


def train(net, loss, optimizer, train_loader, test_loader, epoches, device):
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []
    for epoch in range(epoches):
        net.train()
        total_loss = 0.0
        correct = 0
        sample_num = 0
        for batch_idx, (data, target) in enumerate(train_loader):
            data = data.to(device).long()
            target = target.to(device).long()
            optimizer.zero_grad()
            output = net(data)
            ls = loss(output, target)
            ls.backward()
            optimizer.step()
            total_loss += ls.item()
            sample_num += len(target)
            max_output = output.data.max(1, keepdim=True)[1].view_as(target)
            correct += (max_output == target).sum()

        print('epoch %d, train_loss %f, train_acc: %f' % (epoch + 1, total_loss/sample_num, float(correct.data.item()) / sample_num))
        train_loss.append(total_loss/sample_num)
        train_acc.append(float(correct.data.item()) / sample_num)

        test_ls, test_accury = test(net, test_loader, device, loss)
        test_loss.append(test_ls)
        test_acc.append(test_accury)

    draw_loss_pic(train_loss, test_loss, "loss")
    draw_loss_pic(train_acc, test_acc, "acc")


def test(net, test_loader, device, loss):
    net.eval()
    total_loss = 0.0
    correct = 0
    sample_num = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for batch_idx, (data, target) in enumerate(test_loader):
            data = data.to(device).long()
            target = target.to(device).long()
            output = net(data)
            ls = loss(output, target)
            total_loss += ls.item()
            sample_num += len(target)
            max_output = output.data.max(1, keepdim=True)[1].view_as(target)
            correct += (max_output == target).sum()

    print('test_loss %f, test_acc: %f' % (
        total_loss / sample_num, float(correct.data.item()) / sample_num))
    return total_loss / sample_num, float(correct.data.item()) / sample_num
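draw_loss_pic is imported from utils but its source is not listed here. A plausible minimal implementation matching how it is called in train() -- two lists of per-epoch values plus a name -- might look like this (an assumption, not necessarily the author's original):

# utils/draw_loss_pic.py -- hypothetical sketch
import matplotlib.pyplot as plt

def draw_loss_pic(train_values, test_values, name):
    epochs = range(1, len(train_values) + 1)
    plt.figure()
    plt.plot(epochs, train_values, label='train_' + name)
    plt.plot(epochs, test_values, label='test_' + name)
    plt.xlabel('epoch')
    plt.ylabel(name)
    plt.legend()
    plt.savefig(name + '.png')  # or plt.show()
    plt.close()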

Results

With character-level tokenization, sequence length 64, batch_size 128, learning_rate 0.0001, a cleaning pattern that keeps Chinese characters, commas, periods, exclamation marks, question marks, and bracketed emoji tags, and TextCNN as the model, the loss and accuracy curves are as follows:
(figure: training and test loss curves)
(figure: training and test accuracy curves)
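To sanity-check a trained model on a new weibo, the same preprocessing as load_data (clean, tokenize, pad/truncate to all_seq_len, map to ids) has to be repeated. A hedged sketch, assuming a trained model, character-level tokenization, and the saved vocab.pkl:

import re
import torch
import pickle as pkl
from src.Config import Config

def predict(model, text):
    # same cleaning pattern and character-level tokenization as load_data
    text = re.sub(re.compile(r'[^\u4e00-\u9fa5|,|。|!|?]'), '', text)
    vocab = pkl.load(open(Config.vocab_path, 'rb'))
    tokens = [y for y in text][:Config.all_seq_len]
    tokens += ['<PAD>'] * (Config.all_seq_len - len(tokens))
    ids = [vocab.get(w, vocab.get('<UNK>')) for w in tokens]
    x = torch.tensor([ids]).to(Config.device)
    model.eval()
    with torch.no_grad():
        pred = model(x).argmax(dim=1).item()
    id2label = {v: k for k, v in Config.label_fields.items()}
    return id2label[pred]

# e.g. print(predict(model, '武汉加油!中国加油!安徽加油!'))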

