Building textCNN, BERT, and Transformer models in PyTorch for text classification


First, here is what the data looks like after preprocessing:
[Figure: sample of the processed data]
The first column is the text and the second column is the annotated label. The data is stored in .xlsx spreadsheets, split into a training set and a validation set.
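
As a concrete illustration, the snippet below builds a tiny dummy training file with the expected layout; the column names comment and label are the ones the scripts below read, and the two example rows are made up:

import pandas as pd

# Minimal dummy training file: a 'comment' column holding the raw text and a
# 'label' column holding integer class ids (0/1 for binary classification).
df = pd.DataFrame({
    'comment': ['这家店的服务很好', '质量太差,不推荐'],  # hypothetical example texts
    'label': [1, 0],
})
df.to_excel('../train.xlsx', index=False)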

textCNN

Here is the complete project code:

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from transformers import BertTokenizer, BertModel
import random
from sklearn.metrics import classification_report

# Set random seeds to ensure reproducible results
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

set_seed(42)

# Load the pretrained BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
bert_model = BertModel.from_pretrained('bert-base-chinese')

# Define a function to preprocess the text data
def preprocess(text):
    encoding = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=150,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
        truncation=True
    )
    return encoding['input_ids'].squeeze(), encoding['attention_mask'].squeeze()

# Read the training and validation data
train_df = pd.read_excel('../train.xlsx')
val_df = pd.read_excel('../val.xlsx')

# Preprocess the training data
train_texts = train_df['comment'].apply(preprocess)
train_labels = torch.tensor(train_df['label'].values)
train_input_ids = torch.stack([x[0] for x in train_texts])
train_attention_masks = torch.stack([x[1] for x in train_texts])

# Preprocess the validation data
val_texts = val_df['comment'].apply(preprocess)
val_labels = torch.tensor(val_df['label'].values)
val_input_ids = torch.stack([x[0] for x in val_texts])
val_attention_masks = torch.stack([x[1] for x in val_texts])

# Create the dataset and data loaders
class TextDataset(Dataset):
    def __init__(self, input_ids, attention_masks, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'attention_mask': self.attention_masks[idx],
            'labels': self.labels[idx]
        }

train_dataset = TextDataset(train_input_ids, train_attention_masks, train_labels)
val_dataset = TextDataset(val_input_ids, val_attention_masks, val_labels)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False)

# Check whether a GPU is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Define the model
class TextCNN(nn.Module):
    def __init__(self, bert_model, num_classes):
        super(TextCNN, self).__init__()
        self.bert_model = bert_model
        # 2-D convolutions over the sequence: kernel heights 3/4/5, width 768 (the BERT hidden size)
        self.conv1 = nn.Conv2d(1, 100, (3, 768))
        self.conv2 = nn.Conv2d(1, 100, (4, 768))
        self.conv3 = nn.Conv2d(1, 100, (5, 768))
        self.dropout = nn.Dropout(0.5)
        self.fc = nn.Linear(300, num_classes)

    def forward(self, input_ids, attention_mask):
        with torch.no_grad():  # BERT is used here as a frozen feature extractor
            embedded = self.bert_model(input_ids, attention_mask).last_hidden_state
        embedded = embedded.unsqueeze(1)
        conv1 = F.relu(self.conv1(embedded)).squeeze(3)
        conv2 = F.relu(self.conv2(embedded)).squeeze(3)
        conv3 = F.relu(self.conv3(embedded)).squeeze(3)
        pooled1 = F.max_pool1d(conv1, conv1.size(2)).squeeze(2)
        pooled2 = F.max_pool1d(conv2, conv2.size(2)).squeeze(2)
        pooled3 = F.max_pool1d(conv3, conv3.size(2)).squeeze(2)
        out = torch.cat((pooled1, pooled2, pooled3), 1)
        out = self.dropout(out)
        return self.fc(out)

# Initialize the model, loss function, and optimizer
model = TextCNN(bert_model, num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Train and validate the model, saving the best model and the last-epoch model
def train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device, epochs=10):
    best_val_accuracy = 0.0
    best_model_path = "best_model.pth"
    last_model_path = "last_model.pth"

    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for batch in train_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            optimizer.zero_grad()
            outputs = model(input_ids, attention_mask)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        train_loss /= len(train_dataloader)
        print(f"Epoch {epoch + 1}, Training Loss: {train_loss}")

        model.eval()
        val_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for batch in val_dataloader:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                labels = batch['labels'].to(device)

                outputs = model(input_ids, attention_mask)
                loss = criterion(outputs, labels)
                val_loss += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        val_loss /= len(val_dataloader)
        accuracy = 100 * correct / total
        print(f"Validation Loss: {val_loss}, Validation Accuracy: {accuracy}%")

        # Print the classification report
        print("Classification Report:")
        print(classification_report(all_labels, all_preds, digits=3))

        # Save the model that performs best on the validation set
        if accuracy > best_val_accuracy:
            best_val_accuracy = accuracy
            torch.save(model.state_dict(), best_model_path)
            print(f"Best model saved with accuracy: {best_val_accuracy}%")

    # Save the model from the final epoch
    torch.save(model.state_dict(), last_model_path)
    print("Last model saved.")

train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device)
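
Note that the BERT encoder runs under torch.no_grad() inside TextCNN.forward, so its weights never receive gradients even though they are passed to the optimizer above. A minimal sketch of making that intent explicit, assuming you want BERT fully frozen:

# Freeze the BERT encoder and hand the optimizer only the trainable parameters.
for p in model.bert_model.parameters():
    p.requires_grad = False
optimizer = optim.Adam((p for p in model.parameters() if p.requires_grad), lr=0.001)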

You can customize the max_length value inside preprocess() based on the length of the texts you collected: text longer than the defined maximum is truncated, and shorter text is padded. Choosing a value that covers the length of most of your texts usually gives better model performance.
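
One way to choose the value is to inspect the distribution of tokenized lengths on the training set; the sketch below prints summary statistics and the 95th percentile (the percentile itself is an arbitrary cutoff):

# Token lengths of the training texts, including the [CLS]/[SEP] special tokens.
lengths = train_df['comment'].apply(lambda t: len(tokenizer.encode(t, add_special_tokens=True)))
print(lengths.describe())
print('95th percentile:', int(np.percentile(lengths, 95)))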

Running this may raise the following error:

OSError: Can't load tokenizer for 'bert-base-chinese'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'bert-base-chinese' is the correct path to a directory containing all relevant files for a BertTokenizer tokenizer.

This means the automatic download probably failed, so we can download the model manually.
Visit the site given in the message, https://huggingface.co/models, search for bert-base-chinese, and select the first result, as shown in the figure.
[Figure: Hugging Face search results for bert-base-chinese]
Open it and download the three files config.json, pytorch_model.bin, and vocab.txt. Create a folder named "bert-base-chinese" in the same path as the project and put the three files into it.
[Figure: local bert-base-chinese folder layout]
Then change the two lines that load the pretrained BERT model and tokenizer to use the local copy:

tokenizer = BertTokenizer.from_pretrained('./bert-base-chinese')
bert_model = BertModel.from_pretrained('./bert-base-chinese')
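
If you want to guarantee that nothing is fetched from the network, from_pretrained also accepts a local_files_only flag:

tokenizer = BertTokenizer.from_pretrained('./bert-base-chinese', local_files_only=True)
bert_model = BertModel.from_pretrained('./bert-base-chinese', local_files_only=True)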

Run results:
[Figure: textCNN training and validation output]

BERT

This uses the same BERT model and tokenizer as the textCNN version:

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from transformers import BertTokenizer, BertModel
import random
from sklearn.metrics import classification_report

# Set random seeds to ensure reproducible results
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

set_seed(42)

# Load the pretrained BERT model and tokenizer
tokenizer = BertTokenizer.from_pretrained('./bert-base-chinese')
bert_model = BertModel.from_pretrained('./bert-base-chinese')

# Define a function to preprocess the text data
def preprocess(text):
    encoding = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=150,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
        truncation=True
    )
    return encoding['input_ids'].squeeze(), encoding['attention_mask'].squeeze()

# Read the training and validation data
train_df = pd.read_excel('../train.xlsx')
val_df = pd.read_excel('../val.xlsx')

# Preprocess the training data
train_texts = train_df['comment'].apply(preprocess)
train_labels = torch.tensor(train_df['label'].values)
train_input_ids = torch.stack([x[0] for x in train_texts])
train_attention_masks = torch.stack([x[1] for x in train_texts])

# Preprocess the validation data
val_texts = val_df['comment'].apply(preprocess)
val_labels = torch.tensor(val_df['label'].values)
val_input_ids = torch.stack([x[0] for x in val_texts])
val_attention_masks = torch.stack([x[1] for x in val_texts])

# Create the dataset and data loaders
class TextDataset(Dataset):
    def __init__(self, input_ids, attention_masks, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'attention_mask': self.attention_masks[idx],
            'labels': self.labels[idx]
        }

train_dataset = TextDataset(train_input_ids, train_attention_masks, train_labels)
val_dataset = TextDataset(val_input_ids, val_attention_masks, val_labels)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False)

# Check whether a GPU is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Define the model
class BertForTextClassification(nn.Module):
    def __init__(self, bert_model, num_classes):
        super(BertForTextClassification, self).__init__()
        self.bert = bert_model
        self.dropout = nn.Dropout(0.5)
        self.classifier = nn.Linear(bert_model.config.hidden_size, num_classes)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = outputs.pooler_output
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits

# Initialize the model, loss function, and optimizer
model = BertForTextClassification(bert_model, num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.00001)  # small learning rate: the whole BERT encoder is fine-tuned

# Train and validate the model, saving the best model and the last-epoch model
def train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device, epochs=10):
    best_val_accuracy = 0.0
    best_model_path = "best_model.pth"
    last_model_path = "last_model.pth"

    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for batch in train_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            optimizer.zero_grad()
            outputs = model(input_ids, attention_mask)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        train_loss /= len(train_dataloader)
        print(f"Epoch {epoch + 1}, Training Loss: {train_loss}")

        model.eval()
        val_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for batch in val_dataloader:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                labels = batch['labels'].to(device)

                outputs = model(input_ids, attention_mask)
                loss = criterion(outputs, labels)
                val_loss += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        val_loss /= len(val_dataloader)
        accuracy = 100 * correct / total
        print(f"Validation Loss: {val_loss}, Validation Accuracy: {accuracy}%")

        # Print the classification report
        print("Classification Report:")
        print(classification_report(all_labels, all_preds, digits=3))

        # Save the model that performs best on the validation set
        if accuracy > best_val_accuracy:
            best_val_accuracy = accuracy
            torch.save(model.state_dict(), best_model_path)
            print(f"Best model saved with accuracy: {best_val_accuracy}%")

    # Save the model from the final epoch
    torch.save(model.state_dict(), last_model_path)
    print("Last model saved.")

train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device)

Run results:
[Figure: BERT training and validation output]
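
Once training finishes, a saved checkpoint can be loaded back for prediction. A minimal sketch using the preprocess() function from above (the example sentence is made up):

# Load the best checkpoint and classify a single sentence.
model.load_state_dict(torch.load('best_model.pth', map_location=device))
model.eval()
input_ids, attention_mask = preprocess('这家店的服务很好')  # hypothetical example text
with torch.no_grad():
    logits = model(input_ids.unsqueeze(0).to(device), attention_mask.unsqueeze(0).to(device))
print('Predicted label:', logits.argmax(dim=1).item())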

transformer

The Transformer version uses its own pretrained model and tokenizer. If you need to download it manually in advance, visit the Hugging Face website and again download the three files config.json, pytorch_model.bin, and vocab.txt, saving them in an hfl/chinese-roberta-wwm-ext directory.
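
A quick sanity check that the local directory contains everything from_pretrained needs (a sketch; adjust the path to wherever you saved the files):

import os
for f in ('config.json', 'pytorch_model.bin', 'vocab.txt'):
    print(f, os.path.exists(os.path.join('./hfl/chinese-roberta-wwm-ext', f)))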

import pandas as pd
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.optim as optim
from transformers import AutoTokenizer, AutoModel
import random
from sklearn.metrics import classification_report

# Set random seeds to ensure reproducible results
def set_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

set_seed(42)

# Load the pretrained Transformer model and tokenizer
model_name = './hfl/chinese-roberta-wwm-ext'
tokenizer = AutoTokenizer.from_pretrained(model_name)
transformer_model = AutoModel.from_pretrained(model_name)

# Define a function to preprocess the text data
def preprocess(text):
    encoding = tokenizer.encode_plus(
        text,
        add_special_tokens=True,
        max_length=150,
        padding='max_length',
        return_attention_mask=True,
        return_tensors='pt',
        truncation=True
    )
    return encoding['input_ids'].squeeze(), encoding['attention_mask'].squeeze()

# Read the training and validation data
train_df = pd.read_excel('../train.xlsx')
val_df = pd.read_excel('../val.xlsx')

# Preprocess the training data
train_texts = train_df['comment'].apply(preprocess)
train_labels = torch.tensor(train_df['label'].values)
train_input_ids = torch.stack([x[0] for x in train_texts])
train_attention_masks = torch.stack([x[1] for x in train_texts])

# Preprocess the validation data
val_texts = val_df['comment'].apply(preprocess)
val_labels = torch.tensor(val_df['label'].values)
val_input_ids = torch.stack([x[0] for x in val_texts])
val_attention_masks = torch.stack([x[1] for x in val_texts])

# Create the dataset and data loaders
class TextDataset(Dataset):
    def __init__(self, input_ids, attention_masks, labels):
        self.input_ids = input_ids
        self.attention_masks = attention_masks
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return {
            'input_ids': self.input_ids[idx],
            'attention_mask': self.attention_masks[idx],
            'labels': self.labels[idx]
        }

train_dataset = TextDataset(train_input_ids, train_attention_masks, train_labels)
val_dataset = TextDataset(val_input_ids, val_attention_masks, val_labels)

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False)

# Check whether a GPU is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f'Using device: {device}')

# Define the model
class TransformerForTextClassification(nn.Module):
    def __init__(self, transformer_model, num_classes):
        super(TransformerForTextClassification, self).__init__()
        self.transformer = transformer_model
        self.dropout = nn.Dropout(0.5)
        self.classifier = nn.Linear(transformer_model.config.hidden_size, num_classes)

    def forward(self, input_ids, attention_mask):
        outputs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)
        # Use the pooler output when available; otherwise fall back to the [CLS] token vector
        pooled_output = outputs.pooler_output if 'pooler_output' in outputs else outputs.last_hidden_state[:, 0]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits

# Initialize the model, loss function, and optimizer
model = TransformerForTextClassification(transformer_model, num_classes=2).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.00001)  # small learning rate for full fine-tuning

# Train and validate the model, saving the best model and the last-epoch model
def train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device, epochs=10):
    best_val_accuracy = 0.0
    best_model_path = "best_model.pth"
    last_model_path = "last_model.pth"

    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for batch in train_dataloader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            optimizer.zero_grad()
            outputs = model(input_ids, attention_mask)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

        train_loss /= len(train_dataloader)
        print(f"Epoch {epoch + 1}, Training Loss: {train_loss}")

        model.eval()
        val_loss = 0
        correct = 0
        total = 0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for batch in val_dataloader:
                input_ids = batch['input_ids'].to(device)
                attention_mask = batch['attention_mask'].to(device)
                labels = batch['labels'].to(device)

                outputs = model(input_ids, attention_mask)
                loss = criterion(outputs, labels)
                val_loss += loss.item()

                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

        val_loss /= len(val_dataloader)
        accuracy = 100 * correct / total
        print(f"Validation Loss: {val_loss}, Validation Accuracy: {accuracy}%")

        # Print the classification report
        print("Classification Report:")
        print(classification_report(all_labels, all_preds, digits=3))

        # Save the model that performs best on the validation set
        if accuracy > best_val_accuracy:
            best_val_accuracy = accuracy
            torch.save(model.state_dict(), best_model_path)
            print(f"Best model saved with accuracy: {best_val_accuracy}%")

    # Save the model from the final epoch
    torch.save(model.state_dict(), last_model_path)
    print("Last model saved.")

train_and_evaluate(model, train_dataloader, val_dataloader, criterion, optimizer, device)

Run results:
[Figure: Transformer training and validation output]
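
A common alternative to the pooler/[CLS] representation used above is masked mean pooling over all token embeddings, so that padding positions do not contribute. A sketch of such a pooling function (not what the code above does):

def mean_pool(last_hidden_state, attention_mask):
    # Average the token embeddings, ignoring padding positions.
    mask = attention_mask.unsqueeze(-1).float()     # (batch, seq_len, 1)
    summed = (last_hidden_state * mask).sum(dim=1)  # (batch, hidden)
    counts = mask.sum(dim=1).clamp(min=1e-9)        # avoid division by zero
    return summed / counts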
