ECG Signal Anomaly Detection Based on a Convolutional Variational Autoencoder


The code is fairly straightforward; the full runnable script is given below:

# Built-in and general-purpose libraries
#
import os
import time
import random
import pandas as pd
import numpy  as np
from   tqdm   import tqdm
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# Visualization libraries
#
import matplotlib.pyplot as plt 
import seaborn           as sns


# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# Sklearn library
#
from sklearn.preprocessing import StandardScaler 
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
# PyTorch library
#
import torch
import torch.nn                     as nn
from   torch.utils.data             import DataLoader
# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
#
# User libraries
#
from utils.EarlyStopping     import *
from utils.LRScheduler       import *
from utils.LossFunctions     import *
from utils.plot_latent_space import *
from utils.Convolutional_VAE import *
# Sets the seed 
#
seed = 42
random.seed( seed )
torch.manual_seed( seed )
np.random.seed( seed )

Set parameters

class Parameters:
    def __init__(self):
        
        # Data preprocessing
        #
        self.scaling = False
        # Dimension of latent space
        self.latent_dim = 3
        # Training ratio - Splitting data for training/validation
        self.training_ratio = 0.9
        
        
        # Neural network hyper-parameters
        #
        #
        # Batch size
        self.batch_size = 32
        # Number of worker in loaders
        self.num_workers = 0
        # Learning rate
        self.learning_rate = 0.001
        # Number of epochs to train the model
        self.epochs = 100
        # Define verbose
        self.verbose = True
        # Patience for early stopping
        self.patience = 20
        # Saved model path
        self.path = 'models/Convolutional_VAE.pth'
        


        # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
        # GPU
        #
        #
        # Use gpu
        self.use_gpu       = True
        # Selected GPU
        self.gpu           = 0
  
        
    def print(self):
        '''
            Print user parameters and settings
        '''
        print('[INFO] Parameters')
        print(50*'-')
        d = self.__dict__
        for x in d.keys():
            print('{}: {}'.format(x, d[x]))
            
params = Parameters()
if (params.use_gpu):
    try:
        torch.cuda.init()


        if (torch.cuda.is_available() == True):
            print('[INFO] CUDA is available')


            device = torch.device( f'cuda:{params.gpu}' )
            print('[INFO] Device name: %s' % torch.cuda.get_device_name(0))


        else:
            print('[INFO] CUDA is not available')
            device = torch.device( 'cpu' )
    except Exception:
        params.gpu = False
        print('[INFO] CUDA is not available')
        device = torch.device( 'cpu' )
else:
    print('[INFO] CPU')
    device = torch.device( 'cpu' )
params.print()
[INFO] Parameters
--------------------------------------------------
scaling: False
latent_dim: 3
training_ratio: 0.9
batch_size: 32
num_workers: 0
learning_rate: 0.001
epochs: 100
verbose: True
patience: 20
path: models/Convolutional_VAE.pth
use_gpu: True
gpu: False

Import training data

# Start timer
start = time.time()


data = np.loadtxt('ECG5000/ECG5000_TRAIN', delimiter=',')


trainX = data[:, 1:]
trainY = data[:, 0]


# Note: since we treat this problem in a semi-supervised fashion,
# we keep only the 'NORMAL' cases (label == 1) for training
trainX = trainX[trainY == 1]
trainY = trainY[trainY == 1]


print('[INFO] Training data imported')
print('[INFO] Time: %.2f seconds' % (time.time() - start))

Import testing data

# Start timer
start = time.time()


data = np.loadtxt('ECG5000/ECG5000_TEST', delimiter=',')


testX = data[:, 1:]
testY = data[:, 0]


print('[INFO] Testing data imported')
print('[INFO] Time: %.2f seconds' % (time.time() - start))
print('[INFO] Training instances: ', trainX.shape[0])
print('[INFO] Testing instances:  ', testX.shape[0])
[INFO] Training instances:  2919
[INFO] Testing instances:   4500

Relabel classes: NORMAL (class 1) vs. ANOMALY (all other classes)

trainY = np.where(trainY == 1, 'NORMAL', 'ANOMALY')
testY  = np.where(testY  == 1, 'NORMAL', 'ANOMALY')

Reshaping inputs

# Expand dimension
#
trainX = np.expand_dims(trainX, axis=-1)
testX  = np.expand_dims(testX, axis=-1)




# Reshape data
#
trainX = np.swapaxes(trainX, 1, 2)
testX  = np.swapaxes(testX,  1, 2)
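After these two steps each record has the (samples, channels, length) layout that PyTorch's Conv1d expects: one channel over 140 time steps per heartbeat. A quick sanity check (the instance counts below come from the output shown earlier):

print('[INFO] trainX shape:', trainX.shape)   # expected: (2919, 1, 140)
print('[INFO] testX  shape:', testX.shape)    # expected: (4500, 1, 140)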

Create validation set

train_idx = int( trainX.shape[0] * params.training_ratio)


validX = trainX[train_idx:]
trainX = trainX[:train_idx]


print('[INFO] Training instances:    ', trainX.shape[0])
print('[INFO] Validation instances:  ', validX.shape[0])
[INFO] Training instances:     2627
[INFO] Validation instances:   292

Scale data

# Standardize/scale the dataset (optional; controlled by params.scaling)
# Note: StandardScaler expects 2-D input, so the 3-D (N, 1, 140) arrays would need to be
# reshaped before this branch could run as-is.




if ( params.scaling ):
    print('[INFO] Scaling')
    # Set scaler
    #
    scaler = StandardScaler()




    # Scaling - Training set
    #
    trainX = scaler.fit_transform( trainX )




    # Scaling - Testing set
    #
    testX  = scaler.transform( testX )

Info

nFeatures      = trainX.shape[1]
sequenceLength = trainX.shape[2]




print('[INFO] Number of features: ', nFeatures)
print('[INFO] Sequence length:    ', sequenceLength)

Setup loaders

# Create training and test dataloaders
#
#
num_workers = params.num_workers
# how many samples per batch to load
batch_size  = params.batch_size




# Prepare Data-Loaders
#
train_loader = torch.utils.data.DataLoader(trainX, batch_size=batch_size, num_workers=num_workers)
valid_loader = torch.utils.data.DataLoader(validX, batch_size=batch_size, num_workers=num_workers)
test_loader  = torch.utils.data.DataLoader(testX,  batch_size=batch_size, num_workers=num_workers)

Convolutional VAE model

Setup model

# Initialize VAE
#
model = VAE( nFeatures = trainX.shape[1], 
             z_dim      = params.latent_dim,
             device     = device )




model.to( device );




print( model )
VAE(
  (encoder): Encoder(
    (encoder): Sequential(
      (0): Conv1d(1, 32, kernel_size=(4,), stride=(2,), padding=(2,))
      (1): ReLU()
      (2): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (3): Conv1d(32, 64, kernel_size=(8,), stride=(2,))
      (4): ReLU()
    )
    (fc1): Linear(in_features=2048, out_features=128, bias=True)
    (fc1a): Linear(in_features=128, out_features=3, bias=True)
    (fc1b): Linear(in_features=128, out_features=3, bias=True)
    (relu): ReLU()
  )
  (decoder): Decoder(
    (fc1): Linear(in_features=3, out_features=128, bias=True)
    (fc2): Linear(in_features=128, out_features=2048, bias=True)
    (relu): ReLU()
    (decoder): Sequential(
      (0): ConvTranspose1d(64, 32, kernel_size=(8,), stride=(2,))
      (1): ReLU()
      (2): BatchNorm1d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
      (3): ConvTranspose1d(32, 1, kernel_size=(2,), stride=(2,))
    )
  )
)
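The VAE class itself is imported from utils.Convolutional_VAE and its source is not listed in the post. Based on the architecture printed above, a minimal sketch of what the model plausibly looks like is given below; the reparameterization details (z_scale is interpreted here as the log-variance) and the transform() helper are assumptions, not the author's exact implementation.

import torch
import torch.nn as nn

class Encoder(nn.Module):
    def __init__(self, nFeatures=1, z_dim=3):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Conv1d(nFeatures, 32, kernel_size=4, stride=2, padding=2),
            nn.ReLU(),
            nn.BatchNorm1d(32),
            nn.Conv1d(32, 64, kernel_size=8, stride=2),
            nn.ReLU(),
        )
        self.fc1  = nn.Linear(2048, 128)    # 64 channels x 32 steps for length-140 inputs
        self.fc1a = nn.Linear(128, z_dim)   # latent mean
        self.fc1b = nn.Linear(128, z_dim)   # latent log-variance (assumption)
        self.relu = nn.ReLU()

    def forward(self, x):
        h = self.encoder(x).flatten(start_dim=1)
        h = self.relu(self.fc1(h))
        return self.fc1a(h), self.fc1b(h)

class Decoder(nn.Module):
    def __init__(self, nFeatures=1, z_dim=3):
        super().__init__()
        self.fc1  = nn.Linear(z_dim, 128)
        self.fc2  = nn.Linear(128, 2048)
        self.relu = nn.ReLU()
        self.decoder = nn.Sequential(
            nn.ConvTranspose1d(64, 32, kernel_size=8, stride=2),
            nn.ReLU(),
            nn.BatchNorm1d(32),
            nn.ConvTranspose1d(32, nFeatures, kernel_size=2, stride=2),
        )

    def forward(self, z):
        h = self.relu(self.fc2(self.relu(self.fc1(z))))
        return self.decoder(h.view(-1, 64, 32))   # two transposed convs map 32 steps back to 140

class VAE(nn.Module):
    def __init__(self, nFeatures=1, z_dim=3, device='cpu'):
        super().__init__()
        self.encoder = Encoder(nFeatures, z_dim)
        self.decoder = Decoder(nFeatures, z_dim)
        self.device  = device

    def reparameterize(self, z_loc, z_logvar):
        std = torch.exp(0.5 * z_logvar)
        return z_loc + torch.randn_like(std) * std

    def forward(self, x):
        z_loc, z_logvar = self.encoder(x)
        z = self.reparameterize(z_loc, z_logvar)
        return z_loc, z_logvar, self.decoder(z)

    def transform(self, x):
        # Latent representation (the posterior mean), used later for the latent-space plots
        return self.encoder(x)[0]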

Training

Training setup

# Set Optimizer
#
optimizer = torch.optim.RMSprop(params = model.parameters(), 
                                lr     = params.learning_rate)












# Early stopping
#
early_stopping = EarlyStopping(patience  = params.patience,
                               verbose   = params.verbose,
                               path      = params.path,
                               delta     = 0)








# LR scheduler
#
scheduler = LRScheduler(optimizer = optimizer, 
                        patience  = 10, 
                        min_lr    = 1e-6, 
                        factor    = 0.5, 
                        verbose   = params.verbose)
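The ELBO loss function is imported from utils.LossFunctions and is not reproduced in the post. For a VAE it is typically the sum of a reconstruction term and a KL-divergence term; a minimal sketch consistent with how it is called in the training loop below (assuming z_scale carries the log-variance of the approximate posterior):

def ELBO(decoded, data, z_loc, z_logvar):
    # Reconstruction term: how well the decoded signal matches the input beat
    MSE = nn.functional.mse_loss(decoded, data, reduction='mean')
    # KL divergence between q(z|x) = N(z_loc, exp(z_logvar)) and the standard normal prior
    KL  = -0.5 * torch.mean(1 + z_logvar - z_loc.pow(2) - z_logvar.exp())
    return MSE + KL, MSE, KL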

Training process

# Store training and validation loss
Losses = {
           'Train_Loss': [], 
           'Train_MSE':  [],
           'Train_KL':   [],
           'Valid_Loss': [],
           'Valid_MSE':  [],
           'Valid_KL':   [],
         }






# Set how often (in batches) the training progress is printed
#
batch_show = (train_loader.dataset.shape[0] // batch_size // 5)










# Main loop - Training process
#
for epoch in range(1, params.epochs+1):


    # Start timer
    start = time.time()
    
    # Monitor training loss
    train_loss = 0.0
    train_MSE  = 0.0
    train_KL   = 0.0
    valid_loss = 0.0
    valid_MSE  = 0.0
    valid_KL   = 0.0
    
    
    
    
    ###################
    # Train the model #
    ###################
    batch_idx = 0
    for data in train_loader:
        
        # Clear the gradients of all optimized variables
        #
        optimizer.zero_grad()
        
        # Cast the float64 batch from the loader to float32, move it to the device,
        # then forward pass: compute reconstructions by passing inputs to the model
        #
        data = data.float().to(device)
            
        z_loc, z_scale, decoded = model( data )
        


        
        # Calculate the loss
        #
        loss, MSE, KL = ELBO(decoded, data, z_loc, z_scale)
        
        
        
        # Backward pass: compute gradient of the loss with respect to model parameters
        #
        loss.backward()
        
        
        
        # Perform a single optimization step (parameter update)
        #
        optimizer.step()
        
        
        
        # Update running training loss
        #
        train_loss += loss.item()*data.size(0)
        train_MSE  += MSE.item()*data.size(0)
        train_KL   += KL.item()*data.size(0)
        
        batch_idx  += 1
        if (params.verbose == True and batch_idx % batch_show == 0):
            print('> Epoch: {} [{:5.0f}/{} ({:.0f}%)]'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader)))        


           
        
    # Compute average training statistics 
    #
    train_loss /= train_loader.dataset.shape[0]
    train_MSE  /= train_loader.dataset.shape[0]
    train_KL   /= train_loader.dataset.shape[0]
    
    
    
    
    
    
    for data in valid_loader:
        
        
        # Cast the float64 batch from the loader to float32 and move it to the device
        #
        data = data.float().to(device)


        z_loc, z_scale, decoded = model( data )
        


        
        # Calculate the loss
        #
        loss, MSE, KL = ELBO(decoded, data, z_loc, z_scale)
        
               
        # Update running validation loss
        #
        valid_loss += loss.item()*data.size(0)
        valid_MSE  += MSE.item()*data.size(0)
        valid_KL   += KL.item()*data.size(0)            


    # Compute average validation statistics 
    #
    valid_loss /= valid_loader.dataset.shape[0]
    valid_MSE  /= valid_loader.dataset.shape[0]
    valid_KL   /= valid_loader.dataset.shape[0]
    
    
    # Stop timer
    #
    stop  = time.time()
    
    
    # Show training results
    #
    print('[INFO] Train Loss: {:.6f} \tValid Loss: {:.6f} \tTime: {:.2f}secs'.format(train_loss, valid_loss, stop-start))


   
    
    
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    # Early Stopping
    #                
    early_stopping(valid_loss, model)
    if early_stopping.early_stop:
        print("Early stopping")
        break            
     
    
    # Store train/val loss
    #
    Losses['Train_Loss'] += [train_loss]
    Losses['Train_MSE']  += [train_MSE]
    Losses['Train_KL']   += [train_KL]
    Losses['Valid_Loss'] += [valid_loss]
    Losses['Valid_MSE']  += [valid_MSE]
    Losses['Valid_KL']   += [valid_KL]
    
    
    
    # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
    # Learning rate scheduler
    #
    scheduler(valid_loss)
model.load_state_dict(torch.load('models/Convolutional_VAE.pth'));
model.eval();


print('[INFO] Model loaded')

Evaluation

Training

Calculate loss for training set data

# Set MSE as criterion
#
criterion = nn.MSELoss()


train_loss = []
for data in tqdm( train_loader ):
    
    # Cast the float64 batch to float32 and move it to the selected device
    #
    data = data.float().to(device)


    # Forward pass: compute predicted outputs by passing inputs to the model
    _, _, outputs = model( data )


    # Per-sample reconstruction loss
    for i in range( data.shape[0] ):
        train_loss.append( criterion(outputs[i], data[i]).item() )


train_loss = np.array(train_loss)    




# Create DataFrame with results (loss)
#
Train_Loss = pd.DataFrame( 
                           {
                             'Loss'           : train_loss,
                           }
                         )
fig, ax = plt.subplots( figsize=(20, 5) )


sns.distplot(Train_Loss['Loss'], bins='auto', kde=True, ax=ax)
plt.xlabel("Train Loss", size = 14)
plt.ylabel("No of samples",  size = 14)
plt.xticks(size = 12);
plt.yticks(size = 12);
plt.show()


# Calculate threshold
#
WARNING  = np.quantile( Train_Loss['Loss'], 0.95 )
CRITICAL = np.quantile( Train_Loss['Loss'], 0.997 )


print('[INFO] Thresholds')
print(30*'-')
print("WARNING:  %.6f" % WARNING)
print("CRITICAL: %.6f" % CRITICAL)

def anomalyEvaluation(x):
    if (x < WARNING):
        return ('NORMAL')
    elif (x >= WARNING and x < CRITICAL):
        return ('WARNING')
    else:
        return ('CRITICAL')
    
# Detect all the samples which are anomalies (WARNING/CRITICAL)
#
Train_Loss['Status'] = Train_Loss['Loss'].apply( anomalyEvaluation )


Train_Loss['Status'].value_counts()

Testing

Calculate loss for testing set data

# Compute the per-sample test MSE loss.
test_losses = []
for data in tqdm( test_loader ):


        # Cast the float64 batch to float32 and move it to the selected device
        #
        data = data.float().to(device)
 
        
        # Forward pass: compute predicted outputs by passing inputs to the model
        #
        _, _, outputs = model(data)
        
        
        # Per-sample reconstruction loss
        #
        for i in range( data.shape[0] ):
            test_losses.append( criterion(outputs[i], data[i]).item() )


test_losses = np.array(test_losses)  




# Create DataFrame with results(loss)
Test_Loss = pd.DataFrame( 
                          {
                            'Loss'            : test_losses
                          }
                        )
fig, ax = plt.subplots( figsize=(20, 5) )


sns.distplot(Train_Loss['Loss'], bins='auto', kde=True, ax=ax)
sns.distplot(Test_Loss['Loss'],  bins='auto', kde=True, ax=ax)




plt.legend(['Training', 'Testing'], frameon = False, fontsize = 14)
plt.xlabel("Loss", size = 14)
plt.ylabel("No of samples",  size = 14)
plt.xticks(size = 12);
plt.yticks(size = 12);

# Detect all the samples which are anomalies (WARNING/CRITICAL)
#
Test_Loss['Status'] = Test_Loss['Loss'].apply( anomalyEvaluation )


Test_Loss['Status'].value_counts()
from sklearn.metrics import confusion_matrix


pred = Test_Loss['Status'].values
pred = np.where(pred != 'NORMAL', 'ANOMALY', 'NORMAL')
# pred = np.where(pred == 'CRITICAL', 'ANOMALY', 'NORMAL')


CM = confusion_matrix(testY, pred)
plt.figure( figsize = (4, 4) )
sns.set(font_scale = 1.2)


sns.heatmap(CM / CM.sum(axis=1, keepdims=True),   # normalize each row (true class)
            annot=True, 
            xticklabels = ['ANOMALY','NORMAL'],
            yticklabels = ['ANOMALY','NORMAL'], 
            cbar = False,
            fmt='.1%', 
            cmap='Blues')
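In addition to the normalized confusion matrix, a per-class precision/recall summary can be useful for comparing detectors; a short sketch with scikit-learn (these metrics are not reported in the original post):

from sklearn.metrics import classification_report

# Precision, recall and F1 for the ANOMALY / NORMAL decision
print(classification_report(testY, pred, labels=['ANOMALY', 'NORMAL'], digits=3))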

Latent vectors study

op = []
for data in tqdm( test_loader ):


        # Cast the float64 batch to float32 and move it to the selected device
        #
        data = data.float().to(device)
 
        
        # Forward pass: compute predicted outputs by passing inputs to the model
        #
        z = model.transform( data )


        # Store outputs
        #
        op += list( z.detach().cpu().numpy() )


        
# Convert to NumPy array
#
op = np.array( op )
plot_latent_space(op, Test_Loss['Status'].values, figsize=(20, 8))
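The plot_latent_space helper also lives in utils and is not shown in the post. With latent_dim = 3, one plausible implementation is a set of pairwise scatter plots of the latent coordinates colored by the assigned status; the sketch below is an assumption, not the author's code:

def plot_latent_space(z, labels, figsize=(20, 8)):
    # Pairwise scatter plots of the three latent dimensions, colored by NORMAL/WARNING/CRITICAL
    pairs = [(0, 1), (0, 2), (1, 2)]
    fig, axes = plt.subplots(1, len(pairs), figsize=figsize)
    for ax, (i, j) in zip(axes, pairs):
        sns.scatterplot(x=z[:, i], y=z[:, j], hue=labels, s=15, ax=ax)
        ax.set_xlabel('z[%d]' % i)
        ax.set_ylabel('z[%d]' % j)
    plt.tight_layout()
    plt.show()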

import umap
#
embedding = umap.UMAP(n_neighbors = 30).fit_transform( op )
labels = []
# Label each prediction against the ground truth; 'NORMAL' is treated as the positive class,
# so TP = correctly predicted NORMAL and TN = correctly predicted ANOMALY
for (x,y) in zip(pred, testY):
    if (x == y):
        if (x == 'NORMAL'):
            labels += ['TP']
        else:
            labels += ['TN']
    else:
        if (x == 'NORMAL'):
            labels += ['FP']
        else:
            labels += ['FN']
plt.figure( figsize = (7, 7))
sns.scatterplot(x = embedding[:, 0], 
                y = embedding[:, 1], 
                hue   = labels, 
                style = labels,
                size  = labels,
                legend = 'full',
                sizes = (100, 40));


plt.title('UMAP on latent-space - Confusion matrix')

plt.figure( figsize = (7, 7) )


idx = np.where(pred == testY)
plt.scatter(embedding[idx, 0], embedding[idx, 1], marker='o', color='tab:blue', s = 20)
idx = np.where(pred != testY)
plt.scatter(embedding[idx, 0], embedding[idx, 1], marker='x', color='tab:red',  s = 40)


plt.title('UMAP on latent-space - Classification evaluation')
plt.legend(['Correct', 'Error'], frameon = False, fontsize = 14);

About the author: Ph.D. in Engineering; reviewer for journals including 《Mechanical System and Signal Processing》, 《中国电机工程学报》, and 《控制与决策》. Areas of expertise: modern signal processing, machine learning, deep learning, digital twins, time-series analysis, equipment defect detection, equipment anomaly detection, and intelligent fault diagnosis and prognostics and health management (PHM).
