Diffusion Demystified (4): Implementing a Simple Diffusion Model in PyTorch


 1. Training and Sampling Pipeline
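
The code below implements the two standard DDPM loops from Ho et al. (2020). Training: for each image $x_0$, draw a timestep $t \sim \mathrm{Uniform}\{1, \dots, T\}$ and noise $\epsilon \sim \mathcal{N}(0, I)$, form the noised image

$$x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon, \qquad \alpha_t = 1 - \beta_t, \quad \bar{\alpha}_t = \prod_{s=1}^{t} \alpha_s,$$

and minimize the MSE $\lVert \epsilon - \epsilon_\theta(x_t, t) \rVert^2$. Sampling: start from $x_T \sim \mathcal{N}(0, I)$ and iterate for $t = T, \dots, 1$

$$x_{t-1} = \frac{1}{\sqrt{\alpha_t}} \left( x_t - \frac{1 - \alpha_t}{\sqrt{1 - \bar{\alpha}_t}}\, \epsilon_\theta(x_t, t) \right) + \sqrt{\beta_t}\, z, \qquad z \sim \mathcal{N}(0, I) \ (z = 0 \text{ at } t = 1).$$

These coefficients map one-to-one onto the buffers sqrtab, sqrtmab, oneover_sqrta, mab_over_sqrtmab, and sqrt_beta_t precomputed in ddpm_schedules below.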

 2. Unconditional Implementation

The script below trains an unconditional DDPM on MNIST: a small U-Net learns to predict the noise added at a random timestep, and new digits are generated by iterating the reverse update.

import torch, time, os
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F
 
 
class ResidualConvBlock(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, is_res: bool = False
    ) -> None:
        super().__init__()
        '''
        standard ResNet style convolutional block
        '''
        self.same_channels = in_channels==out_channels
        self.is_res = is_res
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
 
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.is_res:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            # this adds on correct residual in case channels have increased
            if self.same_channels:
                out = x + x2
            else:
                out = x1 + x2
            return out / 1.414  # divide by sqrt(2) to keep the variance of the residual sum roughly constant
        else:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            return x2
 
 
class UnetDown(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(UnetDown, self).__init__()
        '''
        process and downscale the image feature maps
        '''
        layers = [ResidualConvBlock(in_channels, out_channels), nn.MaxPool2d(2)]
        self.model = nn.Sequential(*layers)
 
    def forward(self, x):
        return self.model(x)
 
 
class UnetUp(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(UnetUp, self).__init__()
        '''
        process and upscale the image feature maps
        '''
        layers = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, 2),
            ResidualConvBlock(out_channels, out_channels),
            ResidualConvBlock(out_channels, out_channels),
        ]
        self.model = nn.Sequential(*layers)
 
    def forward(self, x, skip):
        x = torch.cat((x, skip), 1)
        x = self.model(x)
        return x
 
 
class EmbedFC(nn.Module):
    def __init__(self, input_dim, emb_dim):
        super(EmbedFC, self).__init__()
        '''
        generic two-layer MLP used to embed timesteps and conditions
        '''
        self.input_dim = input_dim
        layers = [
            nn.Linear(input_dim, emb_dim),
            nn.GELU(),
            nn.Linear(emb_dim, emb_dim),
        ]
        self.model = nn.Sequential(*layers)
 
    def forward(self, x):
        x = x.view(-1, self.input_dim)
        return self.model(x)
class Unet(nn.Module):
    def __init__(self, in_channels, n_feat=256):
        super(Unet, self).__init__()
 
        self.in_channels = in_channels
        self.n_feat = n_feat
 
        self.init_conv = ResidualConvBlock(in_channels, n_feat, is_res=True)
 
        self.down1 = UnetDown(n_feat, n_feat)
        self.down2 = UnetDown(n_feat, 2 * n_feat)
 
        self.to_vec = nn.Sequential(nn.AvgPool2d(7), nn.GELU())
 
        self.timeembed1 = EmbedFC(1, 2 * n_feat)
        self.timeembed2 = EmbedFC(1, 1 * n_feat)
 
        self.up0 = nn.Sequential(
            # nn.ConvTranspose2d(6 * n_feat, 2 * n_feat, 7, 7), # when concat temb and cemb end up w 6*n_feat
            nn.ConvTranspose2d(2 * n_feat, 2 * n_feat, 7, 7),  # otherwise just have 2*n_feat
            nn.GroupNorm(8, 2 * n_feat),
            nn.ReLU(),
        )
 
        self.up1 = UnetUp(4 * n_feat, n_feat)
        self.up2 = UnetUp(2 * n_feat, n_feat)
        self.out = nn.Sequential(
            nn.Conv2d(2 * n_feat, n_feat, 3, 1, 1),
            nn.GroupNorm(8, n_feat),
            nn.ReLU(),
            nn.Conv2d(n_feat, self.in_channels, 3, 1, 1),
        )
 
    def forward(self, x, t):
        '''
        Given a noised image and its (normalized) timestep, predict the added noise.
        :param x: noised image
        :param t: timestep, normalized to [0, 1]
        :return: predicted Gaussian noise
        '''
        x = self.init_conv(x)
        down1 = self.down1(x)
        down2 = self.down2(down1)
        hiddenvec = self.to_vec(down2)
 
        # embed time step
        temb1 = self.timeembed1(t).view(-1, self.n_feat * 2, 1, 1)
        temb2 = self.timeembed2(t).view(-1, self.n_feat, 1, 1)
 
        # add the timestep embedding to the upsampled features before the next up block
        up1 = self.up0(hiddenvec)
        up2 = self.up1(up1 + temb1, down2)
        up3 = self.up2(up2 + temb2, down1)
        out = self.out(torch.cat((up3, x), 1))
        return out
 
class DDPM(nn.Module):
    def __init__(self, model, betas, n_T, device):
        super(DDPM, self).__init__()
        self.model = model.to(device)
 
        # register_buffer stores the precomputed schedule tensors with the module, so they move to the right device and are saved in the state dict
        for k, v in self.ddpm_schedules(betas[0], betas[1], n_T).items():
            self.register_buffer(k, v)
 
        self.n_T = n_T
        self.device = device
        self.loss_mse = nn.MSELoss()
 
    def ddpm_schedules(self, beta1, beta2, T):
        '''
        Precompute the schedule quantities for every step; beta varies linearly.
        :param beta1: lower bound of beta
        :param beta2: upper bound of beta
        :param T: total number of steps
        '''
        assert beta1 < beta2 < 1.0, "beta1 and beta2 must be in (0, 1)"
 
        beta_t = (beta2 - beta1) * torch.arange(0, T + 1, dtype=torch.float32) / T + beta1  # linearly spaced values from beta1 to beta2
        sqrt_beta_t = torch.sqrt(beta_t)
        alpha_t = 1 - beta_t
        log_alpha_t = torch.log(alpha_t)
        alphabar_t = torch.cumsum(log_alpha_t, dim=0).exp()  # cumulative product of alpha, computed in log space for numerical stability
 
        sqrtab = torch.sqrt(alphabar_t)  # sqrt of the cumulative product of alpha
        oneover_sqrta = 1 / torch.sqrt(alpha_t)  # 1 / sqrt(alpha)

        sqrtmab = torch.sqrt(1 - alphabar_t)  # sqrt(1 - cumulative product of alpha)
        mab_over_sqrtmab_inv = (1 - alpha_t) / sqrtmab
 
        return {
            "alpha_t": alpha_t,  # \alpha_t
            "oneover_sqrta": oneover_sqrta,  # 1/\sqrt{\alpha_t}
            "sqrt_beta_t": sqrt_beta_t,  # \sqrt{\beta_t}
            "alphabar_t": alphabar_t,  # \bar{\alpha_t}
            "sqrtab": sqrtab,  # \sqrt{\bar{\alpha_t}} # 加噪标准差
            "sqrtmab": sqrtmab,  # \sqrt{1-\bar{\alpha_t}}  # 加噪均值
            "mab_over_sqrtmab": mab_over_sqrtmab_inv,  # (1-\alpha_t)/\sqrt{1-\bar{\alpha_t}}
        }
    def forward(self, x):
        """
        训练过程中, 随机选择step和生成噪声
        """
        # 随机选择step
        _ts = torch.randint(1, self.n_T + 1, (x.shape[0],)).to(self.device)  # t ~ Uniform(0, n_T)
        # 随机生成正态分布噪声
        noise = torch.randn_like(x)  # eps ~ N(0, 1)
        # 加噪后的图像x_t
        x_t = (
                self.sqrtab[_ts, None, None, None] * x
                + self.sqrtmab[_ts, None, None, None] * noise
 
        )
 
        # 将unet预测的对应step的正态分布噪声与真实噪声做对比
        return self.loss_mse(noise, self.model(x_t, _ts / self.n_T))
 
    def sample(self, n_sample, size, device):
        # start from pure noise, x_T ~ N(0, I)
        x_i = torch.randn(n_sample, *size).to(device)
        for i in range(self.n_T, 0, -1):
            t_is = torch.tensor([i / self.n_T]).to(device)
            t_is = t_is.repeat(n_sample, 1, 1, 1)

            # fresh noise for the stochastic part of the update (none at the last step)
            z = torch.randn(n_sample, *size).to(device) if i > 1 else 0

            # predict the noise and apply one reverse-diffusion update
            eps = self.model(x_i, t_is)
            x_i = self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i]) + self.sqrt_beta_t[i] * z
        return x_i
 
 
class ImageGenerator(object):
    def __init__(self):
        '''
        Initialize hyperparameters, the dataset, and the network.
        '''
        self.epoch = 20
        self.sample_num = 100
        self.batch_size = 256
        self.lr = 0.0001
        self.n_T = 400
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.init_dataloader()
        self.sampler = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02), n_T=self.n_T, device=self.device).to(self.device)
        self.optimizer = optim.Adam(self.sampler.model.parameters(), lr=self.lr)
 
    def init_dataloader(self):
        '''
        Initialize the datasets and dataloaders.
        '''
        tf = transforms.Compose([
            transforms.ToTensor(),
        ])
        train_dataset = MNIST('./data/',
                              train=True,
                              download=True,
                              transform=tf)
        self.train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True)
        val_dataset = MNIST('./data/',
                            train=False,
                            download=True,
                            transform=tf)
        self.val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False)
 
    def train(self):
        self.sampler.train()
        print('Training started!')
        for epoch in range(self.epoch):
            self.sampler.model.train()
            loss_mean = 0
            for i, (images, labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.device), labels.to(self.device)
 
                # compute the diffusion training loss (DDPM.forward picks the timestep and noise)
                loss = self.sampler(images)
                loss_mean += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            train_loss = loss_mean / len(self.train_dataloader)
            print('epoch:{}, loss:{:.4f}'.format(epoch, train_loss))
            self.visualize_results(epoch)
 
    @torch.no_grad()
    def visualize_results(self, epoch):
        self.sampler.eval()
        # output directory for sampled images
        output_path = 'results/Diffusion'
        if not os.path.exists(output_path):
            os.makedirs(output_path)
 
        tot_num_samples = self.sample_num
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
        out = self.sampler.sample(tot_num_samples, (1, 28, 28), self.device)
        save_image(out, os.path.join(output_path, '{}.jpg'.format(epoch)), nrow=image_frame_dim)
 
 
 
if __name__ == '__main__':
    generator = ImageGenerator()
    generator.train()
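
The training script above does not save checkpoints. As a minimal sketch of standalone sampling, assuming a checkpoint was saved after training with torch.save(generator.sampler.state_dict(), 'diffusion_mnist.pt') (the filename is hypothetical):

import torch
from torchvision.utils import save_image

device = 'cuda' if torch.cuda.is_available() else 'cpu'
sampler = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02), n_T=400, device=device).to(device)
sampler.load_state_dict(torch.load('diffusion_mnist.pt', map_location=device))  # hypothetical checkpoint path
sampler.eval()

with torch.no_grad():
    samples = sampler.sample(16, (1, 28, 28), device)  # sixteen 1x28x28 images from pure noise
save_image(samples, 'samples.jpg', nrow=4)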

3. Conditional Implementation

The conditional version additionally embeds the class label and uses it to modulate the decoder features, so the model can be asked for a specific digit at sampling time.

import torch, time, os
import numpy as np
import torch.nn as nn
import torch.optim as optim
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F
 
 
class ResidualConvBlock(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, is_res: bool = False
    ) -> None:
        super().__init__()
        '''
        standard ResNet style convolutional block
        '''
        self.same_channels = in_channels==out_channels
        self.is_res = is_res
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1),
            nn.BatchNorm2d(out_channels),
            nn.GELU(),
        )
 
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.is_res:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            # this adds on correct residual in case channels have increased
            if self.same_channels:
                out = x + x2
            else:
                out = x1 + x2
            return out / 1.414  # divide by sqrt(2) to keep the variance of the residual sum roughly constant
        else:
            x1 = self.conv1(x)
            x2 = self.conv2(x1)
            return x2
 
 
class UnetDown(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(UnetDown, self).__init__()
        '''
        process and downscale the image feature maps
        '''
        layers = [ResidualConvBlock(in_channels, out_channels), nn.MaxPool2d(2)]
        self.model = nn.Sequential(*layers)
 
    def forward(self, x):
        return self.model(x)
 
 
class UnetUp(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(UnetUp, self).__init__()
        '''
        process and upscale the image feature maps
        '''
        layers = [
            nn.ConvTranspose2d(in_channels, out_channels, 2, 2),
            ResidualConvBlock(out_channels, out_channels),
            ResidualConvBlock(out_channels, out_channels),
        ]
        self.model = nn.Sequential(*layers)
 
    def forward(self, x, skip):
        x = torch.cat((x, skip), 1)
        x = self.model(x)
        return x
 
 
class EmbedFC(nn.Module):
    def __init__(self, input_dim, emb_dim):
        super(EmbedFC, self).__init__()
        '''
        generic two-layer MLP used to embed timesteps and conditions
        '''
        self.input_dim = input_dim
        layers = [
            nn.Linear(input_dim, emb_dim),
            nn.GELU(),
            nn.Linear(emb_dim, emb_dim),
        ]
        self.model = nn.Sequential(*layers)
 
    def forward(self, x):
        x = x.view(-1, self.input_dim)
        return self.model(x)
class Unet(nn.Module):
    def __init__(self, in_channels, n_feat=256, n_classes=10):
        super(Unet, self).__init__()
 
        self.in_channels = in_channels
        self.n_feat = n_feat
 
        self.init_conv = ResidualConvBlock(in_channels, n_feat, is_res=True)
 
        self.down1 = UnetDown(n_feat, n_feat)
        self.down2 = UnetDown(n_feat, 2 * n_feat)
 
        self.to_vec = nn.Sequential(nn.AvgPool2d(7), nn.GELU())
 
        self.timeembed1 = EmbedFC(1, 2 * n_feat)
        self.timeembed2 = EmbedFC(1, 1 * n_feat)
        self.conditionembed1 = EmbedFC(n_classes, 2 * n_feat)
        self.conditionembed2 = EmbedFC(n_classes, 1 * n_feat)
 
        self.up0 = nn.Sequential(
            # nn.ConvTranspose2d(6 * n_feat, 2 * n_feat, 7, 7), # when concat temb and cemb end up w 6*n_feat
            nn.ConvTranspose2d(2 * n_feat, 2 * n_feat, 7, 7),  # otherwise just have 2*n_feat
            nn.GroupNorm(8, 2 * n_feat),
            nn.ReLU(),
        )
 
        self.up1 = UnetUp(4 * n_feat, n_feat)
        self.up2 = UnetUp(2 * n_feat, n_feat)
        self.out = nn.Sequential(
            nn.Conv2d(2 * n_feat, n_feat, 3, 1, 1),
            nn.GroupNorm(8, n_feat),
            nn.ReLU(),
            nn.Conv2d(n_feat, self.in_channels, 3, 1, 1),
        )
 
    def forward(self, x, c, t):
        '''
        Given a noised image, a condition vector, and the (normalized) timestep, predict the added noise.
        :param x: noised image
        :param c: condition vector (one-hot class label)
        :param t: timestep, normalized to [0, 1]
        :return: predicted Gaussian noise
        '''
        x = self.init_conv(x)
        down1 = self.down1(x)
        down2 = self.down2(down1)
        hiddenvec = self.to_vec(down2)
 
        # embed the timestep and the class condition
        temb1 = self.timeembed1(t).view(-1, self.n_feat * 2, 1, 1)
        temb2 = self.timeembed2(t).view(-1, self.n_feat, 1, 1)
        cemb1 = self.conditionembed1(c).view(-1, self.n_feat * 2, 1, 1)
        cemb2 = self.conditionembed2(c).view(-1, self.n_feat, 1, 1)
 
        # scale the features by the condition embedding and shift by the timestep embedding before each up block
        up1 = self.up0(hiddenvec)
        up2 = self.up1(cemb1 * up1 + temb1, down2)
        up3 = self.up2(cemb2 * up2 + temb2, down1)
        out = self.out(torch.cat((up3, x), 1))
        return out
 
class DDPM(nn.Module):
    def __init__(self, model, betas, n_T, device):
        super(DDPM, self).__init__()
        self.model = model.to(device)
 
        # register_buffer stores the precomputed schedule tensors with the module, so they move to the right device and are saved in the state dict
        for k, v in self.ddpm_schedules(betas[0], betas[1], n_T).items():
            self.register_buffer(k, v)
 
        self.n_T = n_T
        self.device = device
        self.loss_mse = nn.MSELoss()
 
    def ddpm_schedules(self, beta1, beta2, T):
        '''
        Precompute the schedule quantities for every step; beta varies linearly.
        :param beta1: lower bound of beta
        :param beta2: upper bound of beta
        :param T: total number of steps
        '''
        assert beta1 < beta2 < 1.0, "beta1 and beta2 must be in (0, 1)"
 
        beta_t = (beta2 - beta1) * torch.arange(0, T + 1, dtype=torch.float32) / T + beta1  # linearly spaced values from beta1 to beta2
        sqrt_beta_t = torch.sqrt(beta_t)
        alpha_t = 1 - beta_t
        log_alpha_t = torch.log(alpha_t)
        alphabar_t = torch.cumsum(log_alpha_t, dim=0).exp()  # cumulative product of alpha, computed in log space for numerical stability
 
        sqrtab = torch.sqrt(alphabar_t)  # sqrt of the cumulative product of alpha
        oneover_sqrta = 1 / torch.sqrt(alpha_t)  # 1 / sqrt(alpha)

        sqrtmab = torch.sqrt(1 - alphabar_t)  # sqrt(1 - cumulative product of alpha)
        mab_over_sqrtmab_inv = (1 - alpha_t) / sqrtmab
 
        return {
            "alpha_t": alpha_t,  # \alpha_t
            "oneover_sqrta": oneover_sqrta,  # 1/\sqrt{\alpha_t}
            "sqrt_beta_t": sqrt_beta_t,  # \sqrt{\beta_t}
            "alphabar_t": alphabar_t,  # \bar{\alpha_t}
            "sqrtab": sqrtab,  # \sqrt{\bar{\alpha_t}} # 加噪标准差
            "sqrtmab": sqrtmab,  # \sqrt{1-\bar{\alpha_t}}  # 加噪均值
            "mab_over_sqrtmab": mab_over_sqrtmab_inv,  # (1-\alpha_t)/\sqrt{1-\bar{\alpha_t}}
        }
 
    def forward(self, x, c):
        """
        训练过程中, 随机选择step和生成噪声
        """
        # 随机选择step
        _ts = torch.randint(1, self.n_T + 1, (x.shape[0],)).to(self.device)  # t ~ Uniform(0, n_T)
        # 随机生成正态分布噪声
        noise = torch.randn_like(x)  # eps ~ N(0, 1)
        # 加噪后的图像x_t
        x_t = (
                self.sqrtab[_ts, None, None, None] * x
                + self.sqrtmab[_ts, None, None, None] * noise
 
        )
 
        # 将unet预测的对应step的正态分布噪声与真实噪声做对比
        return self.loss_mse(noise, self.model(x_t, c, _ts / self.n_T))
 
    def sample(self, n_sample, c, size, device):
        # start from pure noise, x_T ~ N(0, I)
        x_i = torch.randn(n_sample, *size).to(device)
        for i in range(self.n_T, 0, -1):
            t_is = torch.tensor([i / self.n_T]).to(device)
            t_is = t_is.repeat(n_sample, 1, 1, 1)

            # fresh noise for the stochastic part of the update (none at the last step)
            z = torch.randn(n_sample, *size).to(device) if i > 1 else 0

            # predict the noise (conditioned on c) and apply one reverse-diffusion update
            eps = self.model(x_i, c, t_is)
            x_i = self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i]) + self.sqrt_beta_t[i] * z
        return x_i
 
 
class ImageGenerator(object):
    def __init__(self):
        '''
        Initialize hyperparameters, the dataset, and the network.
        '''
        self.epoch = 20
        self.sample_num = 100
        self.batch_size = 256
        self.lr = 0.0001
        self.n_T = 400
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.init_dataloader()
        self.sampler = DDPM(model=Unet(in_channels=1), betas=(1e-4, 0.02), n_T=self.n_T, device=self.device).to(self.device)
        self.optimizer = optim.Adam(self.sampler.model.parameters(), lr=self.lr)
 
    def init_dataloader(self):
        '''
        Initialize the datasets and dataloaders.
        '''
        tf = transforms.Compose([
            transforms.ToTensor(),
        ])
        train_dataset = MNIST('./data/',
                              train=True,
                              download=True,
                              transform=tf)
        self.train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, drop_last=True)
        val_dataset = MNIST('./data/',
                            train=False,
                            download=True,
                            transform=tf)
        self.val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False)
 
    def train(self):
        self.sampler.train()
        print('Training started!')
        for epoch in range(self.epoch):
            self.sampler.model.train()
            loss_mean = 0
            for i, (images, labels) in enumerate(self.train_dataloader):
                images, labels = images.to(self.device), labels.to(self.device)
                labels = F.one_hot(labels, num_classes=10).float()
                # condition on the one-hot label; DDPM.forward adds the noise and computes the loss
                loss = self.sampler(images, labels)
                loss_mean += loss.item()
                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()
            train_loss = loss_mean / len(self.train_dataloader)
            print('epoch:{}, loss:{:.4f}'.format(epoch, train_loss))
            self.visualize_results(epoch)
 
    @torch.no_grad()
    def visualize_results(self, epoch):
        self.sampler.eval()
        # output directory for sampled images
        output_path = 'results/Diffusion'
        if not os.path.exists(output_path):
            os.makedirs(output_path)
 
        tot_num_samples = self.sample_num
        image_frame_dim = int(np.floor(np.sqrt(tot_num_samples)))
        labels = F.one_hot(torch.arange(10, device=self.device).repeat_interleave(10), num_classes=10).float()  # ten samples of each digit 0-9
        out = self.sampler.sample(tot_num_samples, labels, (1, 28, 28), self.device)
        save_image(out, os.path.join(output_path, '{}.jpg'.format(epoch)), nrow=image_frame_dim)
 
 
 
if __name__ == '__main__':
    generator = ImageGenerator()
    generator.train()
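
Because the label vector is the only thing that changes, a trained conditional model can be asked for a specific digit. A minimal sketch, sampling sixteen images of the digit 7 (generator is the trained ImageGenerator from the script above):

import torch
import torch.nn.functional as F
from torchvision.utils import save_image

device = generator.device
# sixteen one-hot condition vectors, all of class 7
labels = F.one_hot(torch.full((16,), 7, dtype=torch.int64), num_classes=10).float().to(device)

generator.sampler.eval()
with torch.no_grad():
    sevens = generator.sampler.sample(16, labels, (1, 28, 28), device)
save_image(sevens, 'sevens.jpg', nrow=4)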
