PyTorch 深度学习 || 卷积神经网络分类

news2024/11/26 4:48:43

卷积神经网络分类

import torch
import torch.nn as nn
import torchvision
import numpy as np
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.nn.functional as F
import torch.utils.data as Data
from torchvision import datasets, models, transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# ToTensor gives [0, 1]; Normalize maps each channel to [-1, 1].
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

# Set download=True on the first run if the dataset is not present locally.
trainset = torchvision.datasets.CIFAR10(root='./', train=True, download=False, transform=transform)
# BUG FIX: the shuffle flags were swapped — the training loader must shuffle
# (feeding SGD the dataset in a fixed order every epoch hurts convergence),
# while the test loader gains nothing from shuffling.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=4)

testset = torchvision.datasets.CIFAR10(root='./', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=4)

# Human-readable CIFAR-10 class names, indexed by label id.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
class Net(nn.Module):
    """LeNet-style CNN for 10-class classification of 3x32x32 images.

    conv1 -> pool -> conv2 -> pool shrinks 32x32 down to 5x5 with 16
    channels; three fully connected layers then map the 16*5*5 features
    to 10 class logits (CrossEntropyLoss applies softmax itself).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)    # 3x32x32 -> 6x28x28
        self.pool = nn.MaxPool2d(2, 2)     # halves H and W; reused twice
        self.conv2 = nn.Conv2d(6, 16, 5)   # 6x14x14 -> 16x10x10
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # BUG FIX: flatten per sample instead of x.view(-1, 16 * 5 * 5);
        # the old form silently changes the batch size (instead of raising)
        # when the input spatial size is not exactly 32x32.
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

# BUG FIX: the original left the model on the CPU while moving every batch
# to `device`; on a CUDA machine that raises a device-mismatch error.
net = Net().to(device)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

nums_epoch = 2
for epoch in range(nums_epoch):
    _loss = 0.0  # running loss over the current reporting window
    for i, (inputs, labels) in enumerate(trainloader, 0):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()

        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        _loss += loss.item()
        if i % 3000 == 2999:  # report the mean loss every 3000 mini-batches
            print('[%d, %5d] 损失: %.3f' %
                  (epoch + 1, i + 1, _loss / 3000))
            _loss = 0.0

print('训练结束')
def imshow(img):
    """Undo the (0.5, 0.5) normalisation and display a CHW image tensor."""
    unnormalized = img / 2 + 0.5
    # Matplotlib expects HWC ordering; torch tensors are CHW.
    plt.imshow(np.transpose(unnormalized.numpy(), (1, 2, 0)))
    plt.show()

# Pull one mini-batch from the test loader and show its ground-truth labels.
dataiter = iter(testloader)
images, labels = next(dataiter)
imshow(torchvision.utils.make_grid(images))
print('图像真实分类: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))

在这里插入图片描述

图像真实分类: plane frog deer ship

# Predict on the batch previewed above, then score the whole test set.
with torch.no_grad():  # BUG FIX: inference should not build autograd graphs
    outputs = net(images.to(device))
_, predicted = torch.max(outputs, 1)
print('图像预测分类: ', ' '.join(['%5s' % classes[predicted[j]] for j in range(4)]))

correct, total = 0, 0
with torch.no_grad():
    for images, labels in testloader:
        # BUG FIX: move evaluation batches to the same device as the model.
        images, labels = images.to(device), labels.to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (labels == predicted).sum().item()

print('测试集准确率: %d %%' % (100 * correct / total))

测试集准确率: 53 %

2. 搭建图像自动识别模型

import torch
import torchvision
import torch.utils.data as Data
import torch.nn as nn
import torch.nn.functional as F
# MNIST training split, converted to [0, 1] float tensors on load.
train_data = torchvision.datasets.MNIST(
    root='./',
    train=True,
    transform=torchvision.transforms.ToTensor(),
    download=False)

# The test split is loaded raw; its tensors are prepared by hand below.
test_data = torchvision.datasets.MNIST(
    root='./',
    train=False)

# Insert the channel axis -> (N, 1, 28, 28) and rescale uint8 pixels to [0, 1].
test_x = test_data.data.unsqueeze(1).type(torch.FloatTensor) / 255
test_y = test_data.targets
class CNN(nn.Module):
    """Two conv/pool stages (1 -> 16 -> 32 channels) plus a linear head.

    Each stage keeps the spatial size (3x3 conv, padding 1) and then halves
    it with a 2x2 max-pool, so a 28x28 digit reaches the head as 32 * 7 * 7
    features, mapped to 10 class logits.
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=16,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=16, out_channels=32,
                      kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )
        self.output = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        # Collapse (N, 32, 7, 7) feature maps into (N, 32*7*7) vectors.
        flat = features.view(features.size(0), -1)
        return self.output(flat)

cnn = CNN()

# Hyper-parameters.
LR = 0.001       # Adam learning rate
EPOCH = 3        # passes over the training set
BATCH_SIZE = 50

optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
# NOTE: the original re-created `test_data` here without a transform; it was
# identical to the instance loaded above, so the redundant line was removed.

for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):
        output = cnn(b_x)
        loss = loss_func(output, b_y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            # BUG FIX: evaluate under no_grad (the original built an autograd
            # graph over the whole test set every 50 steps) and actually
            # report the accuracy that was computed but never used.
            with torch.no_grad():
                test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
            print('Epoch: %d | Step: %d | loss: %.4f | test accuracy: %.4f'
                  % (epoch, step, loss.item(), accuracy))

torch.save(cnn, 'cnn_minist.pkl')
# Reload the pickled model to demonstrate persistence.
# NOTE(review): torch.load on a fully pickled module executes arbitrary code
# if the file is untrusted, and recent PyTorch versions may require passing
# weights_only=False for this call — confirm against the installed version.
cnn = torch.load('cnn_minist.pkl')

# Predict the first 20 test digits and compare with the ground truth.
test_output = cnn(test_x[:20])
pred_y = torch.max(test_output, 1)[1].data.numpy()

print('预测值', pred_y)
print('实际值', test_y[:20].numpy())

# Accuracy over the complete test set.
test_output1 = cnn(test_x)
pred_y1 = torch.max(test_output1, 1)[1].data.numpy()
accuracy = float((pred_y1 == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
print('准确率',accuracy)

预测值 [7 2 1 0 4 1 4 9 5 9 0 6 9 0 1 5 9 7 3 4]
实际值 [7 2 1 0 4 1 4 9 5 9 0 6 9 0 1 5 9 7 3 4]
准确率 0.9874

import torch
import torchvision
import torch.utils.data as Data
import torch.nn as nn
import torch.nn.functional as F

# Duplicate of the MNIST data-loading cell from the first listing.
train_data = torchvision.datasets.MNIST(
    root = './',
    train = True,
    transform = torchvision.transforms.ToTensor(),
    download = False)

# Test split loaded without a transform; tensors are prepared by hand below.
test_data = torchvision.datasets.MNIST(
    root='./',
    train=False)

# Insert the channel axis -> (N, 1, 28, 28) and rescale uint8 pixels to [0, 1].
test_x = torch.unsqueeze(test_data.data, dim=1).type(torch.FloatTensor)/255
test_y = test_data.targets

class CNN(nn.Module):
    """Two conv/pool stages (1 -> 16 -> 32 channels) plus a linear classifier.

    Duplicate of the CNN defined in the first MNIST listing: each stage keeps
    the spatial size (3x3 conv, padding 1) then halves it with a 2x2 max-pool,
    so a 28x28 input reaches the head as 32 * 7 * 7 features.
    """

    def __init__(self):
        super(CNN,self).__init__()
        # Stage 1: (N, 1, 28, 28) -> (N, 16, 14, 14)
        self.conv1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=16,
                kernel_size=3,
                stride=1,
                padding=1
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # Stage 2: (N, 16, 14, 14) -> (N, 32, 7, 7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(
                in_channels=16,
                out_channels=32,
                kernel_size=3,
                stride=1,
                padding=1
            ),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)
        )
        # Linear head mapping flattened features to 10 class logits.
        self.output = nn.Linear(32*7*7,10)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        # Flatten (N, 32, 7, 7) -> (N, 32*7*7) before the linear head.
        out = out.view(out.size(0),-1)
        out = self.output(out)
        return out

cnn = CNN()

# BUG FIX: this second listing used LR, EPOCH and train_loader without ever
# defining them (NameError when run standalone); define them here.
LR = 0.001
EPOCH = 3
BATCH_SIZE = 50
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)

optimizer = torch.optim.Adam(cnn.parameters(), lr=LR)
loss_func = nn.CrossEntropyLoss()

for epoch in range(EPOCH):
    for step, (b_x, b_y) in enumerate(train_loader):
        output = cnn(b_x)
        loss = loss_func(output, b_y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            # Periodic evaluation; run under no_grad so no autograd graph is
            # built over the whole test set.
            with torch.no_grad():
                test_output = cnn(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy()
            accuracy = float((pred_y == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))

torch.save(cnn, 'cnn_minist.pkl')

# Reload the pickled model and sanity-check it on the first 20 digits.
cnn = torch.load('cnn_minist.pkl')

test_output = cnn(test_x[:20])
pred_y = torch.max(test_output, 1)[1].data.numpy()

print('预测值', pred_y)
print('实际值', test_y[:20].numpy())

# Accuracy over the complete test set.
test_output1 = cnn(test_x)
pred_y1 = torch.max(test_output1, 1)[1].data.numpy()
accuracy = float((pred_y1 == test_y.data.numpy()).astype(int).sum()) / float(test_y.size(0))
print('准确率',accuracy)

预测值 [7 2 1 0 4 1 4 9 5 9 0 6 9 0 1 5 9 7 3 4]
实际值 [7 2 1 0 4 1 4 9 5 9 0 6 9 0 1 5 9 7 3 4]
准确率 0.9856

3. 搭建图像自动分割模型

import os
import cv2
import numpy as np
import torch
from torch import nn
from torch.utils.data import Dataset

class MyDataset(Dataset):
    """Paired image / mask dataset read from <train_path>/last and /last_msk.

    Images are resized to 224x224 (BGR as read by cv2); masks become a
    2-channel float32 one-hot map laid out (C, H, W), with the channels
    flipped by abs(x - 1) so np.argmin recovers the class downstream.
    """

    def __init__(self, train_path, transform=None):
        # BUG FIX: os.listdir returns entries in arbitrary order, so the two
        # listings could pair an image with the wrong mask; sort both so
        # identically named files line up.
        self.images = sorted(os.listdir(train_path + '/last'))
        self.labels = sorted(os.listdir(train_path + '/last_msk'))
        assert len(self.images) == len(self.labels), 'Number does not match'
        self.transform = transform
        self.images_and_labels = []  # list of (image_path, mask_path) pairs
        for i in range(len(self.images)):
            self.images_and_labels.append((train_path + '/last/' + self.images[i],
                                           train_path + '/last_msk/' + self.labels[i]))

    def __getitem__(self, item):
        img_path, lab_path = self.images_and_labels[item]
        img = cv2.imread(img_path)
        img = cv2.resize(img, (224, 224))
        lab = cv2.imread(lab_path, 0)  # read the mask as grayscale
        lab = cv2.resize(lab, (224, 224))
        # Binarise (mask pixels are 0 or 255), one-hot encode to (H, W, 2),
        # then flip the two channels with abs(x - 1).
        lab = lab / 255
        lab = lab.astype('uint8')
        lab = np.eye(2)[lab]
        lab = np.array(list(map(lambda x: abs(x - 1), lab))).astype('float32')
        lab = lab.transpose(2, 0, 1)  # (H, W, C) -> (C, H, W)
        if self.transform is not None:
            img = self.transform(img)
        return img, lab

    def __len__(self):
        return len(self.images)

if __name__ == '__main__':
    # Sanity check of the mask encoding: read one mask as grayscale,
    # binarise, one-hot encode to (H, W, 2), then flip the channels —
    # the same transformation applied per sample in MyDataset.
    img = cv2.imread('data/train/last_msk/50.jpg', 0)
    img = cv2.resize(img, (16, 16))
    img2 = img/255
    img3 = img2.astype('uint8')
    hot1 = np.eye(2)[img3]
    hot2 = np.array(list(map(lambda x: abs(x-1), hot1)))
    # Expected output: (16, 16, 2)
    print(hot2.shape)

(16, 16, 2)

class Net(nn.Module):
    """Fully convolutional encoder-decoder for 2-class segmentation.

    Five conv/pool encoder stages shrink the input 32x, five transposed-conv
    decoder stages restore the resolution, and a 1x1 conv emits the 2-channel
    score map. All stages are fully convolutional, so any input whose sides
    are multiples of 32 is accepted; output spatial size equals input size.
    """

    @staticmethod
    def _down_stage(in_ch, out_ch, n_convs):
        """Encoder stage: n_convs (3x3 conv + BN + ReLU) then a 2x2 max-pool."""
        layers = []
        ch = in_ch
        for _ in range(n_convs):
            layers += [
                nn.Conv2d(ch, out_ch, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(True),
            ]
            ch = out_ch
        layers.append(nn.MaxPool2d(2, 2))
        return nn.Sequential(*layers)

    @staticmethod
    def _up_stage(in_ch, out_ch):
        """Decoder stage: stride-2 transposed conv (doubles H, W) + BN + ReLU."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels=in_ch, out_channels=out_ch,
                               kernel_size=3, stride=2, padding=1,
                               output_padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(True),
        )

    def __init__(self):
        super(Net, self).__init__()
        # Attribute names and layer ordering match the original definition,
        # so saved state_dicts remain loadable.
        self.encode1 = self._down_stage(3, 64, 1)
        self.encode2 = self._down_stage(64, 128, 1)
        self.encode3 = self._down_stage(128, 256, 2)
        self.encode4 = self._down_stage(256, 512, 2)
        self.encode5 = self._down_stage(512, 512, 2)
        self.decode1 = self._up_stage(512, 256)
        self.decode2 = self._up_stage(256, 128)
        self.decode3 = self._up_stage(128, 64)
        self.decode4 = self._up_stage(64, 32)
        self.decode5 = self._up_stage(32, 16)
        self.classifier = nn.Conv2d(16, 2, kernel_size=1)

    def forward(self, x):
        for stage in (self.encode1, self.encode2, self.encode3,
                      self.encode4, self.encode5,
                      self.decode1, self.decode2, self.decode3,
                      self.decode4, self.decode5):
            x = stage(x)
        return self.classifier(x)

if __name__ == '__main__':
    # Smoke test: a 224x224 batch must come back as a (N, 2, 224, 224) map.
    img = torch.randn(2, 3, 224, 224)
    net = Net()
    sample = net(img)
    print(sample.shape)

torch.Size([2, 2, 224, 224])

import os
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torchvision import transforms
from torch.utils.data import DataLoader

batchsize = 8
epochs = 20
train_data_path = 'data/train'

# ImageNet-style per-channel normalisation on top of ToTensor.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                     std=[0.229, 0.224, 0.225])])
bag = MyDataset(train_data_path, transform)
dataloader = DataLoader(bag, batch_size=batchsize, shuffle=True)

device = torch.device('cpu')
#device = torch.device('cuda')
net = Net().to(device)
# NOTE(review): sigmoid + BCELoss works, but BCEWithLogitsLoss on the raw
# logits would be numerically more stable.
criterion = nn.BCELoss()
optimizer = optim.SGD(net.parameters(), lr=1e-2, momentum=0.7)

# BUG FIX: the original created a 'checkpoints' directory but saved the
# checkpoints into './model', which crashes if that directory is missing;
# create the directory that is actually written to.
if not os.path.exists('model'):
    os.mkdir('model')

for epoch in range(1, epochs+1):
    for batch_idx, (img, lab) in enumerate(dataloader):
        img, lab = img.to(device), lab.to(device)
        output = torch.sigmoid(net(img))
        loss = criterion(output, lab)

        # (The original also computed per-batch argmin maps of output and
        # label here and never used them; that dead work was removed.)
        if batch_idx % 20 == 0:
            print('Epoch:[{}/{}]\tStep:[{}/{}]\tLoss:{:.6f}'.format(
                epoch, epochs, (batch_idx+1)*len(img), len(dataloader.dataset), loss.item()))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    if epoch % 10 == 0:
        torch.save(net, './model/model_epoch_{}.pth'.format(epoch))
        print('./model/model_epoch_{}.pth saved!'.format(epoch))

Epoch:[1/20] Step:[8/50] Loss:0.701965
Epoch:[2/20] Step:[8/50] Loss:0.698032
Epoch:[3/20] Step:[8/50] Loss:0.695077
Epoch:[4/20] Step:[8/50] Loss:0.669016
Epoch:[5/20] Step:[8/50] Loss:0.674715
Epoch:[6/20] Step:[8/50] Loss:0.668930
Epoch:[7/20] Step:[8/50] Loss:0.668005
Epoch:[8/20] Step:[8/50] Loss:0.643131
Epoch:[9/20] Step:[8/50] Loss:0.666685
Epoch:[10/20] Step:[8/50] Loss:0.643606
./model/model_epoch_10.pth saved!
Epoch:[11/20] Step:[8/50] Loss:0.640421
Epoch:[12/20] Step:[8/50] Loss:0.631368
Epoch:[13/20] Step:[8/50] Loss:0.612624
Epoch:[14/20] Step:[8/50] Loss:0.598883
Epoch:[15/20] Step:[8/50] Loss:0.596404
Epoch:[16/20] Step:[8/50] Loss:0.580273
Epoch:[17/20] Step:[8/50] Loss:0.605920
Epoch:[18/20] Step:[8/50] Loss:0.575018
Epoch:[19/20] Step:[8/50] Loss:0.560484
Epoch:[20/20] Step:[8/50] Loss:0.533663
./model/model_epoch_20.pth saved!

class TestDataset(Dataset):
    """Image-only dataset for inference: every file under test_img_path,
    read with cv2 and resized to 224x224 (optionally transformed)."""

    def __init__(self, test_img_path, transform=None):
        # BUG FIX: sort the listing so the outputs written as result/<idx>.png
        # map to inputs in a deterministic order (os.listdir order is
        # arbitrary and filesystem-dependent).
        self.test_img = sorted(os.listdir(test_img_path))
        self.transform = transform
        self.images = []  # absolute/relative paths, aligned with test_img
        for i in range(len(self.test_img)):
            self.images.append(os.path.join(test_img_path, self.test_img[i]))

    def __getitem__(self, item):
        img_path = self.images[item]
        img = cv2.imread(img_path)
        img = cv2.resize(img, (224, 224))
        if self.transform is not None:
            img = self.transform(img)
        return img

    def __len__(self):
        return len(self.test_img)

test_img_path = './data/test/last'
checkpoint_path = './model/model_epoch_20.pth'
save_dir = 'data/test/result'
if not os.path.exists(save_dir):
    os.mkdir(save_dir)

transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                     std=[0.229, 0.224, 0.225])])
bag = TestDataset(test_img_path, transform)
# BUG FIX: shuffle expects a bool; the original passed None (merely falsy).
dataloader = DataLoader(bag, batch_size=1, shuffle=False)

# NOTE(review): torch.load of a fully pickled model executes arbitrary code
# if the checkpoint file is untrusted.
net = torch.load(checkpoint_path)
#net = net.cuda()
# BUG FIX: switch to eval mode — with batch_size=1 the BatchNorm layers would
# otherwise use unstable per-batch statistics during inference.
net.eval()
with torch.no_grad():  # inference needs no autograd graph
    for idx, img in enumerate(dataloader):
    #    img = img.cuda()
        output = torch.sigmoid(net(img))

        output_np = output.cpu().data.numpy().copy()
        # MyDataset flipped the label channels, so argmin recovers the mask.
        output_np = np.argmin(output_np, axis=1)

        img_arr = np.squeeze(output_np)
        img_arr = img_arr*255
        cv2.imwrite('%s/%03d.png'%(save_dir, idx), img_arr)
        print('%s/%03d.png'%(save_dir, idx))

data/test/result/000.png
data/test/result/001.png
data/test/result/002.png
data/test/result/003.png
data/test/result/004.png

Do PyTorch Ch6

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.coloradmin.cn/o/627577.html

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈,一经查实,立即删除!

相关文章

【业务功能篇20】Springboot java逻辑实现动态行转列需求

在此前,我也写过一个行转列的文章,是用存储过程sql处理的一个动态的逻辑 Mysql 存储过程\Mybatis框架call调用 实现动态行转列 那么后面我们同样又接收了业务的一个新需求,针对的是不同的业务数据,做的同样的一个展示数据报表&…

【C++11】 initializer_list | 右值引用 | 移动构造 | 完美转发

文章目录 1. 统一的列表初始化{ } 初始化initializer_list 2. 引用左值引用右值引用左值引用与右值引用的相互转换右值引用的真正使用场景移动构造 C98与C11传值返回问题注意事项总结 3. 完美转发 1. 统一的列表初始化 { } 初始化 C11 扩大了括号括起的列表(初始化列表)的使用…

使用PHP导出Excel时处理复杂表头的万能方法

使用PHP导出Excel时,如果是一级表头处理起来很简单,但如果碰到复杂一点的表头,比如二级、三级,甚至更多级别的表头要怎么办呢? 就像下面这个表头,有三层,并且每层都不太规则—— 难道我们每次处…

动态绑定v-model,并解决输入框无法输入和无法双向绑定问题

问题:在界面中想要动态获取数据库中返回的数据,作为下拉的值,每个下拉值中又包含不同的属性信息,给输入框动态绑定v-model,但是绑定成功后输入框内无法输入内容,且没有双向绑定 解决思路:1.双向…

SIM:基于搜索的用户终身行为序列建模

SIM:基于搜索的用户终身行为序列建模 论文:《Search-based User Interest Modeling with Lifelong Sequential Behavior Data for Click-Through Rate Prediction》 下载地址:https://arxiv.org/abs/2006.05639 1、用户行为序列建模回顾 1…

在 AWS 上使用 OpenText 实现业务关键型应用程序的现代化

通过在云中进行信息管理建立持久的竞争优势 创新在云中发生的速度比以往任何时候都快。 企业面临着数字经济快速转型的挑战,充分释放业务信息的能力对于建立持久的竞争优势至关重要。为分散的员工扩大安全可靠的协作范围将是生产力和创新的关键驱动力。 如今大多…

Web UI自动化测试之元素定位

目前,在自动化测试的实际应用中,接口自动化测试被广泛使用,但UI自动化测试也并不会被替代。让我们看看二者的对比: 接口自动化测试是跳过前端界面直接对服务端的测试,执行效率和覆盖率更高,维护成本更低&am…

【EtherCAT】一、入门基础

什么是EtherCAT? 介绍简介特点和优势EtherCAT系统组成主站从站 硬件EtherCAT主站芯片EtherCAT从站芯片 EtherCAT应用层协议 工具软件 介绍 简介 EtherCAT(Ethernet Control Automation Technology)是一种高性能实时以太网通信协议&#xff…

Ubuntu20.04设置开机自启动脚本

1.建立开机启动服务 sudo vim /lib/systemd/system/rc-local.service 在末尾添加 [Install] WantedBymulti-user.target Aliasrc-local.service2.创建 /etc/rc.local sudo touch /etc/rc.local && sudo chmod 755 /etc/rc.local #!/bin/bash cd /home/docker-data/ss…

前端框架笔记

Vue.js的安装 安装Vue.js有两种方法&#xff1a; &#xff08;1&#xff09;类似于Bootstrap或jQuery&#xff0c;直接通过HTML文件中的标签引用。为了方便开发者使用&#xff0c;Vue.js提供了相关的CDN&#xff0c;通过如下代码可以引用最新版本的Vue.js&#xff1a; <sc…

小黑回到学校,跟小老黑中老黑阿黄一起度过最后在学校的日子的leetcode之旅:3. 无重复字符的最长子串

双指针动态滑动窗口 class Solution:def lengthOfLongestSubstring(self, s: str) -> int:# 字符串长度n len(s)# 双指针left 0right 0# 存储集合set_ set()# 当前子串长度cur_len 0# 结果result 0# 分别遍历每一个右指针while right < n:# 该字符是重复的&#x…

向量相似搜索绕不开的局部敏感哈希

在搜索推荐中&#xff0c;通常使用相似Embedding进行推荐&#xff0c;此时就会有一个问题&#xff1a;如何快速找到与一个Embedding相近的其他Embedding。 如果两个Embedding在同一个向量空间中&#xff0c;我们就可以通过很多种方式&#xff08;内积、余弦、欧氏距离等&#…

python3 爬虫相关学习8:python 的常见报错内容 汇总收集

目录 1 拼写错误 AttributeError: NameError: 等等 2 类型错误 TypeError: 如字符串连接错误 TypeError: can only concatenate str (not “int“) to str 3 意外缩进 IndentationError: unexpected indent 4 找不到对应模块 ModuleNotFoundError: 5 语法错误 Syntax…

【Docker】deepin/centos安装docker

deepin虚拟机和centos服务器安装docker 1.更新软件包 # deepin sudo apt-get update && sudo apt-get upgrade # centos sudo yum update && yum upgrade安装docker之前&#xff0c;先更新一下软件包 mothramothra-PC:~$ sudo apt-get update && sud…

《Lua程序设计》--学习6

日期和时间 第1种表示方式是一个数字&#xff0c;这个数字通常是一个整型数。尽管并非是ISO C所必需的&#xff0c;但在大多数系统中这个数字是自一个被称为纪元&#xff08;epoch&#xff09;的固定日期后至今的秒数。 Lua语言针对日期和时间提供的第2种表示方式是一个表。日…

苹果新专利曝光,用户可通过Apple Watch及MR头显摄像头设置3D虚拟化身

美国专利商标局公布了苹果公司的一项专利申请&#xff0c;涉及提供计算机生成体验的计算机系统&#xff0c;包括但不限于通过显示器提供 VR 和 MR 体验的电子设备。 在一个例子中&#xff0c;苹果展示了 Apple Watch 的未来版本&#xff0c;该版本将允许用户使用 Apple Watch…

ceph安装搭建总结

ceph安装搭建总结 大纲 版本选择集群架构免密登录安装ceph-deploy部署ceph集群安装mgr安装ceph-dashboard 版本选择 ceph 版本信息如下 本次测试使用版本为Octopus 主版本号为15&#xff0c; 并且使用ceph-deploy 2.0.1安装ceph集群 Ceph Octopus 官方文档 相关环境与软件…

找不到msvcp140.dll无法继续执行代码,解决方法

msvcp140.dll电脑文件中的dll文件&#xff0c;即动态链接库文件&#xff0c;若计算机中丢失了某个dll文件&#xff0c;就会导致某些软件和游戏等程序无法正常启动运行&#xff0c;并且导致电脑系统弹窗报错&#xff0c;其安装方法&#xff1a;1、打开浏览器输入“【dll修复程序…

迅为iTOP-RK3588开发板Android12源码定制开发kernel开发

内核版本是 5.10.66 版本&#xff0c;内核默认的配置文件是 3588-android12/kernel-5.10/arch/arm64/configs/rockchip_defconfig 如果我们要使用图形化界面配置内核&#xff0c;操作方法如下所示&#xff1a; 方法一&#xff1a; 1 首先将默认的配置文件 rockchip_defconf…

stable diffusion其他微调方法

textual inversion 发布时间&#xff1a;2022 目标&#xff1a;与DreamBooth一样&#xff0c;都是想要微调模型生成特定目标或风格的图像 方法&#xff1a;通过在vocabulary中添加一个额外的embedding来学习输入的新特征。预训练模型的所有参数都锁住&#xff0c;只有新的emb…