TensorFlow 2: saving and restoring models


There are two ways to save a model:

  • Checkpoints: a simple way to store variable values on disk
  • SavedModel: the model structure plus a checkpoint

Checkpoints contain no description of the model itself: they are just a simple way to store parameter values so that a developer can restore them correctly.
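
As an illustration (a minimal sketch; it assumes a checkpoint has already been written to the ./tf_ckpts directory used by the scripts below), you can list the raw variables stored in a checkpoint, but nothing in them tells you how to rebuild the model:

import tensorflow as tf

# A checkpoint is just a collection of named tensors on disk.
ckpt_path = tf.train.latest_checkpoint("./tf_ckpts")
for name, shape in tf.train.list_variables(ckpt_path):
    print(name, shape)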

The SavedModel format adds a serialized description of the computation on top of the saved parameter values.
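
For comparison, a minimal SavedModel round trip looks like this (a sketch that assumes a built Keras model in the variable model; the directory name is arbitrary):

import tensorflow as tf

# Export the computation graph together with the weights (SavedModel format)...
model.save("./saved_model_dir")
# ...and load everything back, ready for prediction, without the original code.
restored = tf.keras.models.load_model("./saved_model_dir")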

In TensorFlow 2.0, two objects can be used to save and restore model parameters (a minimal sketch follows the list):

  • tf.train.Checkpoint is an object-based serializer/deserializer.
  • tf.train.CheckpointManager is an object that stores and manages checkpoints using a tf.train.Checkpoint instance.
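
A sketch of the two objects working together (only a step counter is tracked here, but a model, optimizer, or dataset iterator can be attached the same way):

import tensorflow as tf

ckpt = tf.train.Checkpoint(step=tf.Variable(0))  # tracks whatever objects are attached
manager = tf.train.CheckpointManager(ckpt, "./tf_ckpts", max_to_keep=3)

ckpt.restore(manager.latest_checkpoint)  # a no-op on the very first run
ckpt.step.assign_add(1)
print(manager.save())  # e.g. ./tf_ckpts/ckpt-1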

 

import tensorflow as tf
from tensorflow.keras.datasets import fashion_mnist

def make_model(n_classes):
    return tf.keras.Sequential(
        [
            tf.keras.layers.Conv2D(
                32, (5, 5), activation=tf.nn.relu, input_shape=(28, 28, 1)
            ),
            tf.keras.layers.MaxPool2D((2, 2), (2, 2)),
            tf.keras.layers.Conv2D(64, (3, 3), activation=tf.nn.relu),
            tf.keras.layers.MaxPool2D((2, 2), (2, 2)),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(1024, activation=tf.nn.relu),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(n_classes),
        ]
    )


def load_data():
    (train_x, train_y), (test_x, test_y) = fashion_mnist.load_data()
    # Scale input in [-1, 1] range
    train_x = tf.expand_dims(train_x, -1)
    train_x = (tf.image.convert_image_dtype(train_x, tf.float32) - 0.5) * 2
    train_y = tf.expand_dims(train_y, -1)

    test_x = tf.expand_dims(test_x, -1)  # add the channel dimension, as for train_x
    test_x = (tf.image.convert_image_dtype(test_x, tf.float32) - 0.5) * 2
    test_y = tf.expand_dims(test_y, -1)

    return (train_x, train_y), (test_x, test_y)


def train():
    # Define the model
    n_classes = 10
    model = make_model(n_classes)

    # Input data
    (train_x, train_y), (test_x, test_y) = load_data()

    # Training parameters
    loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
    step = tf.Variable(1, name="global_step")
    optimizer = tf.optimizers.Adam(1e-3)

    ckpt = tf.train.Checkpoint(step=step, optimizer=optimizer, model=model)
    manager = tf.train.CheckpointManager(ckpt, "./tf_ckpts", max_to_keep=3)
    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print(f"Restored from {manager.latest_checkpoint}")
    else:
        print("Initializing from scratch.")

    accuracy = tf.metrics.Accuracy()
    mean_loss = tf.metrics.Mean(name="loss")

    # Train step function
    @tf.function
    def train_step(inputs, labels):
        with tf.GradientTape() as tape:
            logits = model(inputs, training=True)  # training=True activates the Dropout layer
            loss_value = loss(labels, logits)

        gradients = tape.gradient(loss_value, model.trainable_variables)
        # TODO: apply gradient clipping here
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
        step.assign_add(1)

        accuracy.update_state(labels, tf.argmax(logits, -1))
        return loss_value, accuracy.result()

    epochs = 10
    batch_size = 32
    nr_batches_train = int(train_x.shape[0] / batch_size)
    print(f"Batch size: {batch_size}")
    print(f"Number of batches per epoch: {nr_batches_train}")

    train_summary_writer = tf.summary.create_file_writer("./log/train")

    with train_summary_writer.as_default():
        for epoch in range(epochs):
            for t in range(nr_batches_train):
                start_from = t * batch_size
                to = (t + 1) * batch_size

                features, labels = train_x[start_from:to], train_y[start_from:to]

                loss_value, accuracy_value = train_step(features, labels)
                mean_loss.update_state(loss_value)

                if t % 10 == 0:
                    print(f"{step.numpy()}: {loss_value} - accuracy: {accuracy_value}")
                    save_path = manager.save()
                    print(f"Checkpoint saved: {save_path}")
                    tf.summary.image(
                        "train_set", features, max_outputs=3, step=step.numpy()
                    )
                    tf.summary.scalar("accuracy", accuracy_value, step=step.numpy())
                    tf.summary.scalar("loss", mean_loss.result(), step=step.numpy())
                    accuracy.reset_states()
                    mean_loss.reset_states()
            print(f"Epoch {epoch} terminated")
            # Measuring accuracy on the whole training set at the end of epoch
            for t in range(nr_batches_train):
                start_from = t * batch_size
                to = (t + 1) * batch_size
                features, labels = train_x[start_from:to], train_y[start_from:to]
                logits = model(features)
                accuracy.update_state(labels, tf.argmax(logits, -1))
            print(f"Training accuracy: {accuracy.result()}")
            accuracy.reset_states()


if __name__ == "__main__":
    train()
Next, a minimal end-to-end example: a toy linear model whose step counter, optimizer, weights, and even the dataset iterator are all tracked by a single checkpoint.

import tensorflow as tf
 
# Model
class Net(tf.keras.Model):
    """A simple linear model."""
 
    def __init__(self):
        super(Net, self).__init__()
        self.l1 = tf.keras.layers.Dense(5)
    def call(self, x):
        return self.l1(x)
net = Net()
# Keras can also save the weights directly
net.save_weights('easy_checkpoint')  # saving from the tf.keras training APIs
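# (Sketch of the counterpart call: a fresh, compatible model picks these
# weights up; restoration completes once the new model is built/first called.)
net2 = Net()
net2.load_weights('easy_checkpoint')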
 
# Data
def toy_dataset():
    inputs = tf.range(10.)[:, None]
    labels = inputs * 5. + tf.range(5.)[None, :]
    return tf.data.Dataset.from_tensor_slices(
        dict(x=inputs, y=labels)).repeat().batch(2)
dataset = toy_dataset()
 
# One gradient-update step
def train_step(net, example, optimizer):
    """Trains `net` on `example` using `optimizer`."""
    with tf.GradientTape() as tape:
        output = net(example['x'])
        loss = tf.reduce_mean(tf.abs(output - example['y']))
    variables = net.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))
    return loss
 
# Optimizer
opt = tf.keras.optimizers.Adam(0.1)
# Dataset iterator
iterator = iter(dataset)
# Create the checkpoint; note that the dataset iterator itself is tracked too
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net, iterator=iterator)
# Create the checkpoint manager
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)

# Given the network and the checkpoint manager, run (or resume) training
def train_and_checkpoint(net, manager):
    ckpt.restore(manager.latest_checkpoint)
    if manager.latest_checkpoint:
        print("Restored from {}".format(manager.latest_checkpoint))
    else:
        print("Initializing from scratch.")
 
    for _ in range(50):
        example = next(iterator)
        loss = train_step(net, example, opt)
        ckpt.step.assign_add(1)
        if int(ckpt.step) % 10 == 0:
            save_path = manager.save()
            print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
            print("loss {:1.2f}".format(loss.numpy()))
train_and_checkpoint(net, manager)
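
After a run, the manager keeps only the newest max_to_keep checkpoint files; the retained paths can be listed (the paths shown are illustrative):

print(manager.checkpoints)
# e.g. ['./tf_ckpts/ckpt-8', './tf_ckpts/ckpt-9', './tf_ckpts/ckpt-10']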

The following standalone MNIST example saves checkpoints from a custom training loop, then restores them into a fresh model for testing:

import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import numpy as np
mnist = keras.datasets.mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Add a channels dimension
x_train = x_train[..., tf.newaxis].astype(np.float32)
x_test = x_test[..., tf.newaxis].astype(np.float32)

train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(x_test.shape[0])

class MyModel(keras.Model):
    # Set layers.
    def __init__(self):
        super(MyModel, self).__init__()
        # Convolution Layer with 32 filters and a kernel size of 5.
        self.conv1 = layers.Conv2D(32, kernel_size=5, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with kernel size of 2 and strides of 2.
        self.maxpool1 = layers.MaxPool2D(2, strides=2)

        # Convolution Layer with 64 filters and a kernel size of 3.
        self.conv2 = layers.Conv2D(64, kernel_size=3, activation=tf.nn.relu)
        # Max Pooling (down-sampling) with kernel size of 2 and strides of 2.
        self.maxpool2 = layers.MaxPool2D(2, strides=2)

        # Flatten the data to a 1-D vector for the fully connected layer.
        self.flatten = layers.Flatten()

        # Fully connected layer.
        self.fc1 = layers.Dense(1024)
        # Apply Dropout (if is_training is False, dropout is not applied).
        self.dropout = layers.Dropout(rate=0.5)

        # Output layer, class prediction.
        self.out = layers.Dense(10)

    # Set forward pass.
    def call(self, x, is_training=False):
        x = tf.reshape(x, [-1, 28, 28, 1])
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.dropout(x, training=is_training)
        x = self.out(x)
        if not is_training:
            # tf cross entropy expect logits without softmax, so only
            # apply softmax when not training.
            x = tf.nn.softmax(x)
        return x

model = MyModel()

loss_object = keras.losses.SparseCategoricalCrossentropy(from_logits=True)  # the model emits logits during training
optimizer = keras.optimizers.Adam()

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as tape:
        predictions = model(images, is_training=True)  # enable dropout; outputs stay logits
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

"""
# 保存模型参数
# 2.1 不限制 checkpoint 文件个数
EPOCHS = 5

checkpoint = tf.train.Checkpoint(myAwesomeModel=model)
for epoch in range(EPOCHS):
    for images, labels in train_ds:
        train_step(images, labels)
    path = checkpoint.save('./save/model.ckpt')
    print("model saved to %s" % path)
"""

# 2.2 Limiting the number of checkpoint files
EPOCHS = 5

checkpoint = tf.train.Checkpoint(myAwesomeModel=model)
manager = tf.train.CheckpointManager(checkpoint, directory='./save', max_to_keep=3)
for epoch in range(EPOCHS):
    for images, labels in train_ds:
        train_step(images, labels)
    path = manager.save(checkpoint_number=epoch)
    print("model saved to %s" % path)


# Restore the parameters into a fresh model and evaluate it
model_to_be_restored = MyModel()
checkpoint = tf.train.Checkpoint(myAwesomeModel=model_to_be_restored)
checkpoint.restore(tf.train.latest_checkpoint('./save'))
for test_images, test_labels in test_ds:
    y_pred = np.argmax(model_to_be_restored.predict(test_images), axis=-1)
    print("test accuracy: %f" % np.mean(y_pred == test_labels.numpy()))



Saving and loading models

"""
保存模型方式一:
tf.keras提供了使用HDF5标准提供基本的保存格式
这种方法保存了以下内容:
      1)模型权重值
      2)模型结构
      3)模型/优化器配置
"""
model. Save("./my_model.h5")
 
#将模型加载出来---可以直接进行预测
save_model = tf.keras.models.load_model("./my_model.h5")
"""
保存模型方式二:
仅仅保存模型结构----这种方式要将模型结构保存成json格式,仅仅保存模型的结构,优化器、损失函数都未指定
"""
model_jsons = model.to_json()
 
#将该json文件写入到磁盘
with open("./model_structure.json","w") as my_writer:
    my_writer.write(model_jsons)
 
#将以json文件保存的结构加载出来
with open("./model_structure.json","r") as my_reader:
    model_structure = my_reader.read()
 
new_model_structure = tf.keras.models.model_from_json(model_structure)
"""
保存方式三:
仅保存权重,有时我们只需要保存模型的状态(其权重值),而对模型架构不感兴趣。在这种情况下,
可以通过get_weights()获取权重值,并通过set_weights()设置权重值
"""
model_weights = model.get_weights()
 
#使用第二种模式只加载出模型的结构
new_model_structure2 = tf.keras.models.model_from_json(model_structure)
 
new_model_structure2.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
 
#测试该模型的分数,即训练程度
new_model_structure2.set_weights(model_weights)
new_model_structure2.evaluate(test_images,test_labels)
 
"""
保存方式四:
在训练过程中,保存检查点,在训练期间或者训练结束的时候自动保存检查点。这样一来,在训练中断了后,
可以从该检查点继续向下训练。
使用的回调函数:tf.keras.callbacks.ModelCheckpoint()
"""
checkpoint_path = "./model.ckpt"
 
check_point_callback = tf.keras.callbacks.ModelCheckpoint(filepath = checkpoint_path,
                                   save_weights_only = True)
 
#添加fit的callbacks中,这步切记不能忘,这样在训练的时候可以自动帮我们保存参数的检查点
model2.fit(dataset, epochs=5, steps_per_epoch=steps_per_epoch,callbacks=[check_point_callback])
 
"""
加载检查点,继续开始训练
"""
#加载检查点的权重
new_model_structure3.load_weights(checkpoint_path)
 
new_model_structure3.evaluate(test_images,test_labels)
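
A common variant (a sketch; model2, dataset, steps_per_epoch, and new_model_structure3 are assumed defined as above) embeds the epoch number in the file name so every epoch keeps its own file, and tf.train.latest_checkpoint finds the newest one:

import os

checkpoint_path2 = "./training/cp-{epoch:04d}.ckpt"
epoch_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path2,
                                                    save_weights_only=True,
                                                    verbose=1)
model2.fit(dataset, epochs=5, steps_per_epoch=steps_per_epoch, callbacks=[epoch_callback])

# Pick up the most recent checkpoint in the directory
latest = tf.train.latest_checkpoint(os.path.dirname(checkpoint_path2))
new_model_structure3.load_weights(latest)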

Saving and restoring model checkpoints

Method 1: save without limiting the number of checkpoints

"""
保存模型方式五:
自定义训练保存模型
"""
 
"""
第一步:创建检查点保存路径
"""
cp_dir = "./custom_train_save"
cp_profix = os.path.join(cp_dir,"ckpt")
 
 
"""
第二步:创建模型检查点对象
"""
check_point = tf.train.Checkpoint(optimizer = optimizers,
                                  model = model)
 
"""
第三步:在自定义训练函数中保存检查点
"""
if step % 2 == 0:
    check_point.save(file_prefix = cp_profix)
"""
第一步:提取最新的检查点
"""
latestnew_checkpoint = tf.train.latest_checkpoint(cp_dir)
 
"""
第二步:创建模型检查点对象
注意:这个optimizers与model属于新创建的,还没有加载参数.
"""
check_point = tf.train.Checkpoint(optimizer = optimizers,
                                  model = model)
 
"""
第三步:开始恢复到最新检查点处
"""
check_point.restore(latestnew_checkpoint)
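
Optionally (a sketch using the status object that restore() returns), verify that every variable in the live objects was matched against the checkpoint:

status = check_point.restore(latestnew_checkpoint)
status.assert_existing_objects_matched()  # raises if some variables were not restored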

Method 2: via CheckpointManager

""
第一步:
创建检查点对象,并将模型(参数)、优化器等配置到检查点对象中
""
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net, iterator=iterator)
""
第二步:
创建检查点管理器对象,它可以帮我们管理检查点对象
""
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
""
第三步:
在训练函数中,设置多少轮保存一下检查点,返回值为保存路径
""
save_path = manager.save()

Restoring

""
第一步:
创建优化器、模型对象
""
opt = tf.keras.optimizers.Adam(0.1)
net = Net()
""
第二步:
创建检查点、检查点管理器对象
""
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=net, iterator=iterator)
manager = tf.train.CheckpointManager(ckpt, './tf_ckpts', max_to_keep=3)
""
第三步:
在训练前,恢复检查点
""
ckpt.restore(manager.latest_checkpoint)




 

