The Linux Kernel File Write Flow


The code in this article is based on Linux 5.15.

When user space calls write to write data, how does the Linux kernel handle it? This article walks through that flow, using exFAT as the example filesystem.
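
Before diving into the kernel, here is a minimal user-space example that triggers the path discussed below (illustrative only; the mount point and file name are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    /* open a file on an exFAT mount; the path is only an example */
    int fd = open("/mnt/exfat/test.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    const char buf[] = "hello, exfat";
    /* this write(2) call enters the kernel at the write syscall shown below */
    if (write(fd, buf, sizeof(buf) - 1) < 0)
        perror("write");

    close(fd);
    return 0;
}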

Entry function

The write system call is defined as follows:

fs/read_write.c
ssize_t ksys_write(unsigned int fd, const char __user *buf, size_t count)
{
    struct fd f = fdget_pos(fd);
    ssize_t ret = -EBADF;

    if (f.file) {
        loff_t pos, *ppos = file_ppos(f.file);
        if (ppos) {
            pos = *ppos;
            ppos = &pos;
        }
        ret = vfs_write(f.file, buf, count, ppos);
        if (ret >= 0 && ppos)
            f.file->f_pos = pos;
        fdput_pos(f);
    }

    return ret;
}

SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
        size_t, count)
{
    return ksys_write(fd, buf, count);
}

The syscall body simply calls ksys_write, which in turn calls vfs_write:

fs/read_write.c
 ksys_write->vfs_write
ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
    ssize_t ret;

    if (!(file->f_mode & FMODE_WRITE))
        return -EBADF;
    if (!(file->f_mode & FMODE_CAN_WRITE))
        return -EINVAL;
    if (unlikely(!access_ok(buf, count)))
        return -EFAULT;

    ret = rw_verify_area(WRITE, file, pos, count);
    if (ret)
        return ret;
    if (count > MAX_RW_COUNT)
        count =  MAX_RW_COUNT;
    file_start_write(file);
    if (file->f_op->write)
        ret = file->f_op->write(file, buf, count, pos);
    else if (file->f_op->write_iter)
        ret = new_sync_write(file, buf, count, pos);
    else
        ret = -EINVAL;
    if (ret > 0) {
        fsnotify_modify(file);
        add_wchar(current, ret);
    }
    inc_syscw(current);
    file_end_write(file);
    return ret;
}

 ksys_write->vfs_write->new_sync_write
static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
    struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
    struct kiocb kiocb;
    struct iov_iter iter;
    ssize_t ret;

    init_sync_kiocb(&kiocb, filp);
    kiocb.ki_pos = (ppos ? *ppos : 0);
    iov_iter_init(&iter, WRITE, &iov, 1, len);

    ret = call_write_iter(filp, &kiocb, &iter);
    BUG_ON(ret == -EIOCBQUEUED);
    if (ret > 0 && ppos)
        *ppos = kiocb.ki_pos;
    return ret;
}

include/linux/fs.h
 ksys_write->vfs_write->new_sync_write->call_write_iter->write_iter
static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio,
                      struct iov_iter *iter)
{
    return file->f_op->write_iter(kio, iter);
}

As we can see, the call eventually reaches the f_op->write_iter callback.

In the exFAT filesystem, this callback is implemented by generic_file_write_iter:

fs/exfat/file.c
const struct file_operations exfat_file_operations = {
    .llseek     = generic_file_llseek,
    .read_iter  = generic_file_read_iter,
    .write_iter = generic_file_write_iter,
    .mmap       = generic_file_mmap,
    .fsync      = exfat_file_fsync,
    .splice_read    = generic_file_splice_read,
    .splice_write   = iter_file_splice_write,
};

generic_file_write_iter eventually calls generic_perform_write to do the actual write:

mm/filemap.c
ksys_write->vfs_write->new_sync_write->call_write_iter->generic_file_write_iter->__generic_file_write_iter->generic_perform_write
ssize_t generic_perform_write(struct file *file,
                struct iov_iter *i, loff_t pos)
{
    struct address_space *mapping = file->f_mapping;
    const struct address_space_operations *a_ops = mapping->a_ops;
    long status = 0;
    ssize_t written = 0;
    unsigned int flags = 0;

    do {                                                /*         1         */
        struct page *page;
        unsigned long offset;   /* Offset into pagecache page */
        unsigned long bytes;    /* Bytes to write to page */
        size_t copied;      /* Bytes copied from user */
        void *fsdata;

        offset = (pos & (PAGE_SIZE - 1));
        bytes = min_t(unsigned long, PAGE_SIZE - offset,
                        iov_iter_count(i));

again:
        /*
         * Bring in the user page that we will copy from _first_.
         * Otherwise there's a nasty deadlock on copying from the
         * same page as we're writing to, without it being marked
         * up-to-date.
         *
         * Not only is this an optimisation, but it is also required
         * to check that the address is actually valid, when atomic
         * usercopies are used, below.
         */
        if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
            status = -EFAULT;
            break;
        }

        if (fatal_signal_pending(current)) {
            status = -EINTR;
            break;
        }

        status = a_ops->write_begin(file, mapping, pos, bytes, flags,
                        &page, &fsdata);                   /*         2         */
        if (unlikely(status < 0))
            break;

        if (mapping_writably_mapped(mapping))
            flush_dcache_page(page);

        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); /*         3         */
        flush_dcache_page(page);

        status = a_ops->write_end(file, mapping, pos, bytes, copied,
                        page, fsdata);            /*         4         */
        if (unlikely(status < 0))
            break;
        copied = status;

        cond_resched();

        iov_iter_advance(i, copied);
        if (unlikely(copied == 0)) {
            /*
             * If we were unable to copy any data at all, we must
             * fall back to a single segment length write.
             *
             * If we didn't fallback here, we could livelock
             * because not all segments in the iov can be copied at
             * once without a pagefault.
             */
            bytes = min_t(unsigned long, PAGE_SIZE - offset,
                        iov_iter_single_seg_count(i));
            goto again;
        }
        pos += copied;
        written += copied;

        balance_dirty_pages_ratelimited(mapping);
    } while (iov_iter_count(i));

    return written ? written : status;
}
EXPORT_SYMBOL(generic_perform_write);

(1) Loop until all of the data has been written.

(2) Call the a_ops->write_begin callback, which the concrete filesystem uses to prepare for the write.

(3) Copy the data to be written into the page cache.

(4) Once the copy is done, call a_ops->write_end to handle the follow-up work.

As we can see, the two key calls are write_begin and write_end; the two functions are discussed separately below.
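
For reference, the two callbacks have the following prototypes in struct address_space_operations (include/linux/fs.h, Linux 5.15; the flags argument was removed in later kernels):

int (*write_begin)(struct file *, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned flags,
            struct page **pagep, void **fsdata);
int (*write_end)(struct file *, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned copied,
            struct page *page, void *fsdata);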

Preparing for the write request

In exFAT, write_begin is implemented as follows:

fs/exfat/inode.c
ksys_write->vfs_write->new_sync_write->call_write_iter->generic_file_write_iter->__generic_file_write_iter->generic_perform_write->exfat_write_begin
static int exfat_write_begin(struct file *file, struct address_space *mapping,
        loff_t pos, unsigned int len, unsigned int flags,
        struct page **pagep, void **fsdata)
{
    int ret;

    *pagep = NULL;
    ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                   exfat_get_block,
                   &EXFAT_I(mapping->host)->i_size_ondisk);

    if (ret < 0)
        exfat_write_failed(mapping, pos+len);

    return ret;
}

It is essentially a wrapper around cont_write_begin, passing exfat_get_block as the block-mapping callback.
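
exfat_get_block, passed in above, is the filesystem's block-mapping callback; its type is the get_block_t typedef from include/linux/fs.h:

typedef int (get_block_t)(struct inode *inode, sector_t iblock,
            struct buffer_head *bh_result, int create);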

fs/buffer.c
ksys_write->vfs_write->new_sync_write->call_write_iter->generic_file_write_iter->__generic_file_write_iter->generic_perform_write->exfat_write_begin->cont_write_begin
int cont_write_begin(struct file *file, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned flags,
            struct page **pagep, void **fsdata,
            get_block_t *get_block, loff_t *bytes)
{
    struct inode *inode = mapping->host;
    unsigned int blocksize = i_blocksize(inode);
    unsigned int zerofrom;
    int err;

    err = cont_expand_zero(file, mapping, pos, bytes);     /*          1       */
    if (err)
        return err;

    zerofrom = *bytes & ~PAGE_MASK;
    if (pos+len > *bytes && zerofrom & (blocksize-1)) {
        *bytes |= (blocksize-1);
        (*bytes)++;
    }

    return block_write_begin(mapping, pos, len, flags, pagep, get_block); /*          2       */
}

(1) If there is a file hole, the hole has to be filled with zeros; this is explained in detail below.

(2) This mainly sets up the buffer_head mappings and performs the necessary read-ahead.

cont_expand_zero

Consider this situation: a file is 1 MB in size, but the user first seeks to an offset of 2 MB and then writes. The 1 MB range in between becomes a so-called "file hole". For such a hole, exFAT fills the corresponding range with zeros, so that a read of that range returns zeros.
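
A small user-space sketch of this hole scenario (illustrative only; the path is an assumption, and the file is assumed to already hold 1 MB of data):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/mnt/exfat/hole.bin", O_WRONLY);  /* assumed to be 1 MiB already */
    if (fd < 0) {
        perror("open");
        return 1;
    }

    lseek(fd, 2 << 20, SEEK_SET);  /* seek 1 MiB past the current end of file */
    write(fd, "x", 1);             /* the write_begin path zero-fills the gap */

    close(fd);
    return 0;
}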

Note that if the file is 1 MB and the user starts writing exactly at the 1 MB offset, cont_expand_zero returns immediately and execution goes straight to block_write_begin.

fs/buffer.c
ksys_write->vfs_write->new_sync_write->call_write_iter->generic_file_write_iter->__generic_file_write_iter->generic_perform_write->exfat_write_begin->cont_write_begin->cont_expand_zero
static int cont_expand_zero(struct file *file, struct address_space *mapping,
                loff_t pos, loff_t *bytes)
{
    struct inode *inode = mapping->host;
    unsigned int blocksize = i_blocksize(inode);
    struct page *page;
    void *fsdata;
    pgoff_t index, curidx;
    loff_t curpos;
    unsigned zerofrom, offset, len;
    int err = 0;

    index = pos >> PAGE_SHIFT;
    offset = pos & ~PAGE_MASK;

    while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) {  /*    1    */
        zerofrom = curpos & ~PAGE_MASK;
        if (zerofrom & (blocksize-1)) {
            *bytes |= (blocksize-1);
            (*bytes)++;
        }
        len = PAGE_SIZE - zerofrom;

        err = pagecache_write_begin(file, mapping, curpos, len, 0,
                        &page, &fsdata);                    /*    2    */
        if (err)
            goto out;
        zero_user(page, zerofrom, len);                     /*    3    */
        err = pagecache_write_end(file, mapping, curpos, len, len,
                        page, fsdata);                      /*    4    */
        if (err < 0)
            goto out;
        BUG_ON(err != len);
        err = 0;

        balance_dirty_pages_ratelimited(mapping);

        if (fatal_signal_pending(current)) {
            err = -EINTR;
            goto out;
        }
    }

    /* page covers the boundary, find the boundary offset */
    if (index == curidx) {                                          /*    5    */
        zerofrom = curpos & ~PAGE_MASK;
        /* if we will expand the thing last block will be filled */
        if (offset <= zerofrom) {
            goto out;
        }
        if (zerofrom & (blocksize-1)) {
            *bytes |= (blocksize-1);
            (*bytes)++;
        }
        len = offset - zerofrom;

        err = pagecache_write_begin(file, mapping, curpos, len, 0,
                        &page, &fsdata);
        if (err)
            goto out;
        zero_user(page, zerofrom, len);
        err = pagecache_write_end(file, mapping, curpos, len, len,
                        page, fsdata);
        if (err < 0)
            goto out;
        BUG_ON(err != len);
        err = 0;
    }
out:
    return err;
}

(1) Pages entirely beyond the current end of file are filled with zeros. In the example above, with size = 1 MB and offset = 2 MB, the whole 1 MB in between is zero-filled.

(2) pagecache_write_begin actually calls a_ops->write_begin, which lands in cont_write_begin again. This time, however, the write position equals the current size (size = 1 MB, offset = 1 MB), so cont_expand_zero returns immediately and execution goes straight to block_write_begin, which allocates the corresponding blocks and reads the relevant content into the page.

(3) Zero the page obtained above, so that a later read of this range returns zeros.

(4) pagecache_write_end actually calls a_ops->write_end.

(5) The loop above handles holes that span whole pages (a page is 4 KB). The other case is a hole inside a single page, e.g. size = 512 and offset = 1024, where the 512 bytes in between must be zeroed. zerofrom is the in-page offset of the current end of file, and the range from zerofrom to offset is zeroed; the sketch below works through this arithmetic.
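
A standalone sketch (user-space C, not kernel code) of the in-page arithmetic in case (5), assuming 4 KB pages, current file size = 512 and write offset = 1024:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long pos      = 1024;  /* where the new write starts             */
    unsigned long cur_size = 512;   /* current end of file (*bytes in kernel) */

    unsigned long index    = pos      >> PAGE_SHIFT;  /* page of the write: 0   */
    unsigned long offset   = pos      & ~PAGE_MASK;   /* in-page offset: 1024   */
    unsigned long curidx   = cur_size >> PAGE_SHIFT;  /* page of current EOF: 0 */
    unsigned long zerofrom = cur_size & ~PAGE_MASK;   /* in-page EOF: 512       */

    if (index == curidx && offset > zerofrom)
        printf("zero bytes [%lu, %lu) of page %lu\n", zerofrom, offset, index);
    return 0;
}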

block_write_begin

This function establishes the mapping between the page and the on-disk blocks, allocating blocks if the file is not long enough; it also performs read-ahead when necessary. (The following discussion draws mainly on the book 《存储技术原理分析》.)

As we know, a page manages its actual contents through buffer_heads; typically one 4 KB page has eight 512-byte buffer_heads. A given buffer_head range (block_start -> block_end) can therefore relate to the range being written (from -> to) in six ways.

In the first two cases, the buffer_head lies entirely outside the data being written. The next three cases are partial (non-overwriting) writes; all of them require read-ahead, so the buffer_head's existing contents are read in first. The last case is a full overwrite: no read-ahead is needed, because the data being written completely covers the buffer_head's range, so it can simply be written without reading the old contents first.
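
As a purely illustrative sketch (not kernel code; the helper name is made up), the six cases can be written as a classification of [block_start, block_end) against the write range [from, to):

/* Classify how one buffer_head range relates to the write range. */
enum bh_case {
    BH_BEFORE_WRITE,   /* 1: block_end <= from                         -> untouched       */
    BH_AFTER_WRITE,    /* 2: block_start >= to                         -> untouched       */
    BH_HEAD_PARTIAL,   /* 3: write covers only the tail of the buffer  -> read first      */
    BH_TAIL_PARTIAL,   /* 4: write covers only the head of the buffer  -> read first      */
    BH_WRITE_INSIDE,   /* 5: write lies strictly inside the buffer     -> read first      */
    BH_FULLY_COVERED,  /* 6: write covers the whole buffer             -> no read needed  */
};

static enum bh_case classify_bh(unsigned block_start, unsigned block_end,
                                unsigned from, unsigned to)
{
    if (block_end <= from)
        return BH_BEFORE_WRITE;
    if (block_start >= to)
        return BH_AFTER_WRITE;
    if (block_start >= from && block_end <= to)
        return BH_FULLY_COVERED;
    if (block_start < from && block_end > to)
        return BH_WRITE_INSIDE;
    if (block_start < from)
        return BH_HEAD_PARTIAL;
    return BH_TAIL_PARTIAL;
}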

With these cases in mind, let's look at the function's implementation.

fs/buffer.c
  ksys_write->vfs_write->new_sync_write->call_write_iter->generic_file_write_iter->__generic_file_write_iter->generic_perform_write->exfat_write_begin->cont_write_begin->block_write_begin
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
        unsigned flags, struct page **pagep, get_block_t *get_block)
{
    pgoff_t index = pos >> PAGE_SHIFT;
    struct page *page;
    int status;

    page = grab_cache_page_write_begin(mapping, index, flags);   /*    1     */
    if (!page)
        return -ENOMEM;

    status = __block_write_begin(page, pos, len, get_block);    /*     2     */
    if (unlikely(status)) {
        unlock_page(page);
        put_page(page);
        page = NULL;
    }

    *pagep = page;
    return status;
}

(1) Get the page cache page for the corresponding file position.

(2) Call __block_write_begin to do the real work.

fs/buffer.c
 ksys_write->vfs_write->new_sync_write->call_write_iter->generic_file_write_iter->__generic_file_write_iter->generic_perform_write->exfat_write_begin->cont_write_begin->block_write_begin->__block_write_begin->__block_write_begin_int
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
        get_block_t *get_block)
{
    return __block_write_begin_int(page, pos, len, get_block, NULL);
}

int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
        get_block_t *get_block, struct iomap *iomap)
{
    unsigned from = pos & (PAGE_SIZE - 1);
    unsigned to = from + len;
    struct inode *inode = page->mapping->host;
    unsigned block_start, block_end;
    sector_t block;
    int err = 0;
    unsigned blocksize, bbits;
    struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;

    BUG_ON(!PageLocked(page));
    BUG_ON(from > PAGE_SIZE);
    BUG_ON(to > PAGE_SIZE);
    BUG_ON(from > to);

    head = create_page_buffers(page, inode, 0);
    blocksize = head->b_size;
    bbits = block_size_bits(blocksize);

    block = (sector_t)page->index << (PAGE_SHIFT - bbits);

    for(bh = head, block_start = 0; bh != head || !block_start;
        block++, block_start=block_end, bh = bh->b_this_page) {  /*       1        */
        block_end = block_start + blocksize;
        if (block_end <= from || block_start >= to) {         /*       2        */
            if (PageUptodate(page)) {
                if (!buffer_uptodate(bh))
                    set_buffer_uptodate(bh);
            }
            continue;
        }
        if (buffer_new(bh))
            clear_buffer_new(bh);
        if (!buffer_mapped(bh)) {                    /*       3        */
            WARN_ON(bh->b_size != blocksize);
            if (get_block) {
                err = get_block(inode, block, bh, 1);       /*       4        */
                if (err)
                    break;
            } else {
                iomap_to_bh(inode, block, bh, iomap);
            }

            if (buffer_new(bh)) {
                clean_bdev_bh_alias(bh);
                if (PageUptodate(page)) {
                    clear_buffer_new(bh);
                    set_buffer_uptodate(bh);
                    mark_buffer_dirty(bh);
                    continue;
                }
                if (block_end > to || block_start < from)   /*       5        */
                    zero_user_segments(page,
                        to, block_end,
                        block_start, from);
                continue;
            }
        }
        if (PageUptodate(page)) {
            if (!buffer_uptodate(bh))
                set_buffer_uptodate(bh);
            continue; 
        }
        if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
            !buffer_unwritten(bh) &&
             (block_start < from || block_end > to)) {         /*       6        */
            ll_rw_block(REQ_OP_READ, 0, 1, &bh);
            *wait_bh++=bh;
        }
    }
    /*
     * If we issued read requests - let them complete.
     */
    while(wait_bh > wait) {                         /*       7        */
        wait_on_buffer(*--wait_bh);
        if (!buffer_uptodate(*wait_bh))
            err = -EIO;
    }
    if (unlikely(err))
        page_zero_new_buffers(page, from, to);
    return err;
}

(1) Loop over every buffer_head; the loop exits when bh == head, i.e. when all buffer_heads in the page have been visited. A small open question: why not exit the loop as soon as block_start > to?

(2) This handles the first two cases above; the buffer_head lies outside the write range and is simply skipped.

(3) (4) If the buffer_head has no mapping yet, call get_block to map it to the filesystem's actual sectors.

(5) For the partial-write cases ③④⑤, call zero_user_segments first to zero the parts of a newly allocated buffer that will not be overwritten.

(6) For cases ③④⑤, read-ahead is needed: call ll_rw_block to read in the corresponding blocks.

(7) Wait for the reads issued in (6) to complete.

Notifying that the data has been written

After the data has been copied from the user buffer into the page cache, a_ops->write_end is used by the concrete filesystem to do the follow-up work.

For exFAT, this callback is implemented as:

fs/exfat/inode.c
static int exfat_write_end(struct file *file, struct address_space *mapping,
        loff_t pos, unsigned int len, unsigned int copied,
        struct page *pagep, void *fsdata)
{
    struct inode *inode = mapping->host;
    struct exfat_inode_info *ei = EXFAT_I(inode);
    int err;

    err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);

    if (EXFAT_I(inode)->i_size_aligned < i_size_read(inode)) {
        exfat_fs_error(inode->i_sb,
            "invalid size(size(%llu) > aligned(%llu)\n",
            i_size_read(inode), EXFAT_I(inode)->i_size_aligned);
        return -EIO;
    }

    if (err < len)
        exfat_write_failed(mapping, pos+len);

    if (!(err < 0) && !(ei->attr & ATTR_ARCHIVE)) {
        inode->i_mtime = inode->i_ctime = current_time(inode);
        ei->attr |= ATTR_ARCHIVE;
        mark_inode_dirty(inode);
    }

    return err;
}

It mainly calls generic_write_end to do the actual work:

fs/buffer.c
 ksys_write->vfs_write->new_sync_write->call_write_iter->generic_file_write_iter->__generic_file_write_iter->generic_perform_write->exfat_write_end->generic_write_end
int generic_write_end(struct file *file, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned copied,
            struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    loff_t old_size = inode->i_size;
    bool i_size_changed = false;

    copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

    /*
     * No need to use i_size_read() here, the i_size cannot change under us
     * because we hold i_rwsem.
     *
     * But it's important to update i_size while still holding page lock:
     * page writeout could otherwise come in and zero beyond i_size.
     */
    if (pos + copied > inode->i_size) {
        i_size_write(inode, pos + copied);
        i_size_changed = true;
    }

    unlock_page(page);
    put_page(page);

    if (old_size < pos)
        pagecache_isize_extended(inode, old_size, pos);
    /*
     * Don't mark the inode dirty under page lock. First, it unnecessarily
     * makes the holding time of page lock longer. Second, it forces lock
     * ordering of page lock and transaction start for journaling
     * filesystems.
     */
    if (i_size_changed)
        mark_inode_dirty(inode);
    return copied;
}
EXPORT_SYMBOL(generic_write_end);

Here, let's focus on block_write_end:

fs/buffer.c
int block_write_end(struct file *file, struct address_space *mapping,
            loff_t pos, unsigned len, unsigned copied,
            struct page *page, void *fsdata)
{
    struct inode *inode = mapping->host;
    unsigned start;

    start = pos & (PAGE_SIZE - 1);

    if (unlikely(copied < len)) {                     /*          1          */
        /*
         * The buffers that were written will now be uptodate, so we
         * don't have to worry about a readpage reading them and
         * overwriting a partial write. However if we have encountered
         * a short write and only partially written into a buffer, it
         * will not be marked uptodate, so a readpage might come in and
         * destroy our partial write.
         *
         * Do the simplest thing, and just treat any short write to a
         * non uptodate page as a zero-length write, and force the
         * caller to redo the whole thing.
         */
        if (!PageUptodate(page))
            copied = 0;

        page_zero_new_buffers(page, start+copied, start+len);
    }
    flush_dcache_page(page);

    /* This could be a short (even 0-length) commit */
    __block_commit_write(inode, page, start, start+copied); /*          2          */

    return copied;
}

There is one special case to handle here: the number of bytes actually copied is less than len. There is a dedicated term for this: a short write.

(The following again draws mainly on 《存储技术原理分析》.)

For the full-overwrite case no read-ahead was performed, so if the overwrite turns out not to be complete, part of the buffer_head's contents is indeterminate: it was neither read from disk nor updated from user space. Blindly flushing this data to disk would overwrite valid data already on disk.

The simplest handling, therefore, is to treat this scenario as if nothing was written (copied = 0) and let the caller redo the whole thing.
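
One user-visible consequence of short writes is that write(2) can return fewer bytes than requested; a common user-space pattern (an illustrative sketch, not part of the kernel path above) is to retry until everything is written:

#include <errno.h>
#include <unistd.h>

/* keep calling write(2) until count bytes are written or a real error occurs */
static ssize_t write_all(int fd, const char *buf, size_t count)
{
    size_t done = 0;

    while (done < count) {
        ssize_t n = write(fd, buf + done, count - done);
        if (n < 0) {
            if (errno == EINTR)
                continue;          /* interrupted before writing anything: retry */
            return -1;             /* real error                                  */
        }
        done += (size_t)n;         /* short write: resume where we stopped        */
    }
    return (ssize_t)done;
}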

Next, let's look at __block_commit_write:

static int __block_commit_write(struct inode *inode, struct page *page,
        unsigned from, unsigned to)
{
    unsigned block_start, block_end;
    int partial = 0;
    unsigned blocksize;
    struct buffer_head *bh, *head;

    bh = head = page_buffers(page);
    blocksize = bh->b_size;

    block_start = 0;
    do {                                                /*          1          */
        block_end = block_start + blocksize;
        if (block_end <= from || block_start >= to) {   /*          2          */
            if (!buffer_uptodate(bh))
                partial = 1;
        } else {
            set_buffer_uptodate(bh);
            mark_buffer_dirty(bh);
        }
        clear_buffer_new(bh);

        block_start = block_end;
        bh = bh->b_this_page;
    } while (bh != head);

    /*
     * If this is a partial write which happened to make all buffers
     * uptodate then we can optimize away a bogus readpage() for
     * the next read(). Here we 'discover' whether the page went
     * uptodate as a result of this (potentially partial) write.
     */
    if (!partial)                     /*          3          */
        SetPageUptodate(page);
    return 0;
}

(1) Iterate over all the buffer_heads in the page.

(2) As in __block_write_begin, this corresponds to cases ① and ②; for a buffer_head outside the written range that is not yet up to date, set partial = 1.

(3) If !partial, i.e. every buffer_head in the page is now up to date, call SetPageUptodate on the page. This is the key operation of the function.
