Log project: writing Kafka data into HBase

Contents

1. Add dependencies

2. Create the namespace and tables in the hbase shell

3. UserFriendToHB

4. UsersToHB

5. TrainToHB

6. EventsToHB

7. EventAttendeToHb


1. Add dependencies

<dependencies>
    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.11</version>
      <scope>test</scope>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka-clients -->
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-clients</artifactId>
      <version>2.8.0</version>
    </dependency>
    <!-- https://mvnrepository.com/artifact/org.apache.kafka/kafka -->
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka_2.12</artifactId>
      <version>2.8.0</version>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>kafka-streams</artifactId>
      <version>2.8.0</version>
    </dependency>
    <!-- HBase dependencies -->
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-client</artifactId>
      <version>2.3.5</version>
    </dependency>
    <dependency>
      <groupId>org.apache.hbase</groupId>
      <artifactId>hbase-server</artifactId>
      <version>2.3.5</version>
    </dependency>
  </dependencies>
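
Optionally, the same pom.xml can also pin the compiler level so the classes below build consistently. This is a minimal sketch assuming a Java 8 build; adjust it to your own environment.

  <properties>
    <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    <maven.compiler.source>1.8</maven.compiler.source>
    <maven.compiler.target>1.8</maven.compiler.target>
  </properties>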

2. Create the namespace and tables by running the following statements in the hbase shell

create_namespace 'events_db'

create 'events_db:users','profile','region','registration'

create 'events_db:user_friend','uf'

create 'events_db:events','schedule','location','creator','remark'

create 'events_db:event_attendee','euat'

create 'events_db:train','eu'
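
To confirm the namespace and tables were created as expected, you can run a quick sanity check in the same hbase shell:

list_namespace_tables 'events_db'

describe 'events_db:users'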

3. UserFriendToHB

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Properties;

/**
 * Consume the user_friends data from Kafka into HBase.
 */
public class UserFriendToHB {
    static int num = 0;// counter for the total number of rows written to HBase

    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.180.147:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");// commit offsets manually
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");// auto-commit interval (only applies when auto-commit is enabled)
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "userfriend_group");
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Collections.singleton("user_friends"));

        // Configure HBase and connect to the HBase cluster
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_DIR, "hdfs://192.168.180.147:9000/hbase");
        conf.set(HConstants.ZOOKEEPER_QUORUM, "192.168.180.147");
        conf.set(HConstants.CLIENT_PORT_STR, "2181");

        Connection connection = null;
        try {
            connection = ConnectionFactory.createConnection(conf);
            Table userFriendTable = connection.getTable(TableName.valueOf("events_db:user_friend"));
            while (true) {
                ConsumerRecords<String, String> poll = consumer.poll(Duration.ofMillis(100));
                ArrayList<Put> datas = new ArrayList<>();
                for (ConsumerRecord<String, String> record :
                        poll) {
                    System.out.println(record.value());// userid,friendid
                    String[] split = record.value().split(",");
                    Put put = new Put(Bytes.toBytes((split[0] + split[1]).hashCode()));// rowkey: the 4-byte int hash of userid+friendid
                    put.addColumn("uf".getBytes(), "user_id".getBytes(), split[0].getBytes());
                    put.addColumn("uf".getBytes(), "friend_id".getBytes(), split[1].getBytes());
                    datas.add(put);
                }
                num = num + datas.size();
                System.out.println("---------------------------------num:" + num);
                if (datas.size() != 0) {
                    userFriendTable.put(datas);
                    consumer.commitAsync();// auto-commit is disabled, so commit offsets after a successful write
                }
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
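
Once the consumer has been running for a while, a quick way to confirm that rows are landing in HBase is to scan a few of them from the hbase shell (a sanity check, not part of the pipeline itself):

scan 'events_db:user_friend', {LIMIT => 5}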

4. UsersToHB

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Properties;

/**
 * Consume the users data from Kafka into HBase.
 */
public class UsersToHB {
    static int num = 0;// counter for the total number of rows written to HBase

    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.180.147:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");// commit offsets manually
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");// auto-commit interval (only applies when auto-commit is enabled)
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "users_group");
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Collections.singleton("users"));

        // Configure HBase and connect to the HBase cluster
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_DIR, "hdfs://192.168.180.147:9000/hbase");
        conf.set(HConstants.ZOOKEEPER_QUORUM, "192.168.180.147");
        conf.set(HConstants.CLIENT_PORT_STR, "2181");

        Connection connection = null;

        try {
            connection = ConnectionFactory.createConnection(conf);
            Table usersTable = connection.getTable(TableName.valueOf("events_db:users"));
            while (true) {
                ConsumerRecords<String, String> poll = consumer.poll(Duration.ofMillis(100));
                ArrayList<Put> datas = new ArrayList<>();
                for (ConsumerRecord<String, String> record :
                        poll) {
                    System.out.println(record.value());
                    String[] user = record.value().split(",");// user_id,locale,birthyear,gender,joinedAt,location,timezone
                    Put put = new Put(Bytes.toBytes(user[0]));// rowkey: user_id
                    put.addColumn("profile".getBytes(), "birthyear".getBytes(), user[2].getBytes());
                    put.addColumn("profile".getBytes(), "gender".getBytes(), user[3].getBytes());
                    put.addColumn("region".getBytes(), "locale".getBytes(), user[1].getBytes());
                    if (user.length > 5)
                        put.addColumn("region".getBytes(), "location".getBytes(), user[5].getBytes());
                    if (user.length > 6)
                        put.addColumn("region".getBytes(), "timezone".getBytes(), user[6].getBytes());
                    if (user.length > 4)
                        put.addColumn("registration".getBytes(), "joinedAt".getBytes(), user[4].getBytes());
                    datas.add(put);
                }
                num = num + datas.size();
                System.out.println("---------------------------------num:" + num);
                if (datas.size() != 0) {
                    usersTable.put(datas);
                    consumer.commitAsync();// auto-commit is disabled, so commit offsets after a successful write
                }
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
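
If the users topic is still empty, or you want to test the consumer end to end, a small producer like the sketch below can push a single record into it. The class name UsersTestProducer and the sample CSV line are made up for illustration; the field order matches what UsersToHB expects (user_id,locale,birthyear,gender,joinedAt,location,timezone).

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.Properties;

public class UsersTestProducer {
    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.180.147:9092");
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);

        KafkaProducer<String, String> producer = new KafkaProducer<>(prop);
        // A made-up sample line; replace it with a real record from your dataset
        String testUser = "1000001,en_US,1990,male,2012-10-02T06:40:55.524Z,Beijing China,480";
        producer.send(new ProducerRecord<>("users", testUser));
        producer.flush();
        producer.close();
    }
}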

5. TrainToHB

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Properties;

/**
 * Consume the train data from Kafka into HBase.
 */
public class TrainToHB {
    static int num = 0;// counter for the total number of rows written to HBase

    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.180.147:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");// commit offsets manually
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");// auto-commit interval (only applies when auto-commit is enabled)
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "train_group");
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Collections.singleton("train"));

        // Configure HBase and connect to the HBase cluster
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_DIR, "hdfs://192.168.180.147:9000/hbase");
        conf.set(HConstants.ZOOKEEPER_QUORUM, "192.168.180.147");
        conf.set(HConstants.CLIENT_PORT_STR, "2181");

        Connection connection = null;

        try {
            connection = ConnectionFactory.createConnection(conf);
            Table trainTable = connection.getTable(TableName.valueOf("events_db:train"));
            while (true) {
                ConsumerRecords<String, String> poll = consumer.poll(Duration.ofMillis(100));
                ArrayList<Put> datas = new ArrayList<>();
                for (ConsumerRecord<String, String> record :
                        poll) {
                    System.out.println(record.value());
                    String[] train = record.value().split(",");// user,event,invited,timestamp,interested,not_interested
                    double random = Math.random();
                    // user + event is not unique in the train data, so a random suffix keeps duplicate pairs from overwriting each other
                    Put put = new Put(Bytes.toBytes(train[0] + train[1] + random));
                    put.addColumn("eu".getBytes(),"user".getBytes(),train[0].getBytes());
                    put.addColumn("eu".getBytes(),"event".getBytes(),train[1].getBytes());
                    put.addColumn("eu".getBytes(),"invited".getBytes(),train[2].getBytes());
                    put.addColumn("eu".getBytes(),"timestamp".getBytes(),train[3].getBytes());
                    put.addColumn("eu".getBytes(),"interested".getBytes(),train[4].getBytes());
                    put.addColumn("eu".getBytes(),"not_interested".getBytes(),train[5].getBytes());

                    datas.add(put);
                }
                num = num + datas.size();
                System.out.println("---------------------------------num:" + num);
                if (datas.size() != 0) {
                    trainTable.put(datas);
                    consumer.commitAsync();// auto-commit is disabled, so commit offsets after a successful write
                }
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
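
The rowkey above appends Math.random() so that repeated (user, event) pairs do not overwrite each other, at the cost of non-repeatable rowkeys. As a sketch of an alternative (not part of the original code), the Kafka partition and offset of each record could be used as a deterministic suffix instead, so that re-consuming the topic rewrites the same rows rather than adding new ones:

import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * Sketch only: a deterministic rowkey builder that could replace the Math.random() suffix above.
 * The train fields come from the split in TrainToHB; partition and offset come from the ConsumerRecord.
 */
public class TrainRowKey {
    public static String of(ConsumerRecord<String, String> record, String[] train) {
        // user + event + partition + offset is unique per consumed record and stable across re-runs
        return train[0] + "_" + train[1] + "_" + record.partition() + "_" + record.offset();
    }
}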

6. EventsToHB

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Properties;

/**
 * Consume the events data from Kafka into HBase.
 */
public class EventsToHB {
    static int num = 0;// counter for the total number of rows written to HBase

    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.180.147:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");// commit offsets manually
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");// auto-commit interval (only applies when auto-commit is enabled)
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "events_group");
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Collections.singleton("events"));

        // Configure HBase and connect to the HBase cluster
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_DIR, "hdfs://192.168.180.147:9000/hbase");
        conf.set(HConstants.ZOOKEEPER_QUORUM, "192.168.180.147");
        conf.set(HConstants.CLIENT_PORT_STR, "2181");

        Connection connection = null;

        try {
            connection = ConnectionFactory.createConnection(conf);
            Table eventsTable = connection.getTable(TableName.valueOf("events_db:events"));
            while (true) {
                ConsumerRecords<String, String> poll = consumer.poll(Duration.ofMillis(100));
                ArrayList<Put> datas = new ArrayList<>();
                for (ConsumerRecord<String, String> record :
                        poll) {
                    System.out.println(record.value());
                    String[] events = record.value().split(",");// event_id,user_id,start_time,city,state,zip,country,lat,lng,common_words
                    Put put = new Put(Bytes.toBytes(events[0]));// rowkey: event_id
                    put.addColumn("creator".getBytes(), "userid".getBytes(), Bytes.toBytes(events[1]));
                    put.addColumn("schedule".getBytes(), "starttime".getBytes(), Bytes.toBytes(events[2]));
                    put.addColumn("location".getBytes(), "city".getBytes(), Bytes.toBytes(events[3]));
                    put.addColumn("location".getBytes(), "state".getBytes(), Bytes.toBytes(events[4]));
                    put.addColumn("location".getBytes(), "zip".getBytes(), Bytes.toBytes(events[5]));
                    put.addColumn("location".getBytes(), "country".getBytes(), Bytes.toBytes(events[6]));
                    put.addColumn("location".getBytes(), "lat".getBytes(), Bytes.toBytes(events[7]));
                    put.addColumn("location".getBytes(), "lng".getBytes(), Bytes.toBytes(events[8]));
                    put.addColumn("remark".getBytes(), "commonwords".getBytes(), Bytes.toBytes(events[9]));

                    datas.add(put);
                }
                num = num + datas.size();
                System.out.println("---------------------------------num:" + num);
                if (datas.size() != 0) {
                    eventsTable.put(datas);
                    consumer.commitAsync();// auto-commit is disabled, so commit offsets after a successful write
                }
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
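
To check that the event rows were written with the expected column families, you can scan a couple of rows restricted to one family from the hbase shell (again just a sanity check):

scan 'events_db:events', {COLUMNS => 'location', LIMIT => 3}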

7. EventAttendeToHb

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.io.IOException;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Properties;

/**
 * Consume the event_attendees data from Kafka into HBase.
 */
public class EventAttendeToHb {
    static int num = 0;// counter for the total number of rows written to HBase

    public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.180.147:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");// commit offsets manually
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");// auto-commit interval (only applies when auto-commit is enabled)
        prop.put(ConsumerConfig.GROUP_ID_CONFIG, "eventattendee_group");
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        consumer.subscribe(Collections.singleton("event_attendees"));

        // Configure HBase and connect to the HBase cluster
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.HBASE_DIR, "hdfs://192.168.180.147:9000/hbase");
        conf.set(HConstants.ZOOKEEPER_QUORUM, "192.168.180.147");
        conf.set(HConstants.CLIENT_PORT_STR, "2181");

        Connection connection = null;

        try {
            connection = ConnectionFactory.createConnection(conf);
            Table table = connection.getTable(TableName.valueOf("events_db:event_attendee"));
            while (true) {
                ConsumerRecords<String, String> poll = consumer.poll(Duration.ofMillis(100));
                ArrayList<Put> datas = new ArrayList<>();
                for (ConsumerRecord<String, String> record :
                        poll) {
                    System.out.println(record.value());// eventid,friendid,yes/no/maybe
                    String[] eventattend = record.value().split(",");
                    Put put = new Put(Bytes.toBytes(eventattend[0] + eventattend[1] + eventattend[2]));// rowkey: eventid + friendid + state
                    put.addColumn("euat".getBytes(),"eventid".getBytes(),Bytes.toBytes(eventattend[0]));
                    put.addColumn("euat".getBytes(),"friendid".getBytes(),Bytes.toBytes(eventattend[1]));
                    put.addColumn("euat".getBytes(),"state".getBytes(),Bytes.toBytes(eventattend[2]));

                    datas.add(put);
                }
                num = num + datas.size();
                System.out.println("---------------------------------num:" + num);
                if (datas.size() != 0) {
                    table.put(datas);
                    consumer.commitAsync();// auto-commit is disabled, so commit offsets after a successful write
                }
                try {
                    Thread.sleep(10);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
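
After all five consumers have caught up, the row counts of each table can be compared against the source data with the hbase shell count command (INTERVAL only controls how often progress is printed; adjust it to your data size):

count 'events_db:users', INTERVAL => 10000
count 'events_db:user_friend', INTERVAL => 100000
count 'events_db:events', INTERVAL => 10000
count 'events_db:event_attendee', INTERVAL => 100000
count 'events_db:train', INTERVAL => 100000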
