Deploying a Hive Data Warehouse with docker-compose (by 筑梦之路)


1. Create a Docker network

# Create the network; note that the name hadoop-network cannot be used
docker network create hadoop_network

# List networks
docker network ls
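
Before wiring containers to it, you can double-check the network's driver and subnet (a quick sanity check using the standard Docker CLI):

# Inspect the newly created network
docker network inspect hadoop_network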

2. Deploy MySQL

# Pull the image

docker pull mysql:5.7

# Generate the configuration

mkdir -p conf/ data/db/

cat > conf/my.cnf <<EOF
[mysqld]
character-set-server=utf8
log-bin=mysql-bin
server-id=1
pid-file        = /var/run/mysqld/mysqld.pid
socket          = /var/run/mysqld/mysqld.sock
datadir         = /var/lib/mysql
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
symbolic-links=0
secure_file_priv =
wait_timeout=120
interactive_timeout=120
default-time_zone = '+8:00'
skip-external-locking
skip-name-resolve
open_files_limit = 10240
max_connections = 1000
max_connect_errors = 6000
table_open_cache = 800
max_allowed_packet = 40M
sort_buffer_size = 2M
join_buffer_size = 1M
thread_cache_size = 32
query_cache_size = 64M
transaction_isolation = READ-COMMITTED
tmp_table_size = 128M
max_heap_table_size = 128M
sync-binlog = 1
binlog_format = ROW
binlog_cache_size = 1M
key_buffer_size = 128M
read_buffer_size = 2M
read_rnd_buffer_size = 4M
bulk_insert_buffer_size = 64M
lower_case_table_names = 1
explicit_defaults_for_timestamp=true
event_scheduler = ON
log_bin_trust_function_creators = 1
innodb_buffer_pool_size = 512M
innodb_flush_log_at_trx_commit = 1
innodb_file_per_table = 1
innodb_log_buffer_size = 4M
innodb_log_file_size = 256M
innodb_max_dirty_pages_pct = 90
innodb_read_io_threads = 4
innodb_write_io_threads = 4
EOF
cat docker-compose.yml

version: '3'
services:
  db:
    image: mysql:5.7 # MySQL version
    container_name: mysql
    hostname: mysql
    volumes:
      - ./data/db:/var/lib/mysql
      - ./conf/my.cnf:/etc/mysql/mysql.conf.d/mysqld.cnf
    restart: always
    ports:
      - 13306:3306
    networks:
      - hadoop_network
    environment:
      MYSQL_ROOT_PASSWORD: 123456 # root password
      secure_file_priv:
    healthcheck:
      test: ["CMD-SHELL", "curl -I localhost:3306 || exit 1"]
      interval: 10s
      timeout: 5s
      retries: 3

# Attach to the pre-created external network
networks:
  hadoop_network:
    external: true
docker-compose up -d

docker-compose ps
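
Once the container reports healthy, verify connectivity (the mysql client ships inside the mysql:5.7 image; 123456 is the root password set above):

# Check from inside the container
docker exec -it mysql mysql -uroot -p123456 -e "SELECT VERSION();"

# Or from the host through the published port (needs a local mysql client)
mysql -h 127.0.0.1 -P 13306 -uroot -p123456 -e "SHOW DATABASES;"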

3. Deploy the Hive data warehouse services

1) Download

# Download
wget http://archive.apache.org/dist/hive/hive-3.1.3/apache-hive-3.1.3-bin.tar.gz

# Extract
tar -zxvf apache-hive-3.1.3-bin.tar.gz

2) Configuration file

cat images/hive-config/hive-site.xml

<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
        <!-- 配置hdfs存储目录 -->
        <property>
                <name>hive.metastore.warehouse.dir</name>
                <value>/user/hive_remote/warehouse</value>
        </property>

        <property>
                <name>hive.metastore.local</name>
                <value>false</value>
        </property>

        <!-- 所连接的 MySQL 数据库的地址,hive_local是数据库,程序会自动创建,自定义就行 -->
        <property>
                <name>javax.jdo.option.ConnectionURL</name>
                <value>jdbc:mysql://mysql:3306/hive_metastore?createDatabaseIfNotExist=true&amp;useSSL=false&amp;serverTimezone=Asia/Shanghai</value>
        </property>

        <!-- MySQL 驱动 -->
        <property>
                <name>javax.jdo.option.ConnectionDriverName</name>
                <!--<value>com.mysql.cj.jdbc.Driver</value>-->
                <value>com.mysql.jdbc.Driver</value>
        </property>

        <!-- mysql连接用户 -->
        <property>
                <name>javax.jdo.option.ConnectionUserName</name>
                <value>root</value>
        </property>

        <!-- mysql连接密码 -->
        <property>
                <name>javax.jdo.option.ConnectionPassword</name>
                <value>123456</value>
        </property>

        <!--元数据是否校验-->
        <property>
                <name>hive.metastore.schema.verification</name>
                <value>false</value>
        </property>

        <property>
                <name>system:user.name</name>
                <value>root</value>
                <description>user name</description>
        </property>

        <property>
                <name>hive.metastore.uris</name>
                <value>thrift://hive-metastore:9083</value>
        </property>

        <!-- host -->
        <property>
                <name>hive.server2.thrift.bind.host</name>
                <value>0.0.0.0</value>
                <description>Bind host on which to run the HiveServer2 Thrift service.</description>
        </property>

        <!-- hs2端口 默认是10000-->
        <property>
                <name>hive.server2.thrift.port</name>
                <value>10000</value>
        </property>

        <property>
                <name>hive.server2.active.passive.ha.enable</name>
                <value>true</value>
        </property>

</configuration>

3) Startup script bootstrap.sh

#!/usr/bin/env sh


wait_for() {
    # Block until host $1 accepts connections on port $2
    echo "Waiting for $1 to listen on $2..."
    while ! nc -z "$1" "$2"; do echo waiting...; sleep 1s; done
}

start_hdfs_namenode() {

        # Format HDFS only on the first start; the marker file prevents reformatting
        if [ ! -f /tmp/namenode-formated ]; then
                ${HADOOP_HOME}/bin/hdfs namenode -format >/tmp/namenode-formated
        fi

        ${HADOOP_HOME}/bin/hdfs --loglevel INFO --daemon start namenode

        tail -f ${HADOOP_HOME}/logs/*namenode*.log
}

start_hdfs_datanode() {

        wait_for $1 $2

        ${HADOOP_HOME}/bin/hdfs --loglevel INFO --daemon start datanode

        tail -f ${HADOOP_HOME}/logs/*datanode*.log
}

start_yarn_resourcemanager() {

        ${HADOOP_HOME}/bin/yarn --loglevel INFO --daemon start resourcemanager

        tail -f ${HADOOP_HOME}/logs/*resourcemanager*.log
}

start_yarn_nodemanager() {

        wait_for $1 $2

        ${HADOOP_HOME}/bin/yarn --loglevel INFO --daemon start nodemanager

        tail -f ${HADOOP_HOME}/logs/*nodemanager*.log
}

start_yarn_proxyserver() {

        wait_for $1 $2

        ${HADOOP_HOME}/bin/yarn --loglevel INFO --daemon start proxyserver

        tail -f ${HADOOP_HOME}/logs/*proxyserver*.log
}

start_mr_historyserver() {

        wait_for $1 $2

        ${HADOOP_HOME}/bin/mapred --loglevel INFO  --daemon  start historyserver

        tail -f ${HADOOP_HOME}/logs/*historyserver*.log
}

start_hive_metastore() {

        # Initialize the metastore schema in MySQL only on the first start
        if [ ! -f ${HIVE_HOME}/formated ]; then
                schematool -initSchema -dbType mysql --verbose > ${HIVE_HOME}/formated
        fi

        $HIVE_HOME/bin/hive --service metastore

}

start_hive_hiveserver2() {

        $HIVE_HOME/bin/hive --service hiveserver2
}


case $1 in
        hadoop-hdfs-nn)
                start_hdfs_namenode
                ;;
        hadoop-hdfs-dn)
                start_hdfs_datanode $2 $3
                ;;
        hadoop-yarn-rm)
                start_yarn_resourcemanager
                ;;
        hadoop-yarn-nm)
                start_yarn_nodemanager $2 $3
                ;;
        hadoop-yarn-proxyserver)
                start_yarn_proxyserver $2 $3
                ;;
        hadoop-mr-historyserver)
                start_mr_historyserver $2 $3
                ;;
        hive-metastore)
                start_hive_metastore $2 $3
                ;;
        hive-hiveserver2)
                start_hive_hiveserver2 $2 $3
                ;;
        *)
                echo "请输入正确的服务启动命令~"
        ;;
esac
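
Each container calls this script with a role name plus, for dependent services, a host and port to block on before starting. For example (mirroring the command entries in the compose file below; the port values 9870 and 9864 are assumed Hadoop 3.x web-UI defaults, normally supplied via .env):

# Start a datanode once the namenode is listening
/opt/apache/bootstrap.sh hadoop-hdfs-dn hadoop-hdfs-nn 9870

# Start the metastore once datanode hadoop-hdfs-dn-2 is up
/opt/apache/bootstrap.sh hive-metastore hadoop-hdfs-dn-2 9864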

4) Build the image (Dockerfile)

FROM hadoop:v1

COPY hive-config/* ${HIVE_HOME}/conf/

COPY bootstrap.sh /opt/apache/

COPY mysql-connector-java-5.1.49/mysql-connector-java-5.1.49-bin.jar ${HIVE_HOME}/lib/

RUN sudo mkdir -p /home/hadoop/ && sudo chown -R hadoop:hadoop /home/hadoop/

#RUN yum -y install which
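
For the COPY instructions to succeed, the build context is assumed to be laid out as follows (paths inferred from the Dockerfile above and the earlier cat images/hive-config/hive-site.xml):

images/
├── Dockerfile
├── bootstrap.sh
├── hive-config/
│   └── hive-site.xml
└── mysql-connector-java-5.1.49/
    └── mysql-connector-java-5.1.49-bin.jar

Run the build command below from inside images/.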
docker build -t hadoop_hive:v1 . --no-cache

### Options explained
# -t: name (and tag) of the image to build
# . : build context (the directory containing the Dockerfile)
# -f: path to the Dockerfile (defaults to ./Dockerfile)
# --no-cache: do not reuse cached layers

5) Write docker-compose.yml
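
The compose file below takes its port numbers from a .env file that the original post does not include. The values here are an assumption based on the stock Hadoop 3.x and Hive defaults; adjust them to whatever your image actually uses:

# .env (assumed values: Hadoop 3.x / Hive defaults)
HADOOP_HDFS_NN_PORT=9870            # NameNode web UI
HADOOP_HDFS_DN_PORT=9864            # DataNode web UI
HADOOP_YARN_RM_PORT=8088            # ResourceManager web UI
HADOOP_YARN_NM_PORT=8042            # NodeManager web UI
HADOOP_YARN_PROXYSERVER_PORT=9111   # YARN web proxy (commonly used value; no fixed default)
HADOOP_MR_HISTORYSERVER_PORT=19888  # JobHistory web UI
HIVE_METASTORE_PORT=9083            # Metastore Thrift port
HIVE_HIVESERVER2_PORT=10000         # HiveServer2 Thrift port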

version: '3'
services:
  hadoop-hdfs-nn:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-hdfs-nn
    hostname: hadoop-hdfs-nn
    restart: always
    privileged: true
    env_file:
      - .env
    ports:
      - "30070:${HADOOP_HDFS_NN_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-hdfs-nn"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:${HADOOP_HDFS_NN_PORT} || exit 1"]
      interval: 20s
      timeout: 20s
      retries: 3
  hadoop-hdfs-dn-0:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-hdfs-dn-0
    hostname: hadoop-hdfs-dn-0
    restart: always
    depends_on:
      - hadoop-hdfs-nn
    env_file:
      - .env
    ports:
      - "30864:${HADOOP_HDFS_DN_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-hdfs-dn hadoop-hdfs-nn ${HADOOP_HDFS_NN_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:${HADOOP_HDFS_DN_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hadoop-hdfs-dn-1:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-hdfs-dn-1
    hostname: hadoop-hdfs-dn-1
    restart: always
    depends_on:
      - hadoop-hdfs-nn
    env_file:
      - .env
    ports:
      - "30865:${HADOOP_HDFS_DN_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-hdfs-dn hadoop-hdfs-nn ${HADOOP_HDFS_NN_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:${HADOOP_HDFS_DN_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hadoop-hdfs-dn-2:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-hdfs-dn-2
    hostname: hadoop-hdfs-dn-2
    restart: always
    depends_on:
      - hadoop-hdfs-nn
    env_file:
      - .env
    ports:
      - "30866:${HADOOP_HDFS_DN_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-hdfs-dn hadoop-hdfs-nn ${HADOOP_HDFS_NN_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:${HADOOP_HDFS_DN_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hadoop-yarn-rm:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-yarn-rm
    hostname: hadoop-yarn-rm
    restart: always
    env_file:
      - .env
    ports:
      - "30888:${HADOOP_YARN_RM_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-yarn-rm"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :${HADOOP_YARN_RM_PORT} || exit 1"]
      interval: 20s
      timeout: 20s
      retries: 3
  hadoop-yarn-nm-0:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-yarn-nm-0
    hostname: hadoop-yarn-nm-0
    restart: always
    depends_on:
      - hadoop-yarn-rm
    env_file:
      - .env
    ports:
      - "30042:${HADOOP_YARN_NM_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-yarn-nm hadoop-yarn-rm ${HADOOP_YARN_RM_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:${HADOOP_YARN_NM_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hadoop-yarn-nm-1:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-yarn-nm-1
    hostname: hadoop-yarn-nm-1
    restart: always
    depends_on:
      - hadoop-yarn-rm
    env_file:
      - .env
    ports:
      - "30043:${HADOOP_YARN_NM_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-yarn-nm hadoop-yarn-rm ${HADOOP_YARN_RM_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:${HADOOP_YARN_NM_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hadoop-yarn-nm-2:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-yarn-nm-2
    hostname: hadoop-yarn-nm-2
    restart: always
    depends_on:
      - hadoop-yarn-rm
    env_file:
      - .env
    ports:
      - "30044:${HADOOP_YARN_NM_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-yarn-nm hadoop-yarn-rm ${HADOOP_YARN_RM_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "curl --fail http://localhost:${HADOOP_YARN_NM_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hadoop-yarn-proxyserver:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-yarn-proxyserver
    hostname: hadoop-yarn-proxyserver
    restart: always
    depends_on:
      - hadoop-yarn-rm
    env_file:
      - .env
    ports:
      - "30911:${HADOOP_YARN_PROXYSERVER_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-yarn-proxyserver hadoop-yarn-rm ${HADOOP_YARN_RM_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :${HADOOP_YARN_PROXYSERVER_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hadoop-mr-historyserver:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hadoop-mr-historyserver
    hostname: hadoop-mr-historyserver
    restart: always
    depends_on:
      - hadoop-yarn-rm
    env_file:
      - .env
    ports:
      - "31988:${HADOOP_MR_HISTORYSERVER_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hadoop-mr-historyserver hadoop-yarn-rm ${HADOOP_YARN_RM_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :${HADOOP_MR_HISTORYSERVER_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 3
  hive-metastore:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hive-metastore
    hostname: hive-metastore
    restart: always
    depends_on:
      - hadoop-hdfs-dn-2
    env_file:
      - .env
    ports:
      - "30983:${HIVE_METASTORE_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hive-metastore hadoop-hdfs-dn-2 ${HADOOP_HDFS_DN_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :${HIVE_METASTORE_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 5
  hive-hiveserver2:
    image: hadoop_hive:v1
    user: "hadoop:hadoop"
    container_name: hive-hiveserver2
    hostname: hive-hiveserver2
    restart: always
    depends_on:
      - hive-metastore
    env_file:
      - .env
    ports:
      - "31000:${HIVE_HIVESERVER2_PORT}"
    command: ["sh","-c","/opt/apache/bootstrap.sh hive-hiveserver2 hive-metastore ${HIVE_METASTORE_PORT}"]
    networks:
      - hadoop_network
    healthcheck:
      test: ["CMD-SHELL", "netstat -tnlp|grep :${HIVE_HIVESERVER2_PORT} || exit 1"]
      interval: 30s
      timeout: 30s
      retries: 5

# Attach to the pre-created external network
networks:
  hadoop_network:
    external: true

6) Start and verify
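
Bring the stack up and wait for the healthchecks to go green, then run a quick smoke test (a sketch, assuming beeline is on the PATH inside the image and using the hadoop user from the compose file):

# Start all services
docker-compose up -d

# Wait until every service reports healthy
docker-compose ps

# Smoke test: connect to HiveServer2 and list databases
docker exec -it hive-hiveserver2 beeline -u jdbc:hive2://hive-hiveserver2:10000 -n hadoop -e "show databases;"

The NameNode web UI should also be reachable on host port 30070, per the port mapping above.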

【Problem】

If an error like the one below appears, it is because the stack has been started more than once: the old HDFS data is still there, but the datanodes now have different IPs (this does not happen on a bare-metal deployment, where host IPs are fixed). The nodes therefore need to be refreshed. Wiping the old data would also work, but refreshing the nodes is the recommended approach. In my case nothing is mounted out to the host, so the stale state comes from the old containers that are still around. Several solutions follow:

org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException): Datanode denied communication with namenode because the host is not in the include-list: DatanodeRegistration(172.30.0.12:9866, datanodeUuid=f8188476-4a88-4cd6-836f-769d510929e4, infoPort=9864, infoSecurePort=0, ipcPort=9867, storageInfo=lv=-57;cid=CID-f998d368-222c-4a9a-88a5-85497a82dcac;nsid=1840040096;c=1680661390829) 

【Solutions】

1. Remove the old containers and restart

# Remove old (exited) containers
docker rm `docker ps -a|grep 'Exited'|awk '{print $1}'`

# Restart the services
docker-compose -f docker-compose.yml up -d

# Check status
docker-compose -f docker-compose.yml ps

2. Refresh the datanodes from the namenode

docker exec -it hadoop-hdfs-nn hdfs dfsadmin -refreshNodes

3. Refresh the datanodes from any node

# Using hadoop-hdfs-dn-0 as an example
docker exec -it hadoop-hdfs-dn-0 hdfs dfsadmin -fs hdfs://hadoop-hdfs-nn:9000 -refreshNodes

4. Assign a fixed IP to each container in docker-compose
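
A sketch of this approach (the 172.30.0.0/16 subnet is an assumption chosen to match the address in the error above; any free range works): recreate the external network with an explicit subnet, then pin each service's address by switching its networks entry from list form to mapping form.

# Recreate the external network with a fixed subnet
docker network rm hadoop_network
docker network create --subnet 172.30.0.0/16 hadoop_network

# Excerpt from docker-compose.yml (one service shown; repeat for each container)
services:
  hadoop-hdfs-dn-0:
    # ...other keys unchanged...
    networks:
      hadoop_network:
        ipv4_address: 172.30.0.12   # hypothetical fixed address

networks:
  hadoop_network:
    external: true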
