Flink Job Optimization Notes


1. Background

After a certain release went live, the daily batch job started taking more than three hours to finish, well beyond the estimated time. Looking at the daily scheduled tasks in DolphinScheduler, the data segmentation task in the dwd layer had a serious performance problem: it took nearly 40 minutes per day, and the runtime kept growing as the data volume increased. This compute node therefore needed focused optimization.

2. Optimization Approach and Implementation

The batch computation runs on Flink, so the starting point for optimization was to inspect the job's execution plan on the Flink History Server, find the nodes that consume the most time, and check whether any node is executed repeatedly because of the SQL logic, driving up the overall runtime.
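Besides the History Server, the optimized plan can also be printed before submission with Flink SQL's EXPLAIN statement. A minimal sketch, assuming a Flink version that supports EXPLAIN and using placeholder table names rather than the job's real schema:

-- Placeholder INSERT standing in for one of the job's real output statements
EXPLAIN PLAN FOR
INSERT INTO dwd_dialogue_sink
SELECT tenant_id, room_id, msg_id, msg_start_time
FROM dwd_msg_detail;

The printed optimized execution plan is the same plan the History Server visualizes, so repeated sub-plans can also be spotted from the text output.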

[Figure: the job's execution plan from the Flink UI, showing the data flow splitting into three branches]

As the figure shows, the computation splits into three branches. The SQL ends with only two INSERT statements, so at least one of the branches is unnecessary. The next step is to find the fork point and understand why the job splits into three branches, which means walking through the execution plan: in the UI you can expand each node to see its post-optimization expression and work out which step of the SQL it corresponds to. The node that produces the fork is the one that deserves the closest look.

Sort(
    orderBy=[tenant_id ASC, room_id ASC, msg_start_time ASC]
) -> 
Calc(
    select=[__etl_time__, date_id, tenant_id, brand_id, channel, channel_app_id, channel_session_type, msg_id, msg_start_time, msg_end_time, msg_from_id, msg_from_orig_id, 
        msg_from_nk, msg_from_role, msg_to_ids, msg_to_users, msg_type, msg_content, msg_detail, group_chat_info, dialogue_id, room_id, operation_flags, recording_properties, 
        asr_properties, metric_properties, tags, tag_properties, dialogue_properties, lastMsgEndTime, nextMsgStartTime, is_cut_by_msg_time, is_fit_specific_event, pre_is_fit_specific_event, fit_specific_row,
        CAST(FROM_UNIXTIME(w0$o0)) AS start_time,
        CAST(FROM_UNIXTIME(w0$o1)) AS end_time,
        CAST(w1$o0) AS fit_specific_rows,
        GenIsFitConsecutiveRowsAndTime(channel_session_type, tenant_id, CAST(w1$o0), CAST(CAST(FROM_UNIXTIME(w0$o0))), CAST(CAST(FROM_UNIXTIME(w0$o1))), is_fit_specific_event) AS is_fit_specific_flag,
        (is_cut_by_msg_time = _UTF-16LE'1':VARCHAR(2147483647) CHARACTER SET "UTF-16LE") AS $39, // whether the row is cut by msg time
        (GenIsFitConsecutiveRowsAndTime(channel_session_type, tenant_id, CAST(w1$o0), CAST(CAST(FROM_UNIXTIME(w0$o0))), CAST(CAST(FROM_UNIXTIME(w0$o1))), is_fit_specific_event) = 1) AS $40
    ]
) -> 
OverAggregate(
    partitionBy=[tenant_id, room_id],
    orderBy=[msg_start_time ASC],
    window#0=[
        LAG(is_fit_specific_flag) AS w0$o0
        RANG BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
    ],
    select=[__etl_time__, date_id, tenant_id, brand_id, channel, channel_app_id, channel_session_type, msg_id, msg_start_time, msg_end_time, msg_from_id, msg_from_orig_id, 
        msg_from_nk, msg_from_role, msg_to_ids, msg_to_users, msg_type, msg_content, msg_detail, group_chat_info, dialogue_id, room_id, operation_flags, recording_properties, 
        asr_properties, metric_properties, tags, tag_properties, dialogue_properties, lastMsgEndTime, nextMsgStartTime, is_cut_by_msg_time, is_fit_specific_event, pre_is_fit_specific_event, fit_specific_row,
        start_time, end_time, fit_specific_rows, is_fit_specific_flag, 
        $39,    // whether the row is cut by time
        $40,    // whether the current row's flag equals 1
        w0$o0 -> pre_is_fit_specific_flag // whether the previous row satisfies the special rule
    ]
) -> (
    Calc(
        select=[date_id, tenant_id, channel_session_type, msg_id, msg_start_time, room_id, tags,
            IF(($39 OR (w0$o0 IS NULL AND $40) OR ((w0$o0 <> is_fit_specific_flag) IS TRUE AND w0$o0 IS NOT NULL)), 1, 0) AS is_cut_flag,
            CAST(tenant_id) AS $8,
            CAST(msg_start_time) AS $9,
            GenCutPointTypeByFeature(channel_session_type, tenant_id, tags) AS $10
        ]
    ) -> 
    OverAggregate(
        partitionBy=[tenant_id, room_id],
        orderBy=[msg_start_time ASC],
        window#0=[
            COUNT(is_cut_flag) AS w0$o0,
            $SUM0(is_cut_flag) AS w0$o1
            RANG BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
        ],
        select=[date_id, tenant_id, channel_session_type, msg_id, msg_start_time, room_id, tags, is_cut_flag, 
            $8, -> tenant_id
            $9, -> msg_start_time
            $10, -> feature cut point: START/END/NO
            w0$o0, -> count
            w0$o1 -> sum
        ]
    ) -> 
    Calc(
        select=[
            CONCAT_WS(_UTF-16LE'-', $8, room_id, date_id, CAST(CASE((w0$o0 > 0:BIGINT), w0$o1, null:INTEGER))) AS dialogue_id1,
            channel_session_type, tenant_id, msg_id, $9 AS $f4, tags, $10 AS cutPointType
        ]
    ), 
    #####################
    CREATE TEMPORARY VIEW keep_cutpoint_view AS
    SELECT dialogue_id1, smoothRes.smoothResultVoMap
    FROM (
            SELECT dialogue_id1,
                    smoothCutPoint(channel_session_type, tenant_id, dialogue_id1, msg_id, msg_start_time, tags, cutPointType) AS smoothRes
            FROM gen_cut_type_by_feature_view
            GROUP BY dialogue_id1
    );
    #####################
    
    Calc(
        select=[__etl_time__, date_id, tenant_id, brand_id, channel, channel_app_id, channel_session_type, msg_id, msg_start_time, msg_end_time, msg_from_id, msg_from_orig_id, 
            msg_from_nk, msg_from_role, msg_to_ids, msg_to_users, msg_type, msg_content, msg_detail, group_chat_info, dialogue_id, room_id, operation_flags, recording_properties, 
            asr_properties, metric_properties, tags, tag_properties, dialogue_properties, lastMsgEndTime, nextMsgStartTime, is_cut_by_msg_time, is_fit_specific_event, pre_is_fit_specific_event, 
            fit_specific_row, start_time, end_time, fit_specific_rows, is_fit_specific_flag, 
            w0$o0 AS pre_is_fit_specific_flag, 
            CASE(w0$o0 IS NULL, is_fit_specific_flag, (w0$o0 <> is_fit_specific_flag), 1, 0) AS is_cut_by_specific, 
            IF(($39 OR (w0$o0 IS NULL AND $40) OR ((w0$o0 <> is_fit_specific_flag) IS TRUE AND w0$o0 IS NOT NULL)), 1, 0) AS is_cut_flag, 
            IF((IF(($39 OR (w0$o0 IS NULL AND $40) OR ((w0$o0 <> is_fit_specific_flag) IS TRUE AND w0$o0 IS NOT NULL)), 1, 0) = 1), _UTF-16LE'start', null:VARCHAR(2147483647) CHARACTER SET "UTF-16LE") AS $42,
            CAST(tenant_id) AS $43, 
            GenCutPointTypeByFeature(channel_session_type, tenant_id, tags) AS $44
        ]
    ) -> 
    OverAggregate(
        partitionBy=[tenant_id, room_id],
        orderBy=[msg_start_time ASC],
        window#0=[
            COUNT(is_cut_flag) AS w0$o0,
            $SUM0(is_cut_flag) AS w0$o1
            RANG BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
        ], select=[__etl_time__, date_id, tenant_id, brand_id, channel, channel_app_id, channel_session_type, msg_id, msg_start_time, msg_end_time, msg_from_id, msg_from_orig_id, 
            msg_from_nk, msg_from_role, msg_to_ids, msg_to_users, msg_type, msg_content, msg_detail, group_chat_info, dialogue_id, room_id, operation_flags, recording_properties, 
            asr_properties, metric_properties, tags, tag_properties, dialogue_properties, lastMsgEndTime, nextMsgStartTime, is_cut_by_msg_time, is_fit_specific_event, pre_is_fit_specific_event, 
            fit_specific_row, start_time, end_time, fit_specific_rows, is_fit_specific_flag, pre_is_fit_specific_flag, is_cut_by_specific, is_cut_flag, 
            $42, -> cut_point_type
            $43, 
            $44, 
            w0$o0, 
            w0$o1
        ]
    ) -> 
    Calc(
        select=[date_id, tenant_id, brand_id, channel, channel_app_id, channel_session_type, msg_id, msg_start_time, msg_end_time, msg_from_id, msg_from_orig_id, msg_from_nk, 
            msg_from_role, msg_to_ids, msg_to_users, msg_type, msg_content, msg_detail, group_chat_info, room_id, operation_flags, recording_properties, asr_properties, metric_properties, tags, tag_properties, 
            dialogue_properties, 
            CONCAT_WS(_UTF-16LE'-', $43, room_id, date_id, CAST(CASE((w0$o0 > 0:BIGINT), w0$o1, null:INTEGER))) AS dialogue_id1, 
            $44 AS cutPointType
        ]
    )
)

######################
First pass: generate dialogue_id1 from the time rule plus the special rules.
The feature cut-point matching has been pushed down by the planner and already runs in this stage.

Bugs & optimization points:
1. The smoothing step uses GROUP BY and then joins the result back to the main table, which forks the data flow and makes part of the computation run more than once; this part can be reworked with an OVER aggregation.
  gen_fit_sprcific_flag_view -> gen_cut_flag_view -> gen_dialogue_id_by_cut_flag_view -> gen_cut_type_by_feature_view -> keep_cutpoint_view

2. CASE WHEN pre_is_fit_specific_flag IS NULL THEN is_fit_specific_flag
        WHEN pre_is_fit_specific_flag <> is_fit_specific_flag THEN 1
        WHEN pre_is_fit_specific_flag = is_fit_specific_flag THEN 0
        ELSE 0
   END AS is_cut_by_specific
   This logic is wrong and causes GenIsFitConsecutiveRowsAndTime to be evaluated repeatedly.
3. IF(is_cut_flag = 1, 'start', CAST(NULL AS STRING)) AS cut_point_type: check whether this column is still needed at all.

======================
Sort(
    orderBy=[dialogue_id1 ASC]
) -> 
SortAggregate(
    isMerge=[false], 
    groupBy=[dialogue_id1], 
    select=[dialogue_id1, 
        smoothCutPoint(channel_session_type, tenant_id, dialogue_id1, msg_id, $f4, tags, cutPointType) AS smoothRes
    ]
) -> 
Calc(
    select=[dialogue_id1, smoothRes.smoothResultVoMap AS smoothResultVoMap]
) -> (
    Correlate(
        invocation=[GetCutPointBySplit($cor7.smoothResultVoMap)],
        correlate=[table(GetCutPointBySplit($cor7.smoothResultVoMap))],
        select=[dialogue_id1,smoothResultVoMap,msgId,cutPointMap],
        rowType=[
            RecordType(
                VARCHAR(2147483647) dialogue_id1, 
                (VARCHAR(2147483647), (VARCHAR(2147483647), VARCHAR(2147483647)) MAP) MAP smoothResultVoMap,
                VARCHAR(2147483647) msgId,
                (VARCHAR(2147483647), VARCHAR(2147483647)) MAP cutPointMap
            )
        ], joinType=[INNER]
    ) -> 
    Calc(
        select=[dialogue_id1, msgId, ITEM(cutPointMap, _UTF-16LE'isKeep') AS isKeep]
    ),
    
    Correlate(
        invocation=[GetCutPointBySplit($cor9.smoothResultVoMap)],
        correlate=[table(GetCutPointBySplit($cor9.smoothResultVoMap))],
        select=[dialogue_id1,smoothResultVoMap,msgId,cutPointMap], 
        rowType=[
            RecordType(
                VARCHAR(2147483647) dialogue_id1, 
                (VARCHAR(2147483647), (VARCHAR(2147483647), VARCHAR(2147483647)) MAP) MAP smoothResultVoMap, 
                VARCHAR(2147483647) msgId, 
                (VARCHAR(2147483647), VARCHAR(2147483647)) MAP cutPointMap
            )
        ], joinType=[INNER]
    ) -> 
    Calc(
        select=[dialogue_id1, msgId, ITEM(cutPointMap, _UTF-16LE'isKeep') AS isKeep]
    ), 
    Correlate(
        invocation=[GetCutPointBySplit($cor8.smoothResultVoMap)], 
        correlate=[table(GetCutPointBySplit($cor8.smoothResultVoMap))], 
        select=[dialogue_id1,smoothResultVoMap,msgId,cutPointMap], 
        rowType=[
            RecordType(
                VARCHAR(2147483647) dialogue_id1, 
                (VARCHAR(2147483647), (VARCHAR(2147483647), VARCHAR(2147483647)) MAP) MAP smoothResultVoMap, 
                VARCHAR(2147483647) msgId, 
                (VARCHAR(2147483647), VARCHAR(2147483647)) MAP cutPointMap
            )
        ], joinType=[INNER]
    ) ->
    Calc(
        select=[dialogue_id1, msgId, ITEM(cutPointMap, _UTF-16LE'isKeep') AS isKeep]
    )
)

######################
The smoothing computation.

Optimization point:
1. smoothCutPoint has a serious performance problem; rewrite it as a UDAF applied in an OVER aggregation, which optimizes away the GROUP BY + LATERAL TABLE + JOIN.

The corresponding SQL (before optimization):

-- Mark feature-based cut points on the rows according to the feature configuration
CREATE TEMPORARY VIEW gen_cut_type_by_feature_view AS
SELECT *,
       GenCutPointTypeByFeature(channel_session_type, tenant_id, tags) AS cutPointType
FROM gen_dialogue_id_by_cut_flag_view;

CREATE TEMPORARY VIEW keep_cutpoint_view AS
SELECT dialogue_id1, smoothRes.smoothResultVoMap
FROM (
         SELECT dialogue_id1,
                smoothCutPoint(channel_session_type, tenant_id, dialogue_id1, msg_id, msg_start_time, tags, cutPointType) AS smoothRes
         FROM gen_cut_type_by_feature_view
         GROUP BY dialogue_id1
);


CREATE TEMPORARY VIEW keep_cutpoint_breakup AS
SELECT dialogue_id1, smoothResultVoMap, msgId, cutPointMap, cutPointMap['isKeep'] AS isKeep
FROM keep_cutpoint_view, LATERAL TABLE(GetCutPointBySplit(smoothResultVoMap)) AS T(msgId, cutPointMap);


CREATE TEMPORARY VIEW keep_cutpoint_join AS
SELECT t1.*,t2.isKeep, IF(t2.isKeep = '0', 'no', t1.cutPointType) AS curCutPointType, msg_start_time
FROM gen_cut_type_by_feature_view t1
LEFT JOIN keep_cutpoint_breakup t2 ON t1.dialogue_id1 = t2.dialogue_id1 AND t1.msg_id = t2.msgId;

CREATE TEMPORARY VIEW gen_dialogue_id_by_feature_view0 AS
SELECT
    date_id,
    tenant_id,
    brand_id,
    channel,
    channel_app_id,
    channel_session_type,
    msg_id,
    msg_start_time,
    msg_end_time,
    msg_from_id,
    msg_from_nk,
    msg_from_orig_id,
    msg_from_role,
    msg_to_ids,
    msg_to_users,
    msg_type,
    msg_content,
    msg_detail,
    group_chat_info,
    room_id,
    operation_flags,
    recording_properties,
    asr_properties,
    metric_properties,
    tags,
    tag_properties,
    dialogue_properties,
    dialogue_id1,
    cutPointType,
    curCutPointType,
    preCutPointType,
    isKeep,
    CONCAT_WS(
            '-',
            CAST(tenant_id AS STRING),
            room_id,
            date_id,
            CAST(
                    SUM(IF(preCutPointType IS NULL OR preCutPointType = 'end' OR curCutPointType = 'start', 1, 0)) OVER (PARTITION BY tenant_id, room_id, date_id ORDER BY msg_start_time)
                AS STRING)
        ) AS dialogue_id
FROM (
         SELECT
             date_id,
             tenant_id,
             brand_id,
             channel,
             channel_app_id,
             channel_session_type,
             msg_id,
             msg_start_time,
             msg_end_time,
             msg_from_id,
             msg_from_nk,
             msg_from_orig_id,
             msg_from_role,
             msg_to_ids,
             msg_to_users,
             msg_type,
             msg_content,
             msg_detail,
             group_chat_info,
             room_id,
             operation_flags,
             recording_properties,
             asr_properties,
             metric_properties,
             tags,
             tag_properties,
             dialogue_properties,
             dialogue_id1,
             cutPointType,
             curCutPointType,
             isKeep,
             LAG(curCutPointType) OVER ( PARTITION BY dialogue_id1 ORDER BY msg_start_time) AS preCutPointType
         FROM keep_cutpoint_join
     );

The previous SQL implemented this segmentation-smoothing logic by first grouping the data by dialogue_id1, running a UDAF to get the aggregated result, and then joining that result back onto the original detail rows via msg_id. This is exactly the pattern that forks the plan: it is not only slow, it also re-executes upstream compute nodes and drives the runtime up. The relevance aggregation further downstream was written in much the same way. With that, the problem was clear: the "aggregate, then join the result back to the main table" pattern had to be replaced with something more efficient, namely a UDAF applied in an OVER aggregation, which removes the GROUP BY + LATERAL TABLE + JOIN entirely.
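Stripped of the business columns, the rewrite follows this general shape (my_udaf, detail_view, k and v are placeholder names, not identifiers from the job):

-- Before: aggregate per key, then join the aggregate back onto the detail rows
SELECT d.*, a.agg_res
FROM detail_view d
LEFT JOIN (
    SELECT k, my_udaf(v) AS agg_res
    FROM detail_view
    GROUP BY k
) a ON d.k = a.k;

-- After: the same per-key result attached to every detail row in a single pass
SELECT d.*, my_udaf(v) OVER (PARTITION BY k) AS agg_res
FROM detail_view d;

Because the detail view is now read only once, the planner no longer duplicates the upstream pipeline to feed both the aggregation and the join.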

The optimized SQL:

-- Mark feature-based cut points on the rows according to the feature configuration
CREATE TEMPORARY VIEW gen_cut_type_by_feature_view AS
SELECT *,
       GenCutPointTypeByFeature(channel_session_type, tenant_id, tags) AS cutPointType
FROM gen_dialogue_id_by_cut_flag_view;


CREATE TEMPORARY VIEW keep_cutpoint_view AS
SELECT *,
       smooth_result[msg_id]['is_keep'] AS isKeep,
       CASE WHEN smooth_result[msg_id]['is_keep'] = '0' THEN 'no' ELSE cutPointType END AS curCutPointType
FROM(
        SELECT *,
               smoothCutPoint(channel_session_type, tenant_id, dialogue_id1, msg_id, msg_start_time, tags, cutPointType) OVER ( PARTITION BY dialogue_id1) AS smooth_result
        FROM gen_cut_type_by_feature_view
    );

CREATE TEMPORARY VIEW gen_dialogue_id_by_feature_view0 AS
SELECT
    date_id,
    tenant_id,
    brand_id,
    channel,
    channel_app_id,
    channel_session_type,
    msg_id,
    msg_start_time,
    msg_end_time,
    msg_from_id,
    msg_from_nk,
    msg_from_orig_id,
    msg_from_role,
    msg_to_ids,
    msg_to_users,
    msg_type,
    msg_content,
    msg_detail,
    group_chat_info,
    room_id,
    operation_flags,
    recording_properties,
    asr_properties,
    metric_properties,
    tags,
    tag_properties,
    dialogue_properties,
    CONCAT_WS(
            '-',
            CAST(tenant_id AS STRING),
            room_id,
            date_id,
            CAST(
                    SUM( CASE WHEN dialogue_id1 <> preDialogueId OR preCutPointType IS NULL OR preCutPointType = 'end' OR curCutPointType = 'start' THEN 1 ELSE 0 END) OVER (PARTITION BY tenant_id, room_id, date_id ORDER BY msg_start_time)
                AS STRING)
        ) AS dialogue_id
FROM (
         SELECT
             date_id,
             tenant_id,
             brand_id,
             channel,
             channel_app_id,
             channel_session_type,
             msg_id,
             msg_start_time,
             msg_end_time,
             msg_from_id,
             msg_from_nk,
             msg_from_orig_id,
             msg_from_role,
             msg_to_ids,
             msg_to_users,
             msg_type,
             msg_content,
             msg_detail,
             group_chat_info,
             room_id,
             operation_flags,
             recording_properties,
             asr_properties,
             metric_properties,
             tags,
             tag_properties,
             dialogue_properties,
             dialogue_id1,
             cutPointType,
             curCutPointType,
             LAG(dialogue_id1) OVER ( PARTITION BY tenant_id, room_id, date_id ORDER BY msg_start_time) AS preDialogueId,
             LAG(curCutPointType) OVER ( PARTITION BY tenant_id, room_id, date_id ORDER BY msg_start_time) AS preCutPointType
         FROM keep_cutpoint_view
     );

The relevance computation was optimized with the same idea: switch to a UDAF applied in an OVER aggregation, removing the step of joining the aggregated result back onto the original table.

Relevance SQL, before and after:

CREATE TEMPORARY VIEW dialogue_relevant_view AS
SELECT
    `tenant_id`,
    `brand_id`,
    `channel`,
    `channel_app_id`,
    `channel_session_type`,
    `date_id`,
    dialogue_id as dialogue_id,
    res.relevant_config_version as relevant_config_version ,
    res.relevant_config as relevant_config ,
    res.metrics as metrics ,
    res.dialogue_relevant as dialogue_relevant
FROM (select dialogue_relevant_udaf(channel_session_type, tenant_id, msg_id, msg_start_time, msg_end_time, msg_from_role,tags) as res,
             `tenant_id`,
             `brand_id`,
             `channel`,
             `channel_app_id`,
             `channel_session_type`,
             `date_id`,`dialogue_id`
      from gen_dialogue_id_by_feature_view
      group by `tenant_id`,
               `brand_id`,
               `channel`,
               `channel_app_id`,
               `channel_session_type`,
               `date_id`,`dialogue_id`);

CREATE TEMPORARY VIEW dialogue_view_all AS
select
    NOW() as `__etl_time__`,
    a.date_id,
    a.tenant_id,
    a.brand_id,
    a.channel,
    a.channel_app_id,
    a.channel_session_type,
    a.msg_id,
    a.msg_start_time,
    a.msg_end_time,
    a.msg_from_id,
    a.msg_from_nk,
    a.msg_from_orig_id,
    a.msg_from_role,
    a.msg_to_ids,
    a.msg_to_users,
    a.msg_type,
    a.msg_content,
    a.msg_detail,
    a.group_chat_info,
    a.room_id,
    a.operation_flags,
    a.recording_properties,
    a.asr_properties,
    a.metric_properties,
    a.tags,
    a.tag_properties,
    map_put(map_put(a.dialogue_properties , 'dialogue_relevant' , b.dialogue_relevant),'relevant_config',b.relevant_config)  as dialogue_properties,
    a.dialogue_id1,
    a.cutPointType,
    a.curCutPointType,
    a.preCutPointType,
    a.isKeep,
    a.dialogue_id
from  gen_dialogue_id_by_feature_view  a
          left join dialogue_relevant_view b
                    on  a.tenant_id = b.tenant_id and
                        a.brand_id = b.brand_id and
                        a.channel = b.channel and
                        a.channel_app_id = b.channel_app_id and
                        a.channel_session_type = b.channel_session_type and
                        a.dialogue_id = b.dialogue_id;
                        
#####################################

CREATE TEMPORARY VIEW dialogue_view AS
select
    date_id,
    tenant_id,
    brand_id,
    channel,
    channel_app_id,
    channel_session_type,
    msg_id,
    msg_start_time,
    msg_end_time,
    msg_from_id,
    msg_from_nk,
    msg_from_orig_id,
    msg_from_role,
    msg_to_ids,
    msg_to_users,
    msg_type,
    msg_content,
    msg_detail,
    group_chat_info,
    room_id,
    operation_flags,
    recording_properties,
    asr_properties,
    metric_properties,
    tags,
    tag_properties,
    dialogue_properties,
    dialogue_id,
    dialogue_relevant_udaf(channel_session_type, tenant_id, msg_id, msg_start_time, msg_end_time, msg_from_role,tags) OVER (PARTITION BY `tenant_id`,
               `brand_id`,
               `channel`,
               `channel_app_id`,
               `channel_session_type`,
               `date_id`,`dialogue_id`) AS res
from gen_dialogue_id_by_feature_view ;
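With the OVER version every message row already carries the full res structure, so the downstream view can project the relevance fields straight out of it instead of joining them back in. A trimmed sketch, assuming the remaining columns simply pass through (the field names are those of dialogue_relevant_view above):

-- Read the per-dialogue relevance fields directly from res, no join needed
SELECT
    msg_id,
    dialogue_id,
    map_put(map_put(dialogue_properties, 'dialogue_relevant', res.dialogue_relevant),
            'relevant_config', res.relevant_config) AS dialogue_properties
FROM dialogue_view;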

3. Optimization Results

The optimized execution plan is much cleaner and the job runs noticeably faster: the runtime dropped from nearly 40 minutes to about 7 minutes, a huge improvement.

[Figure: the execution plan after optimization]
