Source-Level Analysis of Quartz Cluster Mode



Setting Up the Example

Create a Job implementation class

package org.quartz.examples.example13;

import org.quartz.*;

import java.util.Date;

/**
 * This job has the same functionality as SimpleRecoveryJob, except that this job is 'stateful': its
 * data (JobDataMap) is automatically re-persisted after each execution, and only one instance of the
 * JobDetail can be executed at a time.
 *
 * @author Bill Kratzer
 */
@PersistJobDataAfterExecution
@DisallowConcurrentExecution
public class SimpleRecoveryStatefulJob implements Job {

    public SimpleRecoveryStatefulJob() {

    }


    private static final String COUNT = "count";

    /**
     * <p>
     * Called by the <code>{@link org.quartz.Scheduler}</code> when a <code>{@link org.quartz.Trigger}</code> fires that
     * is associated with the <code>Job</code>.
     * </p>
     *
     * @throws JobExecutionException if there is an exception while executing the job.
     */
    public void execute(JobExecutionContext context) throws JobExecutionException {

        JobKey jobKey = context.getJobDetail().getKey();

        // if the job is recovering print a message
        if (context.isRecovering()) {
            System.err.println("SimpleRecoveryJob: " + jobKey + " RECOVERING at " + new Date());
        } else {
            System.err.println("SimpleRecoveryJob: " + jobKey + " starting at " + new Date());
        }

        // delay for ten seconds
        long delay = 10L * 1000L;
        try {
            Thread.sleep(delay);
        } catch (Exception e) {
            //
        }

        JobDataMap data = context.getJobDetail().getJobDataMap();
        int count;
        if (data.containsKey(COUNT)) {
            count = data.getInt(COUNT);
        } else {
            count = 0;
        }
        count++;
        data.put(COUNT, count);

        System.err.println("SimpleRecoveryJob: " + jobKey + " done at " + new Date() + "\n Execution #" + count);

    }

}

Create a test class

/*
 * All content copyright Terracotta, Inc., unless otherwise indicated. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 */

package org.quartz.examples.example13;

import org.quartz.DateBuilder.IntervalUnit;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SchedulerFactory;
import org.quartz.SimpleTrigger;
import org.quartz.impl.StdSchedulerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static org.quartz.DateBuilder.futureDate;
import static org.quartz.JobBuilder.newJob;
import static org.quartz.SimpleScheduleBuilder.simpleSchedule;
import static org.quartz.TriggerBuilder.newTrigger;

/**
 * Used to test/show the clustering features of JDBCJobStore (JobStoreTX or JobStoreCMT).
 * <p>
 * All instances MUST use a different properties file, because their instance Ids must be different, however all other
 * properties should be the same.
 * </p>
 * <p>
 * If you want it to clear out existing jobs & triggers, pass a command-line argument called "clearJobs".
 * </p>
 * <p>
 * You should probably start with a "fresh" set of tables (assuming you may have some data lingering in it from other
 * tests), since mixing data from a non-clustered setup with a clustered one can be bad.
 * </p>
 * <p>
 * Try killing one of the cluster instances while they are running, and see that the remaining instance(s) recover the
 * in-progress jobs. Note that detection of the failure may take up to 15 or so seconds with the default settings.
 * </p>
 * <p>
 * Also try running it with/without the shutdown-hook plugin registered with the scheduler.
 * (org.quartz.plugins.management.ShutdownHookPlugin).
 * </p>
 * <p>
 * <i>Note:</i> Never run clustering on separate machines, unless their clocks are synchronized using some form of
 * time-sync service (such as an NTP daemon).
 * </p>
 *
 * @author James House
 * @see SimpleRecoveryJob
 * @see SimpleRecoveryStatefulJob
 */
public class ClusterExample {

    private static Logger _log = LoggerFactory.getLogger(ClusterExample.class);

    public void run(boolean inClearJobs, boolean inScheduleJobs) throws Exception {

        // First we must get a reference to a scheduler
        SchedulerFactory sf = new StdSchedulerFactory();
        Scheduler sched = sf.getScheduler();

        if (inClearJobs) {
            _log.warn("***** Deleting existing jobs/triggers *****");
            sched.clear();
        }

        _log.info("------- Initialization Complete -----------");

        if (inScheduleJobs) {

            _log.info("------- Scheduling Jobs ------------------");

            String schedId = sched.getSchedulerInstanceId();

            int count = 1;
            
            // Put jobs/triggers in a group named after the cluster node instance, just to
            // distinguish (in logging) what was scheduled from where.
            JobDetail job = newJob(SimpleRecoveryStatefulJob.class)
                    .withIdentity("job_" + count, schedId)
                    // ask the scheduler to re-execute this job if it was in progress when the scheduler went down...
                    .requestRecovery()
                    .build();

            SimpleTrigger trigger = newTrigger()
                    .withIdentity("trigger_" + count, schedId)
                    .startAt(futureDate(1, IntervalUnit.SECOND))
                    .withSchedule(simpleSchedule().withRepeatCount(10).withIntervalInSeconds(30))
                    .build();

            _log.info(job.getKey() + " will run at: " + trigger.getNextFireTime() + " and repeat: "
                    + trigger.getRepeatCount() + " times, every " + trigger.getRepeatInterval() / 1000 + " seconds");
            sched.scheduleJob(job, trigger);
        }

        // jobs don't start firing until start() has been called...
        _log.info("------- Starting Scheduler ---------------");
        sched.start();
        _log.info("------- Started Scheduler ----------------");

        _log.info("------- Waiting for one hour... ----------");
        try {
            Thread.sleep(3600L * 1000L);
        } catch (Exception e) {
            //
        }

        _log.info("------- Shutting Down --------------------");
        sched.shutdown();
        _log.info("------- Shutdown Complete ----------------");
    }

    public static void main(String[] args) throws Exception {
        boolean clearJobs = true;
        boolean scheduleJobs = true;

        for (String arg : args) {
            if (arg.equalsIgnoreCase("clearJobs")) {
                clearJobs = true;
            } else if (arg.equalsIgnoreCase("dontScheduleJobs")) {
                scheduleJobs = false;
            }
        }

        ClusterExample example = new ClusterExample();
        example.run(clearJobs, scheduleJobs);
    }
}

To run two instances here we need two properties files, and each must explicitly specify its own instance id. The contents of instance1.properties (instance instance_one) are as follows:


#============================================================================
# Configure Main Scheduler Properties  
#============================================================================

org.quartz.scheduler.instanceName: TestScheduler
org.quartz.scheduler.instanceId: instance_one

org.quartz.scheduler.skipUpdateCheck: true

#============================================================================
# Configure ThreadPool  
#============================================================================

org.quartz.threadPool.class: org.quartz.simpl.SimpleThreadPool
org.quartz.threadPool.threadCount: 5
org.quartz.threadPool.threadPriority: 5

#============================================================================
# Configure JobStore  
#============================================================================

org.quartz.jobStore.misfireThreshold: 60000

org.quartz.jobStore.class=org.quartz.impl.jdbcjobstore.JobStoreTX
org.quartz.jobStore.useProperties=false
org.quartz.jobStore.driverDelegateClass = org.quartz.impl.jdbcjobstore.StdJDBCDelegate
org.quartz.jobStore.tablePrefix = QRTZ_
org.quartz.jobStore.dataSource = myDS
org.quartz.jobStore.isClustered = true

#============================================================================
# Configure Datasources
#============================================================================

org.quartz.dataSource.myDS.connectionProvider.class:com.alibaba.druid.support.quartz.DruidQuartzConnectionProvider
org.quartz.dataSource.myDS.driverClassName = com.mysql.cj.jdbc.Driver
org.quartz.dataSource.myDS.url = jdbc:mysql://191.168.1.60:3306/quartz?characterEncoding=utf-8
org.quartz.dataSource.myDS.username = tools_user
org.quartz.dataSource.myDS.password = xams_tools_20230714
org.quartz.dataSource.myDS.maxActive: 5
org.quartz.dataSource.myDS.validationQuery: select 0

#============================================================================
# Other Example Delegates
#============================================================================
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.DB2v6Delegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.DB2v7Delegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.DriverDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.HSQLDBDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.MSSQLDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.PointbaseDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.PostgreSQLDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.StdJDBCDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.WebLogicDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.oracle.OracleDelegate
#org.quartz.jobStore.driverDelegateClass=org.quartz.impl.jdbcjobstore.oracle.WebLogicOracleDelegate

#============================================================================
# Configure Plugins 
#============================================================================

#org.quartz.plugin.shutdownHook.class: org.quartz.plugins.management.ShutdownHookPlugin
#org.quartz.plugin.shutdownHook.cleanShutdown: true


#org.quartz.plugin.triggHistory.class: org.quartz.plugins.history.LoggingJobHistoryPlugin

instance2.properties (instance instance_two) is identical except that org.quartz.scheduler.instanceId is set to instance_two.

To run the demo, configure two launch configurations and point each at a different properties file via the org.quartz.properties startup parameter.
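
As a minimal illustration (the class name ClusterNodeLauncher is hypothetical and not part of the Quartz examples), each launcher can either pass -Dorg.quartz.properties=instance1.properties as a JVM option or hand the file name straight to StdSchedulerFactory:

package org.quartz.examples.example13;

import org.quartz.Scheduler;
import org.quartz.impl.StdSchedulerFactory;

// Hypothetical helper: starts one cluster node against its own properties file.
// Run a second copy with "instance2.properties" to form the two-node cluster.
public class ClusterNodeLauncher {

    public static void main(String[] args) throws Exception {
        // Equivalent to launching with -Dorg.quartz.properties=instance1.properties
        Scheduler scheduler = new StdSchedulerFactory("instance1.properties").getScheduler();
        scheduler.start();
    }
}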
Source Code Analysis

In cluster mode Quartz uses a database lock rather than an ordinary in-memory lock.
See org.quartz.impl.jdbcjobstore.JobStoreSupport#initialize:

// If the user hasn't specified an explicit lock handler, then 
// choose one based on CMT/Clustered/UseDBLocks.
if (getLockHandler() == null) {
    
    // If the user hasn't specified an explicit lock handler, 
    // then we *must* use DB locks with clustering
    if (isClustered()) {
        // cluster mode: force database locks
        setUseDBLocks(true);
    }
    
    if (getUseDBLocks()) {
        // use database row locks
        // ... (database-specific handling elided)
        getLog().info("Using db table-based data access locking (synchronization).");
        setLockHandler(new StdRowLockSemaphore(getTablePrefix(), getInstanceName(), getSelectWithLockSQL()));
    } else {
        getLog().info(
            "Using thread monitor-based data access locking (synchronization).");
        setLockHandler(new SimpleSemaphore());
    }
}

StdRowLockSemaphore serializes concurrent access to shared scheduler data through the QRTZ_LOCKS table in the database, keeping the data consistent. When this semaphore is constructed, its internal fields are initialized, for example:

sql = "SELECT * FROM {0}LOCKS WHERE SCHED_NAME = {1} AND LOCK_NAME = ? FOR UPDATE"
insertSql = "INSERT INTO {0}LOCKS(SCHED_NAME, LOCK_NAME) VALUES ({1}, ?)"
tablePrefix = "QRTZ_"
schedName = "TestScheduler"
expandedSQL = "SELECT * FROM QRTZ_LOCKS WHERE SCHED_NAME = 'TestScheduler' AND LOCK_NAME = ? FOR UPDATE"
expandedInsertSQL = "INSERT INTO QRTZ_LOCKS(SCHED_NAME, LOCK_NAME) VALUES ('TestScheduler', ?)"
schedNameLiteral = "'TestScheduler'"

DBSemaphore#obtainLock acquires the lock by executing the expandedSQL shown above; if the corresponding lock row does not exist yet, it is inserted using expandedInsertSQL.
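
To make the pattern concrete, the following is a simplified JDBC sketch of the same idea, not the actual Quartz code; the scheduler name is hard-coded and connection/transaction handling is assumed to be managed by the caller:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Simplified illustration of the row-lock pattern used by StdRowLockSemaphore.
// A SELECT ... FOR UPDATE on QRTZ_LOCKS blocks other cluster nodes on the same
// row until the surrounding transaction commits or rolls back.
class RowLockSketch {

    void obtainRowLock(Connection conn, String lockName) throws SQLException {
        String selectForUpdate =
                "SELECT * FROM QRTZ_LOCKS WHERE SCHED_NAME = 'TestScheduler' AND LOCK_NAME = ? FOR UPDATE";
        try (PreparedStatement ps = conn.prepareStatement(selectForUpdate)) {
            ps.setString(1, lockName);
            try (ResultSet rs = ps.executeQuery()) {
                if (!rs.next()) {
                    // No row for this lock name yet: insert it; the caller then retries the SELECT.
                    String insert =
                            "INSERT INTO QRTZ_LOCKS(SCHED_NAME, LOCK_NAME) VALUES ('TestScheduler', ?)";
                    try (PreparedStatement ins = conn.prepareStatement(insert)) {
                        ins.setString(1, lockName);
                        ins.executeUpdate();
                    }
                }
            }
        }
        // The row lock is held until commit/rollback on this connection.
    }
}

Whichever node issues the SELECT ... FOR UPDATE first holds the row; every other node blocks on the same statement, which is how the cluster serializes trigger acquisition and recovery.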

In cluster mode a background thread, ClusterManager, is also started. It periodically checks this node in and cleans up nodes that have not checked in for too long. See JobStoreSupport#schedulerStarted:

if (isClustered()) {
    // spawn the cluster management thread
    clusterManagementThread = new ClusterManager();
    if(initializersLoader != null)
        clusterManagementThread.setContextClassLoader(initializersLoader);
    clusterManagementThread.initialize();
} else {
    try {
        recoverJobs();
    } catch (SchedulerException se) {
        throw new SchedulerConfigException(
                "Failure occured during job recovery.", se);
    }
}

This eventually ends up in org.quartz.impl.jdbcjobstore.JobStoreSupport#doCheckin:

protected boolean doCheckin() throws JobPersistenceException {
    boolean transOwner = false;
    boolean transStateOwner = false;
    boolean recovered = false;

    Connection conn = getNonManagedTXConnection();
    try {
        // Other than the first time, always checkin first to make sure there is 
        // work to be done before we acquire the lock (since that is expensive, 
        // and is almost never necessary).  This must be done in a separate
        // transaction to prevent a deadlock under recovery conditions.
        List<SchedulerStateRecord> failedRecords = null;
        if (!firstCheckIn) {
            failedRecords = clusterCheckIn(conn);
            commitConnection(conn);
        }
        
        if (firstCheckIn || (failedRecords.size() > 0)) {
            getLockHandler().obtainLock(conn, LOCK_STATE_ACCESS);
            transStateOwner = true;

            // Now that we own the lock, make sure we still have work to do. 
            // The first time through, we also need to make sure we update/create our state record
            failedRecords = (firstCheckIn) ? clusterCheckIn(conn) : findFailedInstances(conn);

            if (failedRecords.size() > 0) {
                getLockHandler().obtainLock(conn, LOCK_TRIGGER_ACCESS);
                //getLockHandler().obtainLock(conn, LOCK_JOB_ACCESS);
                transOwner = true;

                clusterRecover(conn, failedRecords);
                recovered = true;
            }
        }
        
        commitConnection(conn);
    } catch (JobPersistenceException e) {
        rollbackConnection(conn);
        throw e;
    } finally {
        try {
            releaseLock(LOCK_TRIGGER_ACCESS, transOwner);
        } finally {
            try {
                releaseLock(LOCK_STATE_ACCESS, transStateOwner);
            } finally {
                cleanupConnection(conn);
            }
        }
    }

    firstCheckIn = false;

    return recovered;
}

The firstCheckIn flag marks whether this is the node's first check-in. On the first check-in, the jobs and trigger states belonging to the current node must also be recovered. For that reason, when org.quartz.impl.jdbcjobstore.JobStoreSupport#findFailedInstances searches for failed instances on the first pass, it always treats the current instance as failed, so that its state gets recovered afterwards. For other nodes, the current time is compared against their last check-in time plus a threshold: if a node has not checked in within that window, it is also considered failed. On the first check-in, JobStoreSupport#findOrphanedFailedInstances additionally looks up the instances referenced by rows in QRTZ_FIRED_TRIGGERS and treats them as failed (orphaned) as well. The failure-detection logic looks like this:

/**
 * Get a list of all scheduler instances in the cluster that may have failed.
 * This includes this scheduler if it is checking in for the first time.
 */
protected List<SchedulerStateRecord> findFailedInstances(Connection conn)
    throws JobPersistenceException {
    try {
        List<SchedulerStateRecord> failedInstances = new LinkedList<SchedulerStateRecord>();
        boolean foundThisScheduler = false;
        long timeNow = System.currentTimeMillis();
        
        List<SchedulerStateRecord> states = getDelegate().selectSchedulerStateRecords(conn, null);

        for(SchedulerStateRecord rec: states) {
    
            // find own record...
            if (rec.getSchedulerInstanceId().equals(getInstanceId())) {
                foundThisScheduler = true;
                if (firstCheckIn) {
                    failedInstances.add(rec);
                }
            } else {
                // find failed instances...
                if (calcFailedIfAfter(rec) < timeNow) {
                    failedInstances.add(rec);
                }
            }
        }
        
        // The first time through, also check for orphaned fired triggers.
        if (firstCheckIn) {
            failedInstances.addAll(findOrphanedFailedInstances(conn, states));
        }
        
        // If not the first time but we didn't find our own instance, then
        // Someone must have done recovery for us.
        if ((!foundThisScheduler) && (!firstCheckIn)) {
            // FUTURE_TODO: revisit when handle self-failed-out impl'ed (see FUTURE_TODO in clusterCheckIn() below)
            getLog().warn(
                "This scheduler instance (" + getInstanceId() + ") is still " + 
                "active but was recovered by another instance in the cluster.  " +
                "This may cause inconsistent behavior.");
        }
        
        return failedInstances;
    } catch (Exception e) {
        lastCheckin = System.currentTimeMillis();
        throw new JobPersistenceException("Failure identifying failed instances when checking-in: "
                + e.getMessage(), e);
    }
}
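
The calcFailedIfAfter call above computes the deadline after which a peer counts as failed. Paraphrased from the Quartz 2.x source (treat this as a sketch rather than the verbatim method), it is the peer's last check-in time plus the larger of its declared check-in interval and the time since this node's own last check-in, plus a small fudge factor:

// Sketch of JobStoreSupport#calcFailedIfAfter, paraphrased from the Quartz 2.x source.
// A peer is considered failed once "now" is past this deadline.
protected long calcFailedIfAfter(SchedulerStateRecord rec) {
    return rec.getCheckinTimestamp()
            + Math.max(rec.getCheckinInterval(), System.currentTimeMillis() - lastCheckin)
            + 7500L;
}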

After scanning for failed nodes, the current node performs its own check-in:

protected List<SchedulerStateRecord> clusterCheckIn(Connection conn)
    throws JobPersistenceException {

    List<SchedulerStateRecord> failedInstances = findFailedInstances(conn);
    
    try {
        // FUTURE_TODO: handle self-failed-out

        // check in...
        lastCheckin = System.currentTimeMillis();
        if(getDelegate().updateSchedulerState(conn, getInstanceId(), lastCheckin) == 0) {
            getDelegate().insertSchedulerState(conn, getInstanceId(),
                    lastCheckin, getClusterCheckinInterval());
        }
        
    } catch (Exception e) {
        throw new JobPersistenceException("Failure updating scheduler state when checking-in: "
                + e.getMessage(), e);
    }

    return failedInstances;
}
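
updateSchedulerState and insertSchedulerState ultimately write to the QRTZ_SCHEDULER_STATE table. The sketch below shows roughly equivalent JDBC calls, not the delegate's actual code; column names follow the standard Quartz DDL, and connection handling is assumed to be managed by the caller:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

// Hedged sketch of what the delegate's check-in boils down to.
class CheckinSketch {

    /** Returns true if the UPDATE touched a row; otherwise the caller inserts a fresh state row. */
    boolean updateSchedulerState(Connection conn, String instanceId, long checkinTime) throws SQLException {
        String sql = "UPDATE QRTZ_SCHEDULER_STATE SET LAST_CHECKIN_TIME = ? "
                + "WHERE SCHED_NAME = 'TestScheduler' AND INSTANCE_NAME = ?";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setLong(1, checkinTime);
            ps.setString(2, instanceId);
            return ps.executeUpdate() > 0;
        }
    }

    void insertSchedulerState(Connection conn, String instanceId, long checkinTime, long interval) throws SQLException {
        String sql = "INSERT INTO QRTZ_SCHEDULER_STATE "
                + "(SCHED_NAME, INSTANCE_NAME, LAST_CHECKIN_TIME, CHECKIN_INTERVAL) "
                + "VALUES ('TestScheduler', ?, ?, ?)";
        try (PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setString(1, instanceId);
            ps.setLong(2, checkinTime);
            ps.setLong(3, interval);
            ps.executeUpdate();
        }
    }
}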

Finally, compensation or cleanup is performed for the failed nodes. Finding failed nodes requires the STATE_ACCESS lock, and the compensation work additionally requires the TRIGGER_ACCESS lock.
Failed-node compensation proceeds in the following steps:

  • Query the rows in QRTZ_FIRED_TRIGGERS that belong to the failed node and try to reset the trigger states: BLOCKED -> WAITING and PAUSED_BLOCKED -> PAUSED release blocked triggers, and ACQUIRED -> WAITING releases triggers that were about to fire. If the job disallows concurrent execution, the corresponding QRTZ_TRIGGERS rows are also restored from BLOCKED -> WAITING and PAUSED_BLOCKED -> PAUSED. Only with their states restored can these triggers be selected and fired again.
  • Delete the failed node's rows from QRTZ_FIRED_TRIGGERS; the node is gone and will never execute them:
getDelegate().deleteFiredTriggers(conn,rec.getSchedulerInstanceId());
  • For the fired-trigger rows found in step 1, check whether the matching QRTZ_TRIGGERS row is in the COMPLETE state. Since the fired-trigger rows were deleted in step 2, a COMPLETE state means the trigger has finished, so the trigger and its job are removed:
// Check if any of the fired triggers we just deleted were the last fired trigger
// records of a COMPLETE trigger.
int completeCount = 0;
for (TriggerKey triggerKey : triggerKeys) {

    if (getDelegate().selectTriggerState(conn, triggerKey).
            equals(STATE_COMPLETE)) {
        List<FiredTriggerRecord> firedTriggers =
                getDelegate().selectFiredTriggerRecords(conn, triggerKey.getName(), triggerKey.getGroup());
        if (firedTriggers.isEmpty()) {

            if (removeTrigger(conn, triggerKey)) {
                completeCount++;
            }
        }
    }
}
  • Finally, if the failed node is not the current node, its row is deleted from QRTZ_SCHEDULER_STATE, which removes the failed node from the cluster:
if (!rec.getSchedulerInstanceId().equals(getInstanceId())) {
    getDelegate().deleteSchedulerState(conn,
            rec.getSchedulerInstanceId());
}

At this point the failed-node compensation work is complete.

To sum up, Quartz cluster mode differs from the other modes in two main ways. First, the lock guarding scheduler operations must be one that all instances can share, which in practice means a database row lock. Second, a background thread performs periodic check-ins: it keeps the current instance's lease alive, detects failed nodes, and carries out the compensation for them.

