MyBatis Source Code Study, Part 4: The Second-Level Cache
Caching is used in quite a few places in MyBatis, and every cache implementation implements the same parent interface, Cache, sketched below.
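For reference, the interface in org.apache.ibatis.cache looks roughly like this (a simplified sketch; see the MyBatis source for the authoritative version):
package org.apache.ibatis.cache;

import java.util.concurrent.locks.ReadWriteLock;

// The second-level cache SPI: PerpetualCache, the decorators, RedisCache, etc. all implement it.
public interface Cache {
    String getId();                            // usually the mapper namespace
    void putObject(Object key, Object value);  // store a result list under a CacheKey
    Object getObject(Object key);              // look a result list up
    Object removeObject(Object key);           // invalidate a single entry
    void clear();                              // called when a statement flushes the cache
    int getSize();
    ReadWriteLock getReadWriteLock();          // no longer used by the core framework
}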
1. Loading the Cache Class PerpetualCache
public static void main(String[] args) {
    InputStream inputStream = null;
    try {
        inputStream = Resources.getResourceAsStream("mybatis-config.xml");
        SqlSessionFactory factory = new SqlSessionFactoryBuilder().build(inputStream);
        SqlSession session = factory.openSession();
        UserMapper mapper = session.getMapper(UserMapper.class);
        // first query: goes to the database
        User user = mapper.getUserNameById("20");
        System.err.println(user.getId() + user.getPsnname());
        // same query again: answered from the cache
        user = mapper.getUserNameById("20");
        System.err.println(user.getId() + user.getPsnname());
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
mybatis-config.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE configuration
PUBLIC "-//mybatis.org//DTD Config 3.0//EN"
"http://mybatis.org/dtd/mybatis-3-config.dtd">
<configuration>
<settings>
<setting name="cacheEnabled" value="true"/>
</settings>
...
<mappers>
<mapper resource="mapper/UserMapper.xml"></mapper>
</mappers>
</configuration>
UserMapper.xml
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mapper PUBLIC "-//mybatis.org//DTD Mapper 3.0//EN" "http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.yt.study.mybatis.UserMapper">
<cache eviction="FIFO"></cache>
<select id="getUserNameById" useCache="true">
select * from user where id=#{id}
</select>
</mapper>
SqlSessionFactoryBuilder.build
public SqlSessionFactory build(InputStream inputStream, String environment, Properties properties) {
try {
XMLConfigBuilder parser = new XMLConfigBuilder(inputStream, environment, properties);
return build(parser.parse());
} catch (Exception e) {
throw ExceptionFactory.wrapException("Error building SqlSession.", e);
} finally {
ErrorContext.instance().reset();
try {
if (inputStream != null) {
inputStream.close();
}
} catch (IOException e) {
// Intentionally ignore. Prefer previous error.
}
}
}
public SqlSessionFactory build(Configuration config) {
return new DefaultSqlSessionFactory(config);
}
XMLConfigBuilder.parse
public Configuration parse() {
if (parsed) {
throw new BuilderException("Each XMLConfigBuilder can only be used once.");
}
parsed = true;
parseConfiguration(parser.evalNode("/configuration"));
return configuration;
}
private void parseConfiguration(XNode root) {
try {
// issue #117 read properties first
propertiesElement(root.evalNode("properties"));
Properties settings = settingsAsProperties(root.evalNode("settings"));
loadCustomVfs(settings);
loadCustomLogImpl(settings);
typeAliasesElement(root.evalNode("typeAliases"));
pluginElement(root.evalNode("plugins"));
objectFactoryElement(root.evalNode("objectFactory"));
objectWrapperFactoryElement(root.evalNode("objectWrapperFactory"));
reflectorFactoryElement(root.evalNode("reflectorFactory"));
settingsElement(settings);
// read it after objectFactory and objectWrapperFactory issue #631
environmentsElement(root.evalNode("environments"));
databaseIdProviderElement(root.evalNode("databaseIdProvider"));
typeHandlerElement(root.evalNode("typeHandlers"));
mapperElement(root.evalNode("mappers"));
} catch (Exception e) {
throw new BuilderException("Error parsing SQL Mapper Configuration. Cause: " + e, e);
}
}
The key method is mapperElement:
private void mapperElement(XNode parent) throws Exception {
if (parent != null) {
for (XNode child : parent.getChildren()) {
if ("package".equals(child.getName())) {
String mapperPackage = child.getStringAttribute("name");
configuration.addMappers(mapperPackage);
} else {
String resource = child.getStringAttribute("resource");
String url = child.getStringAttribute("url");
String mapperClass = child.getStringAttribute("class");
if (resource != null && url == null && mapperClass == null) {
// with our configuration (resource only), parsing enters this branch
ErrorContext.instance().resource(resource);
try (InputStream inputStream = Resources.getResourceAsStream(resource)) {
XMLMapperBuilder mapperParser = new XMLMapperBuilder(inputStream, configuration, resource,
configuration.getSqlFragments());
mapperParser.parse();
}
} else if (resource == null && url != null && mapperClass == null) {
ErrorContext.instance().resource(url);
try (InputStream inputStream = Resources.getUrlAsStream(url)) {
XMLMapperBuilder mapperParser = new XMLMapperBuilder(inputStream, configuration, url,
configuration.getSqlFragments());
mapperParser.parse();
}
} else if (resource == null && url == null && mapperClass != null) {
Class<?> mapperInterface = Resources.classForName(mapperClass);
configuration.addMapper(mapperInterface);
} else {
throw new BuilderException(
"A mapper element may only specify a url, resource or class, but not more than one.");
}
}
}
}
}
Our configuration is:
<mappers>
<mapper resource="mapper/UserMapper.xml"></mapper>
</mappers>
so we go down the branch that parses the mapper from its resource. Next, look at mapperParser.parse().
XMLMapperBuilder.parse
This method parses both the cache-ref and cache elements. The cache-ref element lets multiple namespaces share the same cache configuration and instance; we will not dig into it here beyond the short example below, and will focus on the cache element instead.
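For reference, a cache-ref declaration simply points one mapper at the cache of an existing namespace (the OrderMapper namespace here is just an illustration):
<mapper namespace="com.yt.study.mybatis.OrderMapper">
  <!-- reuse the cache configured in UserMapper's namespace -->
  <cache-ref namespace="com.yt.study.mybatis.UserMapper"/>
</mapper>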
public void parse() {
if (!configuration.isResourceLoaded(resource)) {
configurationElement(parser.evalNode("/mapper"));
configuration.addLoadedResource(resource);
bindMapperForNamespace();
}
parsePendingResultMaps();
parsePendingCacheRefs();
parsePendingStatements();
}
private void configurationElement(XNode context) {
try {
String namespace = context.getStringAttribute("namespace");
if (namespace == null || namespace.isEmpty()) {
throw new BuilderException("Mapper's namespace cannot be empty");
}
builderAssistant.setCurrentNamespace(namespace);
cacheRefElement(context.evalNode("cache-ref"));
cacheElement(context.evalNode("cache"));
parameterMapElement(context.evalNodes("/mapper/parameterMap"));
resultMapElements(context.evalNodes("/mapper/resultMap"));
sqlElement(context.evalNodes("/mapper/sql"));
buildStatementFromContext(context.evalNodes("select|insert|update|delete"));
} catch (Exception e) {
throw new BuilderException("Error parsing Mapper XML. The XML location is '" + resource + "'. Cause: " + e, e);
}
}
private void cacheElement(XNode context) {
if (context != null) {
// cache type: defaults to PERPETUAL if not specified
String type = context.getStringAttribute("type", "PERPETUAL");
Class<? extends Cache> typeClass = typeAliasRegistry.resolveAlias(type);
// eviction policy: defaults to LRU
String eviction = context.getStringAttribute("eviction", "LRU");
Class<? extends Cache> evictionClass = typeAliasRegistry.resolveAlias(eviction);
Long flushInterval = context.getLongAttribute("flushInterval");
Integer size = context.getIntAttribute("size");
boolean readWrite = !context.getBooleanAttribute("readOnly", false);
boolean blocking = context.getBooleanAttribute("blocking", false);
Properties props = context.getChildrenAsProperties();
builderAssistant.useNewCache(typeClass, evictionClass, flushInterval, size, readWrite, blocking, props);
}
}
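Each attribute read above maps directly to an attribute on the cache element; a fully specified example (the values are only illustrative, mirroring the official documentation) looks like this:
<cache
  eviction="FIFO"
  flushInterval="60000"
  size="512"
  readOnly="true"
  blocking="false"/>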
BuilderAssistant.useNewCache
A Cache instance is assembled here with the builder pattern; the build method also performs some extra initialization.
public Cache useNewCache(Class<? extends Cache> typeClass, Class<? extends Cache> evictionClass, Long flushInterval,
Integer size, boolean readWrite, boolean blocking, Properties props) {
Cache cache = new CacheBuilder(currentNamespace).implementation(valueOrDefault(typeClass, PerpetualCache.class))
.addDecorator(valueOrDefault(evictionClass, LruCache.class)).clearInterval(flushInterval).size(size)
.readWrite(readWrite).blocking(blocking).properties(props).build();
configuration.addCache(cache);
currentCache = cache;
return cache;
}
CacheBuilder.build
public Cache build() {
// the implementation defaults to PerpetualCache
setDefaultImplementations();
// first instantiate the base cache
Cache cache = newBaseCacheInstance(implementation, id);
// supports setting properties on the cache; the default PerpetualCache has no properties, so this does nothing here, but it matters for custom caches
setCacheProperties(cache);
// issue #352, do not apply decorators to custom caches
if (PerpetualCache.class.equals(cache.getClass())) {
// wrap the base cache with the collected decorators (the LruCache added in setDefaultImplementations, or the configured eviction class)
for (Class<? extends Cache> decorator : decorators) {
cache = newCacheDecoratorInstance(decorator, cache);
setCacheProperties(cache);
}
// then apply the standard decorators
cache = setStandardDecorators(cache);
} else if (!LoggingCache.class.isAssignableFrom(cache.getClass())) {
cache = new LoggingCache(cache);
}
return cache;
}
private void setDefaultImplementations() {
if (implementation == null) {
implementation = PerpetualCache.class;
// by default add LruCache to the decorator list
if (decorators.isEmpty()) {
decorators.add(LruCache.class);
}
}
}
private Cache setStandardDecorators(Cache cache) {
try {
MetaObject metaCache = SystemMetaObject.forObject(cache);
if (size != null && metaCache.hasSetter("size")) {
metaCache.setValue("size", size);
}
// if a flush interval is configured, wrap with ScheduledCache
if (clearInterval != null) {
cache = new ScheduledCache(cache);
((ScheduledCache) cache).setClearInterval(clearInterval);
}
// boolean readWrite = !context.getBooleanAttribute("readOnly", false);
// the XML attribute (and the official docs) use readOnly; during parsing it is inverted into readWrite, which is a slightly confusing name
if (readWrite) {
// not read-only (the default): wrap with SerializedCache so cached objects are deep-copied via serialization
cache = new SerializedCache(cache);
}
// wrap with LoggingCache (logs the hit ratio)
cache = new LoggingCache(cache);
// wrap with SynchronizedCache for thread safety
cache = new SynchronizedCache(cache);
if (blocking) {
// if blocking is enabled, wrap with BlockingCache
cache = new BlockingCache(cache);
}
return cache;
} catch (Exception e) {
throw new CacheException("Error building standard cache decorators. Cause: " + e, e);
}
}
After all these layers of decoration, the final cache ends up with a structure like the one sketched below.
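A hand-built equivalent of that chain, innermost to outermost (assuming a plain <cache/> with no flushInterval, readOnly or blocking set; with our eviction="FIFO" the LruCache layer would be a FifoCache instead):
import org.apache.ibatis.cache.Cache;
import org.apache.ibatis.cache.impl.PerpetualCache;
import org.apache.ibatis.cache.decorators.*;

Cache cache = new PerpetualCache("com.yt.study.mybatis.UserMapper"); // actual storage, a HashMap
cache = new LruCache(cache);          // eviction decorator (FifoCache when eviction="FIFO")
cache = new SerializedCache(cache);   // because readOnly defaults to false
cache = new LoggingCache(cache);      // logs the cache hit ratio
cache = new SynchronizedCache(cache); // outermost by default: thread safety
// a ScheduledCache layer is inserted when flushInterval is set,
// and a BlockingCache becomes the outermost wrapper when blocking="true"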
At this point, cache initialization is complete. Next, let's look at how the cache is used.
2. Using the Second-Level Cache
The effect we want to see: only the first call queries the database; the second, identical call does not touch the database because the cache is hit.
DefaultSqlSessionFactory.openSession
When factory.openSession() is called, an executor is created.
@Override
public SqlSession openSession() {
return openSessionFromDataSource(configuration.getDefaultExecutorType(), null, false);
}
private SqlSession openSessionFromDataSource(ExecutorType execType, TransactionIsolationLevel level,
boolean autoCommit) {
Transaction tx = null;
try {
final Environment environment = configuration.getEnvironment();
final TransactionFactory transactionFactory = getTransactionFactoryFromEnvironment(environment);
tx = transactionFactory.newTransaction(environment.getDataSource(), level, autoCommit);
final Executor executor = configuration.newExecutor(tx, execType);
return new DefaultSqlSession(configuration, executor, autoCommit);
} catch (Exception e) {
closeTransaction(tx); // may have fetched a connection so lets call close()
throw ExceptionFactory.wrapException("Error opening session. Cause: " + e, e);
} finally {
ErrorContext.instance().reset();
}
}
Configuration.newExecutor
One of the conditions checked here is cacheEnabled, the global switch for the second-level cache, which is on by default. When it is enabled, the chosen executor (SimpleExecutor in our case) is wrapped in a CachingExecutor.
public Executor newExecutor(Transaction transaction, ExecutorType executorType) {
executorType = executorType == null ? defaultExecutorType : executorType;
Executor executor;
if (ExecutorType.BATCH == executorType) {
executor = new BatchExecutor(this, transaction);
} else if (ExecutorType.REUSE == executorType) {
executor = new ReuseExecutor(this, transaction);
} else {
executor = new SimpleExecutor(this, transaction);
}
if (cacheEnabled) {
executor = new CachingExecutor(executor);
}
return (Executor) interceptorChain.pluginAll(executor);
}
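If you do not want that wrapping, the switch can be turned off globally in mybatis-config.xml (or per statement with useCache="false" on the select element):
<settings>
  <setting name="cacheEnabled" value="false"/>
</settings>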
DefaultSqlSession.selectOne
Once the DefaultSqlSession has been created, the mapper call goes through its selectOne method: we query by the user's primary key id, so at most one row can come back and selectOne is the entry point. After a couple of overloads the call lands in selectList.
@Override
public <T> T selectOne(String statement) {
return this.selectOne(statement, null);
}
public <T> T selectOne(String statement, Object parameter) {
// Popular vote was to return null on 0 results and throw exception on too many.
List<T> list = this.selectList(statement, parameter);
if (list.size() == 1) {
return list.get(0);
}
if (list.size() > 1) {
throw new TooManyResultsException(
"Expected one result (or null) to be returned by selectOne(), but found: " + list.size());
} else {
return null;
}
}
@Override
public <E> List<E> selectList(String statement, Object parameter) {
return this.selectList(statement, parameter, RowBounds.DEFAULT);
}
@Override
public <E> List<E> selectList(String statement, Object parameter, RowBounds rowBounds) {
return selectList(statement, parameter, rowBounds, Executor.NO_RESULT_HANDLER);
}
private <E> List<E> selectList(String statement, Object parameter, RowBounds rowBounds, ResultHandler handler) {
try {
MappedStatement ms = configuration.getMappedStatement(statement);
dirty |= ms.isDirtySelect();
return executor.query(ms, wrapCollection(parameter), rowBounds, handler);
} catch (Exception e) {
throw ExceptionFactory.wrapException("Error querying database. Cause: " + e, e);
} finally {
ErrorContext.instance().reset();
}
}
CachingExecutor.query
The outermost wrapper of the executor is the CachingExecutor, so the call enters CachingExecutor first, which in turn calls createCacheKey.
@Override
public <E> List<E> query(MappedStatement ms, Object parameterObject, RowBounds rowBounds, ResultHandler resultHandler)
throws SQLException {
BoundSql boundSql = ms.getBoundSql(parameterObject);
CacheKey key = createCacheKey(ms, parameterObject, rowBounds, boundSql);
return query(ms, parameterObject, rowBounds, resultHandler, key, boundSql);
}
@Override
public CacheKey createCacheKey(MappedStatement ms, Object parameterObject, RowBounds rowBounds, BoundSql boundSql) {
return delegate.createCacheKey(ms, parameterObject, rowBounds, boundSql);
}
BaseExecutor.createCacheKey
A cache key is built here from several inputs: the statement id, the row bounds, the SQL text, the parameter values and, if configured, the environment id.
@Override
public CacheKey createCacheKey(MappedStatement ms, Object parameterObject, RowBounds rowBounds, BoundSql boundSql) {
if (closed) {
throw new ExecutorException("Executor was closed.");
}
CacheKey cacheKey = new CacheKey();
cacheKey.update(ms.getId());
cacheKey.update(rowBounds.getOffset());
cacheKey.update(rowBounds.getLimit());
cacheKey.update(boundSql.getSql());
List<ParameterMapping> parameterMappings = boundSql.getParameterMappings();
TypeHandlerRegistry typeHandlerRegistry = ms.getConfiguration().getTypeHandlerRegistry();
// mimic DefaultParameterHandler logic
for (ParameterMapping parameterMapping : parameterMappings) {
if (parameterMapping.getMode() != ParameterMode.OUT) {
Object value;
String propertyName = parameterMapping.getProperty();
if (boundSql.hasAdditionalParameter(propertyName)) {
value = boundSql.getAdditionalParameter(propertyName);
} else if (parameterObject == null) {
value = null;
} else if (typeHandlerRegistry.hasTypeHandler(parameterObject.getClass())) {
value = parameterObject;
} else {
MetaObject metaObject = configuration.newMetaObject(parameterObject);
value = metaObject.getValue(propertyName);
}
cacheKey.update(value);
}
}
if (configuration.getEnvironment() != null) {
// issue #176
cacheKey.update(configuration.getEnvironment().getId());
}
return cacheKey;
}
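Two CacheKeys built from the same sequence of updates are equal, which is what makes the lookup work. A minimal illustration using the statement and parameter from our example (the bound SQL is the statement with #{id} replaced by ?):
import org.apache.ibatis.cache.CacheKey;

CacheKey a = new CacheKey();
a.update("com.yt.study.mybatis.UserMapper.getUserNameById"); // statement id
a.update(0);                                                 // RowBounds offset
a.update(Integer.MAX_VALUE);                                 // RowBounds limit
a.update("select * from user where id=?");                   // bound SQL
a.update("20");                                              // parameter value

CacheKey b = new CacheKey();
b.update("com.yt.study.mybatis.UserMapper.getUserNameById");
b.update(0);
b.update(Integer.MAX_VALUE);
b.update("select * from user where id=?");
b.update("20");

System.out.println(a.equals(b)); // true: the same updates produce an equal key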
CachingExecutor.query
Back in CachingExecutor.query: on the first call there is no entry for this key, so the query goes to the database and the result is staged into the cache through the TransactionalCacheManager (tcm); staged entries are only published to the shared namespace cache when the session commits or closes. Later queries then fetch the data from the cache by cacheKey.
Also note that insert, update and delete statements flush this cache by default (see flushCacheIfRequired).
@Override
public <E> List<E> query(MappedStatement ms, Object parameterObject, RowBounds rowBounds, ResultHandler resultHandler,
CacheKey key, BoundSql boundSql) throws SQLException {
// this cache is the one built during initialization from the <cache/> element in UserMapper.xml
Cache cache = ms.getCache();
if (cache != null) {
flushCacheIfRequired(ms);
if (ms.isUseCache() && resultHandler == null) {
ensureNoOutParams(ms, boundSql);
@SuppressWarnings("unchecked")
// read from the second-level cache via tcm.getObject
List<E> list = (List<E>) tcm.getObject(cache, key);
if (list == null) {
list = delegate.query(ms, parameterObject, rowBounds, resultHandler, key, boundSql);
// stage the result into the second-level cache (published on commit/close)
tcm.putObject(cache, key, list); // issue #578 and #116
}
return list;
}
}
return delegate.query(ms, parameterObject, rowBounds, resultHandler, key, boundSql);
}
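A minimal sketch for actually observing the second-level cache across sessions (it assumes the setup above; also remember that with readOnly="false" the cached User objects must implement Serializable because of the SerializedCache layer):
// Session 1 queries the database; closing the session commits the
// TransactionalCache and publishes the entry into the shared namespace cache.
try (SqlSession s1 = factory.openSession()) {
    s1.getMapper(UserMapper.class).getUserNameById("20"); // hits the database
}

// Session 2 asks for the same key and is answered from the second-level cache.
try (SqlSession s2 = factory.openSession()) {
    s2.getMapper(UserMapper.class).getUserNameById("20"); // cache hit, no SQL issued
}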
3. Integrating Redis: RedisCache
Add the Maven dependency:
<dependency>
<groupId>org.mybatis.caches</groupId>
<artifactId>mybatis-redis</artifactId>
<version>1.0.0-beta2</version>
</dependency>
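To move a namespace onto Redis, the mapper simply declares the type on its cache element; connection settings are read by the library from a redis.properties file on the classpath (the exact property keys are defined by mybatis-redis, so check its documentation):
<mapper namespace="com.yt.study.mybatis.UserMapper">
  <cache type="org.mybatis.caches.redis.RedisCache"/>
  ...
</mapper>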
The core class is RedisCache:
package org.mybatis.caches.redis;
import java.util.Map;
import java.util.concurrent.locks.ReadWriteLock;
import org.apache.ibatis.cache.Cache;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
public final class RedisCache implements Cache {
private final ReadWriteLock readWriteLock = new DummyReadWriteLock();
private String id;
private static JedisPool pool;
public RedisCache(String id) {
if (id == null) {
throw new IllegalArgumentException("Cache instances require an ID");
} else {
this.id = id;
RedisConfig redisConfig = RedisConfigurationBuilder.getInstance().parseConfiguration();
pool = new JedisPool(redisConfig, redisConfig.getHost(), redisConfig.getPort(), redisConfig.getConnectionTimeout(), redisConfig.getSoTimeout(), redisConfig.getPassword(), redisConfig.getDatabase(), redisConfig.getClientName());
}
}
private Object execute(RedisCallback callback) {
// run the given operation against a pooled Jedis connection, always returning the connection afterwards
Jedis jedis = pool.getResource();
try {
return callback.doWithRedis(jedis);
} finally {
jedis.close();
}
}
public String getId() {
return this.id;
}
public int getSize() {
return (Integer)this.execute(new RedisCallback() {
public Object doWithRedis(Jedis jedis) {
Map<byte[], byte[]> result = jedis.hgetAll(RedisCache.this.id.toString().getBytes());
return result.size();
}
});
}
// jedis.hset
public void putObject(final Object key, final Object value) {
this.execute(new RedisCallback() {
public Object doWithRedis(Jedis jedis) {
jedis.hset(RedisCache.this.id.toString().getBytes(), key.toString().getBytes(), SerializeUtil.serialize(value));
return null;
}
});
}
// jedis.hget
public Object getObject(final Object key) {
return this.execute(new RedisCallback() {
public Object doWithRedis(Jedis jedis) {
return SerializeUtil.unserialize(jedis.hget(RedisCache.this.id.toString().getBytes(), key.toString().getBytes()));
}
});
}
// jedis.hdel
public Object removeObject(final Object key) {
return this.execute(new RedisCallback() {
public Object doWithRedis(Jedis jedis) {
return jedis.hdel(RedisCache.this.id.toString(), new String[]{key.toString()});
}
});
}
// jedis.del
public void clear() {
this.execute(new RedisCallback() {
public Object doWithRedis(Jedis jedis) {
jedis.del(RedisCache.this.id.toString());
return null;
}
});
}
public ReadWriteLock getReadWriteLock() {
return this.readWriteLock;
}
public String toString() {
return "Redis {" + this.id + "}";
}
}
Having seen the Redis-based second-level cache, could we just as easily implement one ourselves on top of some other middleware? All it takes is implementing the Cache interface, as sketched below.
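A minimal sketch of a custom cache (the class name MapCache and its package are made up for illustration). Because a custom cache is not PerpetualCache, CacheBuilder skips the standard decorators, so the implementation has to be thread-safe on its own:
package com.yt.study.mybatis;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReadWriteLock;
import org.apache.ibatis.cache.Cache;

// Registered per namespace with: <cache type="com.yt.study.mybatis.MapCache"/>
public final class MapCache implements Cache {

    private final String id; // MyBatis passes the mapper namespace in here
    private final Map<Object, Object> store = new ConcurrentHashMap<>();

    public MapCache(String id) {
        if (id == null) {
            throw new IllegalArgumentException("Cache instances require an ID");
        }
        this.id = id;
    }

    @Override
    public String getId() {
        return id;
    }

    @Override
    public void putObject(Object key, Object value) {
        // TransactionalCache may publish null for missed entries; ConcurrentHashMap rejects null values
        if (value == null) {
            store.remove(key);
        } else {
            store.put(key, value);
        }
    }

    @Override
    public Object getObject(Object key) {
        return store.get(key);
    }

    @Override
    public Object removeObject(Object key) {
        return store.remove(key);
    }

    @Override
    public void clear() {
        store.clear();
    }

    @Override
    public int getSize() {
        return store.size();
    }

    @Override
    public ReadWriteLock getReadWriteLock() {
        return null; // no longer used by the MyBatis core
    }
}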