当前位置: 代码网 > it编程>编程语言>Java > MyBatis之二级缓存用法及说明

MyBatis之二级缓存用法及说明

2026年05月11日 Java 我要评论
本文从架构总览入手,深入解析 MyBatis 二级缓存的源码实现(CachingExecutor、CacheKey、缓存装饰器链、事务性缓存管理),并给出配置示例与生产环境最佳实践。

1. 架构总览

1.1 二级缓存在mybatis中的位置

MyBatis Runtime
├── Configuration (全局配置)
│   ├── MapperRegistry (Mapper 注册中心)
│   └── Cache (缓存管理器)
├── Executor (执行器)
│   ├── BaseExecutor (基础执行器,含一级缓存)
│   └── CachingExecutor (二级缓存装饰器) ★
└── SqlSession (会话)

2. 源码深度解析

2.1 核心类图解析

// 核心接口和类
public interface cache {
string getid();
void putobject(object key, object value);
object getobject(object key);
object removeobject(object key);
void clear();
int getsize();
readwritelock getreadwritelock();
}

// 默认实现:perpetualcache
public class perpetualcache implements cache {
private final string id;
private final map<object, object> cache = new hashmap<>();
// 基础的hashmap实现
}

// 装饰器模式:各种cache装饰器
public class lrucache implements cache {
private final cache delegate;
private final map<object, object> keymap;
private object eldestkey;
// lru算法实现
}

2.2 cachingexecutor:二级缓存的核心

public class cachingexecutor implements executor {
private final executor delegate;// 被装饰的执行器(通常是simpleexecutor)
private final transactionalcachemanager tcm = new transactionalcachemanager();

@override
public <e> list<e> query(mappedstatement ms,
object parameter,
rowbounds rowbounds,
resulthandler resulthandler,
cachekey key,
boundsql boundsql) throws sqlexception {

// 1. 获取mappedstatement的缓存
cache cache = ms.getcache();

if (cache != null) {
// 2. 检查是否需要刷新缓存
flushcacheifrequired(ms);

// 3. 如果usecache=true且没有设置resulthandler
if (ms.isusecache() && resulthandler == null) {
// 4. 确保没有输出参数
ensurenooutparams(ms, boundsql);

// 5. 从二级缓存获取
@suppresswarnings("unchecked")
list<e> list = (list<e>) tcm.getobject(cache, key);

if (list == null) {
// 6. 缓存未命中,委托给实际执行器查询
list = delegate.query(ms, parameter, rowbounds,
resulthandler, key, boundsql);

// 7. 将结果放入二级缓存
tcm.putobject(cache, key, list);
}
return list;
}
}
// 8. 没有缓存配置,直接查询
return delegate.query(ms, parameter, rowbounds,
resulthandler, key, boundsql);
}
}

2.3 cachekey的生成机制

public class cachekey implements cloneable, serializable {
private static final int default_multiplyer = 37;
private static final int default_hashcode = 17;

private final int multiplier;
private int hashcode;
private long checksum;
private int count;
private list<object> updatelist;// 用于equals比较

public cachekey(object[] objects) {
this.multiplier = default_multiplyer;
this.hashcode = default_hashcode;

// 关键:按顺序组合所有元素生成hash
updateall(objects);
}

public void update(object object) {
int basehashcode = object == null ? 1 :
arrayutil.hashcode(object);

count++;
checksum += basehashcode;
basehashcode *= count;

hashcode = multiplier * hashcode + basehashcode;

updatelist.add(object);
}

// cachekey生成位置:baseexecutor.createcachekey()
public cachekey createcachekey(mappedstatement ms,
object parameterobject,
rowbounds rowbounds,
boundsql boundsql) {
cachekey cachekey = new cachekey();
cachekey.update(ms.getid());// mapper id
cachekey.update(rowbounds.getoffset());// 分页offset
cachekey.update(rowbounds.getlimit());// 分页limit
cachekey.update(boundsql.getsql());// sql语句

// 参数
list<parametermapping> parametermappings =
boundsql.getparametermappings();
typehandlerregistry typehandlerregistry =
ms.getconfiguration().gettypehandlerregistry();

for (parametermapping parametermapping : parametermappings) {
object value;
string propertyname = parametermapping.getproperty();
// ... 获取参数值逻辑
cachekey.update(value);
}

// 环境id
if (ms.getconfiguration().getenvironment() != null) {
cachekey.update(ms.getconfiguration()
.getenvironment().getid());
}

return cachekey;
}
}

2.4 缓存装饰器链的实现

/**
* 装饰器模式的典型应用
* 配置:<cache eviction="lru" flushinterval="60000" size="512" readonly="true"/>
* 实际创建的缓存对象链:
* synchronizedcache
*-> loggingcache
*-> serializedcache
*-> lrucache
*-> perpetualcache
*/
public class cachebuilder {
private class<? extends cache> implementation = perpetualcache.class;
private final list<class<? extends cache>> decorators = new arraylist<>();
private class<? extends cache> evictionclass;
private long flushinterval;
private integer size;
private boolean readwrite;
private properties properties;

public cache build() {
// 1. 创建基础缓存
cache cache = newbasecacheinstance(implementation, id);
setcacheproperties(cache);

// 2. 根据配置添加装饰器
if (perpetualcache.class.equals(cache.getclass())) {
for (class<? extends cache> decorator : decorators) {
cache = newcachedecoratorinstance(decorator, cache);
setcacheproperties(cache);
}
cache = setstandarddecorators(cache);
}

return cache;
}

private cache setstandarddecorators(cache cache) {
try {
metaobject metacache = systemmetaobject.forobject(cache);

// 添加序列化装饰器(如果readonly=false)
if (!readwrite) {
cache = new serializedcache(cache);
}

// 添加日志装饰器
cache = new loggingcache(cache);

// 添加同步装饰器
cache = new synchronizedcache(cache);

// 添加定时清理装饰器
if (flushinterval != null) {
cache = new scheduledcache(cache);
((scheduledcache) cache).setclearinterval(flushinterval);
}

// 添加lru/fifo等装饰器
if (size != null && size > 0) {
cache = new lrucache(cache);
((lrucache) cache).setsize(size);
}

return cache;
} catch (exception e) {
throw new cacheexception("error building standard cache decorators.", e);
}
}
}

2.5 transactionalcachemanager:事务性缓存管理

/**
* 管理事务中的缓存操作
* 关键:只有事务提交后,缓存才会真正生效
*/
public class transactionalcachemanager {
// key: cache对象, value: transactionalcache
private final map<cache, transactionalcache> transactionalcaches =
new hashmap<>();

public object getobject(cache cache, cachekey key) {
return gettransactionalcache(cache).getobject(key);
}

public void putobject(cache cache, cachekey key, object value) {
gettransactionalcache(cache).putobject(key, value);
}

public void commit() {
for (transactionalcache txcache : transactionalcaches.values()) {
txcache.commit();
}
}

public void rollback() {
for (transactionalcache txcache : transactionalcaches.values()) {
txcache.rollback();
}
}

private transactionalcache gettransactionalcache(cache cache) {
return transactionalcaches.computeifabsent(cache,
key -> new transactionalcache(cache));
}
}

public class transactionalcache implements cache {
private final cache delegate;
private boolean clearoncommit;// 提交时是否清空
private final map<object, object> entriestoaddoncommit = new hashmap<>();
private final set<object> entriesmissedincache = new hashset<>();

@override
public void putobject(object key, object value) {
// 不直接放入delegate,先放入临时map
entriestoaddoncommit.put(key, value);
}

public void commit() {
if (clearoncommit) {
delegate.clear();
}
// 提交时批量放入缓存
flushpendingentries();
reset();
}

private void flushpendingentries() {
for (map.entry<object, object> entry :
entriestoaddoncommit.entryset()) {
delegate.putobject(entry.getkey(), entry.getvalue());
}
// 处理未命中的查询
for (object entry : entriesmissedincache) {
if (!entriestoaddoncommit.containskey(entry)) {
delegate.putobject(entry, null); // 缓存null值
}
}
}
}

3. 详细使用案例

3.1 基础配置与使用

<!-- mybatis-config.xml -->
<configuration>
<settings>
<!-- 开启二级缓存(默认true) -->
<setting name="cacheenabled" value="true"/>
<!-- 本地缓存作用域 -->
<setting name="localcachescope" value="session"/>
</settings>

<!-- 使用第三方缓存 -->
<typealiases>
<typealias type="org.mybatis.caches.ehcache.ehcachecache"
alias="eh_cache"/>
</typealiases>
</configuration>

<!-- usermapper.xml -->
<?xml version="1.0" encoding="utf-8"?>
<!doctype mapper public "-//mybatis.org//dtd mapper 3.0//en"
"http://mybatis.org/dtd/mybatis-3-mapper.dtd">
<mapper namespace="com.example.mapper.usermapper">

<!-- 开启二级缓存并详细配置 -->
<cache
type="org.mybatis.caches.ehcache.ehcachecache"
eviction="lru"
flushinterval="60000"
size="1000"
readonly="false"
blocking="false"
properties="timetoliveseconds=3600,timetoidleseconds=1800"/>

<!-- 结果映射 -->
<resultmap id="userresultmap" type="user">
<id property="id" column="id"/>
<result property="username" column="username"/>
<result property="email" column="email"/>
<!-- 注意:关联对象需要单独处理缓存 -->
<association property="department" column="dept_id"
select="com.example.mapper.deptmapper.selectbyid"
fetchtype="lazy"/>
</resultmap>

<!-- 使用缓存的查询 -->
<select id="selectbyid" resultmap="userresultmap" usecache="true">
select * from users where id = #{id}
</select>

<!-- 不使用缓存的查询 -->
<select id="selectforupdate" resultmap="userresultmap" usecache="false">
select * from users where id = #{id} for update
</select>

<!-- 执行前刷新缓存的查询 -->
<select id="selectafterrefresh" resultmap="userresultmap"
flushcache="true">
select * from users where id = #{id}
</select>

<!-- 更新操作,默认flushcache="true" -->
<update id="updateuser" parametertype="user">
update users
set username=#{username}, email=#{email}
where id=#{id}
</update>
</mapper>

3.2 注解方式配置

package com.example.mapper;

import org.apache.ibatis.annotations.*;
import org.apache.ibatis.cache.decorators.lrucache;
import org.apache.ibatis.cache.impl.perpetualcache;
import org.apache.ibatis.mapping.statementtype;

// 方式1:直接使用@cachenamespace
@cachenamespace(
implementation = perpetualcache.class,// 实现类
eviction = lrucache.class,// 回收策略
flushinterval = 60000l,// 刷新间隔
size = 512,// 缓存大小
readwrite = true,// 读写缓存
blocking = false,// 是否阻塞
properties = {
@property(name = "timetoliveseconds", value = "3600"),
@property(name = "timetoidleseconds", value = "1800")
}
)
// 方式2:引用xml配置的缓存
// @cachenamespaceref(usermapper.class)
public interface usermapper {

// 默认使用缓存
@select("select * from users where id = #{id}")
@results(id = "userresult", value = {
@result(property = "id", column = "id", id = true),
@result(property = "username", column = "username"),
@result(property = "email", column = "email")
})
user selectbyid(long id);

// 明确指定使用缓存
@select("select * from users where username = #{username}")
@options(usecache = true, flushcache = options.flushcachepolicy.false)
user selectbyusername(string username);

// 明确指定不使用缓存
@select("select * from users where id = #{id}")
@options(usecache = false)
user selectforupdate(long id);

// 更新操作,刷新缓存
@update("update users set username = #{username} where id = #{id}")
@options(flushcache = options.flushcachepolicy.true)
int updateusername(@param("id") long id, @param("username") string username);

// 存储过程调用,缓存处理
@select(value = "{call get_user_by_id(#{id, mode=in})}",
statementtype = statementtype.callable)
@options(usecache = true)
user selectbyprocedure(long id);
}

3.3 复杂场景:关联查询缓存

/**
* 关联查询的缓存处理方案
*/
public class associationcacheexample {

// deptmapper.java
@cachenamespace
public interface deptmapper {
@select("select * from departments where id = #{id}")
department selectbyid(long id);
}

// usermapper.java - 方案1:嵌套查询(n+1问题)
public interface usermapper {
@select("select * from users where id = #{id}")
@results({
@result(property = "id", column = "id"),
@result(property = "deptid", column = "dept_id"),
@result(property = "department", column = "dept_id",
one = @one(select = "com.example.mapper.deptmapper.selectbyid",
fetchtype = fetchtype.lazy))
})
user selectuserwithdept(long id);
}

// 方案2:联合查询(缓存处理)
@select("select u.*, d.name as dept_name, d.code as dept_code " +
"from users u left join departments d on u.dept_id = d.id " +
"where u.id = #{id}")
@results(id = "userwithdept", value = {
@result(property = "id", column = "id", id = true),
@result(property = "username", column = "username"),
@result(property = "department", javatype = department.class,
resultmap = "com.example.mapper.deptmapper.deptresult")
})
@options(usecache = true)
user selectuserwithdeptjoin(long id);

/**
* 测试关联查询缓存
*/
@test
public void testassociationcache() {
try (sqlsession session = sqlsessionfactory.opensession()) {
usermapper mapper = session.getmapper(usermapper.class);

// 第一次查询:会查询user和dept
system.out.println("第一次查询:");
user user1 = mapper.selectuserwithdept(1l);
session.commit(); // 必须提交

// 第二次查询:user从缓存获取,dept可能从缓存获取
system.out.println("\n第二次查询:");
user user2 = mapper.selectuserwithdept(1l);

// 验证缓存
system.out.println("user对象相同:" + (user1 == user2));
system.out.println("dept对象相同:" +
(user1.getdepartment() == user2.getdepartment()));
}
}
}

3.4 自定义缓存实现

/**
* 自定义redis缓存实现
*/
package com.example.cache;

import org.apache.ibatis.cache.cache;
import redis.clients.jedis.jedis;
import redis.clients.jedis.jedispool;

import java.util.concurrent.locks.readwritelock;
import java.util.concurrent.locks.reentrantreadwritelock;

public class customrediscache implements cache {

private final string id;
private final jedispool jedispool;
private final readwritelock readwritelock = new reentrantreadwritelock();

// 序列化器
private final serializer serializer = new jdkserializer();

public customrediscache(string id) {
this.id = id;
this.jedispool = new jedispool("localhost", 6379);
}

@override
public string getid() {
return id;
}

@override
public void putobject(object key, object value) {
try (jedis jedis = jedispool.getresource()) {
byte[] keybytes = serializer.serialize(key);
byte[] valuebytes = serializer.serialize(value);

jedis.setex(keybytes, 3600, valuebytes); // 1小时过期

// 维护key集合,用于clear操作
jedis.sadd(getkeyssetname(), keybytes);
}
}

@override
public object getobject(object key) {
try (jedis jedis = jedispool.getresource()) {
byte[] keybytes = serializer.serialize(key);
byte[] valuebytes = jedis.get(keybytes);

return valuebytes == null ? null : serializer.deserialize(valuebytes);
}
}

@override
public object removeobject(object key) {
try (jedis jedis = jedispool.getresource()) {
byte[] keybytes = serializer.serialize(key);
byte[] valuebytes = jedis.get(keybytes);

jedis.del(keybytes);
jedis.srem(getkeyssetname(), keybytes);

return valuebytes == null ? null : serializer.deserialize(valuebytes);
}
}

@override
public void clear() {
try (jedis jedis = jedispool.getresource()) {
string keyssetname = getkeyssetname();
set<byte[]> keys = jedis.smembers(keyssetname.getbytes());

if (!keys.isempty()) {
jedis.del(keys.toarray(new byte[0][]));
}
jedis.del(keyssetname);
}
}

@override
public int getsize() {
try (jedis jedis = jedispool.getresource()) {
long size = jedis.scard(getkeyssetname());
return size != null ? size.intvalue() : 0;
}
}

@override
public readwritelock getreadwritelock() {
return readwritelock;
}

private string getkeyssetname() {
return id + ":keys";
}

// 序列化接口
interface serializer {
byte[] serialize(object obj);
object deserialize(byte[] bytes);
}

// jdk序列化实现
static class jdkserializer implements serializer {
@override
public byte[] serialize(object obj) {
try (bytearrayoutputstream baos = new bytearrayoutputstream();
objectoutputstream oos = new objectoutputstream(baos)) {
oos.writeobject(obj);
return baos.tobytearray();
} catch (ioexception e) {
throw new cacheexception("serialization failed", e);
}
}

@override
public object deserialize(byte[] bytes) {
if (bytes == null) return null;
try (bytearrayinputstream bais = new bytearrayinputstream(bytes);
objectinputstream ois = new objectinputstream(bais)) {
return ois.readobject();
} catch (ioexception | classnotfoundexception e) {
throw new cacheexception("deserialization failed", e);
}
}
}
}

3.5 分布式环境下的缓存同步

/**
* 分布式缓存同步解决方案
*/
public class distributedcachesync {

/**
* 方案1:使用redis pub/sub同步缓存失效
*/
@component
public class cacheinvalidationlistener {

private final jedispool jedispool;
private final sqlsessionfactory sqlsessionfactory;

@postconstruct
public void init() {
new thread(() -> {
try (jedis jedis = jedispool.getresource()) {
jedis.subscribe(new jedispubsub() {
@override
public void onmessage(string channel, string message) {
// 收到缓存失效消息
handlecacheinvalidation(message);
}
}, "cache:invalidation");
}
}).start();
}

private void handlecacheinvalidation(string message) {
// 消息格式:mappername:cachekey
string[] parts = message.split(":");
if (parts.length == 2) {
string mappername = parts[0];
string cachekey = parts[1];

try (sqlsession session = sqlsessionfactory.opensession()) {
configuration config = session.getconfiguration();
cache cache = config.getcache(mappername);

if (cache != null) {
// 清除指定缓存
cache.removeobject(cachekey);
system.out.println("清除缓存:" + mappername + " - " + cachekey);
}
}
}
}

/**
* 发送缓存失效通知
*/
public void notifycacheinvalidation(string mappername, string cachekey) {
try (jedis jedis = jedispool.getresource()) {
string message = mappername + ":" + cachekey;
jedis.publish("cache:invalidation", message);
}
}
}

/**
* 方案2:自定义cache实现,集成分布式锁
*/
public class distributedlockcache implements cache {

private final cache delegate;
private final redissonclient redisson;

@override
public void putobject(object key, object value) {
string lockkey = "lock:cache:" + getid() + ":" + key.hashcode();
rlock lock = redisson.getlock(lockkey);

try {
lock.lock(5, timeunit.seconds); // 获取分布式锁
delegate.putobject(key, value);
} finally {
if (lock.isheldbycurrentthread()) {
lock.unlock();
}
}
}
}
}

3.6 性能监控与调优

/**
* 二级缓存性能监控工具
*/
public class cacheperformancemonitor {

private static final map<string, cachemetrics> metricsmap =
new concurrenthashmap<>();

@data
public static class cachemetrics {
private string cacheid;
private atomiclong hitcount = new atomiclong();
private atomiclong misscount = new atomiclong();
private atomiclong putcount = new atomiclong();
private atomiclong evictioncount = new atomiclong();
private atomiclong totaltime = new atomiclong();

public double gethitrate() {
long total = hitcount.get() + misscount.get();
return total == 0 ? 0 : (double) hitcount.get() / total;
}

public double getaverageaccesstime() {
long totalaccess = hitcount.get() + misscount.get();
return totalaccess == 0 ? 0 : (double) totaltime.get() / totalaccess;
}
}

/**
* 监控装饰器
*/
public static class monitoredcache implements cache {
private final cache delegate;
private final cachemetrics metrics;

public monitoredcache(cache delegate) {
this.delegate = delegate;
this.metrics = metricsmap.computeifabsent(delegate.getid(),
id -> new cachemetrics());
this.metrics.setcacheid(delegate.getid());
}

@override
public object getobject(object key) {
long start = system.nanotime();
try {
object value = delegate.getobject(key);
if (value != null) {
metrics.gethitcount().incrementandget();
} else {
metrics.getmisscount().incrementandget();
}
return value;
} finally {
long time = system.nanotime() - start;
metrics.gettotaltime().addandget(time);
}
}

@override
public void putobject(object key, object value) {
long start = system.nanotime();
try {
delegate.putobject(key, value);
metrics.getputcount().incrementandget();
} finally {
long time = system.nanotime() - start;
metrics.gettotaltime().addandget(time);
}
}

// 其他方法...
}

/**
* 生成监控报告
*/
public static void generatereport() {
system.out.println("=== 二级缓存性能报告 ===");
system.out.printf("%-30s %-10s %-10s %-10s %-10s%n",
"cache id", "hit rate", "hit", "miss", "avg time(ns)");
system.out.println("-".repeat(80));

for (cachemetrics metrics : metricsmap.values()) {
system.out.printf("%-30s %-10.2f %-10d %-10d %-10.2f%n",
metrics.getcacheid(),
metrics.gethitrate() * 100,
metrics.gethitcount().get(),
metrics.getmisscount().get(),
metrics.getaverageaccesstime());
}
}
}

/**
* 使用aop进行缓存监控
*/
@aspect
@component
public class cachemonitoraspect {

@pointcut("execution(* org.apache.ibatis.cache.cache.getobject(..))")
public void cachegetpointcut() {}

@around("cachegetpointcut()")
public object monitorcacheaccess(proceedingjoinpoint pjp) throws throwable {
cache cache = (cache) pjp.gettarget();
object key = pjp.getargs()[0];

string cacheid = cache.getid();
string keystr = key.tostring();

long start = system.currenttimemillis();
object result = pjp.proceed();
long time = system.currenttimemillis() - start;

// 记录到日志或监控系统
logrecord record = new logrecord(cacheid, keystr,
result != null ? "hit" : "miss", time);

// 发送到监控系统
sendtomonitor(record);

return result;
}
}

4. 最佳实践总结

4.1 配置建议

<!-- 生产环境推荐配置 -->
<cache
type="org.mybatis.caches.redis.rediscache"
eviction="lru"
flushinterval="0"<!-- 分布式环境下不自动刷新 -->
size="0"<!-- redis管理大小 -->
readonly="false"
blocking="true"<!-- 防止缓存击穿 -->
properties="
maxtotal=100,
maxidle=10,
minidle=5,
testonborrow=true,
timebetweenevictionrunsmillis=30000
"
/>

4.2 代码规范

// 1. 明确的事务边界
@transactional
public user getuserwithcache(long id) {
// 必须在一个事务内
try (sqlsession session = sqlsessionfactory.opensession()) {
usermapper mapper = session.getmapper(usermapper.class);
user user = mapper.selectbyid(id);
session.commit();// 关键:必须提交!
return user;
}
}

// 2. 批量操作处理
@transactional
public void batchupdateusers(list<user> users) {
try (sqlsession session = sqlsessionfactory.opensession()) {
usermapper mapper = session.getmapper(usermapper.class);

for (user user : users) {
mapper.updateuser(user);
// 每100条提交一次,避免缓存过大
if (i % 100 == 0) {
session.commit();
session.clearcache();// 可选:清空缓存
}
}
session.commit();
}
}

4.3 常见问题解决方案

| 问题 | 现象 | 解决方案 |
| --- | --- | --- |
| 缓存穿透 | 查询不存在的数据 | 1. 缓存空对象 2. 布隆过滤器 |
| 缓存击穿 | 热点key失效 | 1. 永不过期 2. 互斥锁 |
| 缓存雪崩 | 大量key同时失效 | 1. 随机过期时间 2. 二级缓存 |
| 数据不一致 | 缓存与DB不一致 | 1. 合理设置过期时间 2. 更新时清除缓存 |

5. 源码调试技巧

5.1 调试缓存流程

// 在 IDE 中设置断点:
// 1. CachingExecutor.query() - 入口
// 2. TransactionalCacheManager.getObject() - 获取缓存
// 3. CacheKey.update() - 查看缓存键生成
// 4. BaseExecutor.createCacheKey() - 创建缓存键

// 启用 MyBatis 缓存日志(日志框架配置)
org.apache.ibatis.cache.decorators.LoggingCache.level = DEBUG
org.apache.ibatis.executor.CachingExecutor.level = TRACE

5.2 查看缓存状态

public class cacheinspector {

public static void inspectcache(sqlsessionfactory factory) {
configuration config = factory.getconfiguration();

// 获取所有缓存
map<string, cache> caches = config.getcaches();

for (map.entry<string, cache> entry : caches.entryset()) {
system.out.println("cache: " + entry.getkey());
cache cache = entry.getvalue();

// 获取缓存装饰器链
while (cache != null) {
system.out.println("|- " + cache.getclass().getsimplename());

// 通过反射获取delegate
try {
field delegatefield = cache.getclass()
.getdeclaredfield("delegate");
delegatefield.setaccessible(true);
cache = (cache) delegatefield.get(cache);
} catch (exception e) {
break;
}
}
}
}
}

总结

MyBatis 二级缓存的源码设计体现了多个设计模式的精妙应用:

  1. 装饰器模式:灵活组合缓存功能
  2. 模板方法模式:CachingExecutor 的查询流程
  3. 策略模式:不同的缓存淘汰策略
  4. 代理模式:TransactionalCache 的事务管理

在实际使用中,需要根据业务场景选择合适的缓存策略,并注意:

  • 事务提交后缓存才生效
  • 关联查询的缓存处理
  • 分布式环境的一致性保证
  • 性能监控和调优

通过深入理解源码,可以更好地利用二级缓存提升系统性能,同时避免常见的数据一致性问题。

以上为个人经验,希望能给大家一个参考,也希望大家多多支持代码网。

(0)

相关文章:

版权声明:本文内容由互联网用户贡献,该文观点仅代表作者本人。本站仅提供信息存储服务,不拥有所有权,不承担相关法律责任。 如发现本站有涉嫌抄袭侵权/违法违规的内容, 请发送邮件至 2386932994@qq.com 举报,一经查实将立刻删除。

发表评论

验证码:
Copyright © 2017-2026  代码网 保留所有权利. 粤ICP备2024248653号
站长QQ:2386932994 | 联系邮箱:2386932994@qq.com