Note!!! I only tried this in a test environment; it never ran in production. The code wasn't even finished before the client said the feature was no longer needed ( •̩̩̩̩_•̩̩̩̩ ) Oh well, one less feature, a few less bugs.
While paging with from + size I ran into the max_result_window=10000 limit, so I looked into alternatives and ended up with a somewhat hacky way to do deep pagination with page jumping.
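For reference, the limit shows up as soon as from + size walks past index.max_result_window. A minimal reproduction sketch against the plain RestHighLevelClient ("IndexName" is a placeholder, and restHighLevelClient is the bean configured further down):

SearchSourceBuilder source = new SearchSourceBuilder()
        .from(10000)   // from + size beyond max_result_window (10000 by default)
        .size(10);
SearchRequest request = new SearchRequest("IndexName").source(source);
// fails with an error along the lines of:
// "Result window is too large, from + size must be less than or equal to: [10000] ..."
SearchResponse response = restHighLevelClient.search(request, RequestOptions.DEFAULT);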
1、Comparison of the three pagination approaches
from + size is the simplest and lets you jump straight to any page, but from + size cannot exceed index.max_result_window (10,000 by default). search_after can go arbitrarily deep, but it only ever steps to the adjacent page, so it cannot jump. scroll snapshots the result set and walks through it batch by batch; it is also forward-only, but each batch can be large, which is what the workaround below relies on.
2、Implementation outline
- Build the query conditions with easy-es's BaseEsMapper
- Run a large-batch scroll query through RestHighLevelClient
- Check whether the requested page straddles two scroll batches; if it does, scroll once more so both batches are in hand (see the arithmetic sketch after this list)
- Slice the fetched records according to the frontend paging parameters and return them
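The whole trick is mapping the frontend paging parameters (currentPage, pageSize) onto fixed-size scroll batches (5,000 records per batch in the code below). A rough sketch of that arithmetic; the helper name and variables are mine, not part of the original code:

// Hypothetical helper: locates the requested page inside 5,000-record scroll batches
static void locatePage(int currentPage, int pageSize) {
    int batchSize = 5000;
    int startIndex = (currentPage - 1) * pageSize;     // 0-based index of the page's first record
    int endIndex = currentPage * pageSize - 1;         // 0-based index of the page's last record
    int startBatch = startIndex / batchSize + 1;       // scroll batch holding the first record
    int endBatch = endIndex / batchSize + 1;           // scroll batch holding the last record
    boolean crossesBatches = endBatch > startBatch;    // true when the page straddles two batches
    int offsetInBatch = startIndex % batchSize;        // slice offset inside startBatch
    System.out.printf("batches %d..%d, crosses=%b, offset=%d%n",
            startBatch, endBatch, crossesBatches, offsetInBatch);
}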
3、Implementation code
Dependencies
<dependency>
    <groupId>org.dromara.easy-es</groupId>
    <artifactId>easy-es-boot-starter</artifactId>
    <version>2.0.0</version>
</dependency>
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-client</artifactId>
    <version>7.6.2</version>
</dependency>
<dependency>
    <groupId>org.elasticsearch.client</groupId>
    <artifactId>elasticsearch-rest-high-level-client</artifactId>
    <version>7.6.2</version>
</dependency>
SearchResult
import cn.hutool.json.JSONObject;
import cn.hutool.json.JSONUtil;
import lombok.Data;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

@Data
public class SearchResult<T> implements Serializable {
    /** total number of hits */
    private int total;
    /** hit wrappers carrying index metadata plus the deserialized _source */
    private List<DocBaseEntity<T>> source = new ArrayList<>();
    /** raw aggregation results, if any */
    private JSONObject aggregations;
    /** scroll cursor id, so the caller can keep scrolling or clear the context */
    private String scrollId;

    public void addData(DocBaseEntity<T> obj) {
        source.add(obj);
    }

    public List<T> getDatas() {
        return source.stream().map(DocBaseEntity::getDatas).collect(Collectors.toList());
    }

    public void addDatas(List<DocBaseEntity<T>> objs) {
        source.addAll(objs);
    }

    public void setTotal(Object total) {
        this.total = Integer.parseInt(String.valueOf(total));
    }

    public JSONObject toJSONObject() {
        return JSONUtil.parseObj(this, true);
    }
}
DocBaseEntity
import cn.hutool.json.JSONObject;
import lombok.Data;
import org.elasticsearch.search.SearchHit;
import java.io.Serializable;

@Data
public class DocBaseEntity<T> implements Serializable {
    /** index / type / id metadata copied from the search hit */
    private String _index;
    private String _type;
    private String _id;
    /** the hit's _source deserialized into the target type */
    private T datas;

    public DocBaseEntity(SearchHit data) {
        this._index = data.getIndex();
        this._type = data.getType();
        this._id = data.getId();
    }

    public DocBaseEntity(JSONObject jsonHits) {
        this._index = jsonHits.getStr("_index");
        this._type = jsonHits.getStr("_type");
        this._id = jsonHits.getStr("_id");
    }

    public T getDatas() {
        return datas;
    }
}
RestClientConfig
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class RestClientConfig {

    @Value("${elasticsearch.clientIps}")
    private String clientIps;
    @Value("${elasticsearch.httpPort}")
    private int httpPort;
    @Value("${elasticsearch.username}")
    private String username;
    @Value("${elasticsearch.password}")
    private String password;

    private HttpHost[] getHttpHosts(String clientIps, int esHttpPort) {
        // clientIps is a comma-separated list of node addresses
        String[] clientIpList = clientIps.split(",");
        HttpHost[] httpHosts = new HttpHost[clientIpList.length];
        for (int i = 0; i < clientIpList.length; i++) {
            httpHosts[i] = new HttpHost(clientIpList[i], esHttpPort, "http");
        }
        return httpHosts;
    }

    /**
     * Creates the REST high-level client with HTTP Basic Auth.
     */
    @Bean
    public RestHighLevelClient restHighLevelClient() {
        CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
        credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(username, password));
        return new RestHighLevelClient(RestClient.builder(getHttpHosts(clientIps, httpPort))
                .setHttpClientConfigCallback((HttpAsyncClientBuilder httpAsyncClientBuilder) ->
                        httpAsyncClientBuilder.setDefaultCredentialsProvider(credentialsProvider)));
    }
}
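The configuration above reads four properties. A minimal application.properties to go with it; the host list, port and credentials below are placeholders, not values from the original post:

elasticsearch.clientIps=192.168.0.1,192.168.0.2
elasticsearch.httpPort=9200
elasticsearch.username=elastic
elasticsearch.password=change-me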
Scroll query method

// rdsBookCommonDetailEsMapper below is the easy-es mapper for the index
// (it extends BaseEsMapper<ES实体类Bean>) and is injected elsewhere in the same service class
@Autowired
private RestHighLevelClient restHighLevelClient;

/**
 * Scroll query that advances to a given batch.
 *
 * @param indexName index to query
 * @param pageNo    scroll batch to advance to (1-based); only used when scrollId is empty
 * @param pageSize  number of records per scroll batch
 * @param scrollId  scroll cursor from a previous call; pass an empty string for the first call
 * @param resultObj class the hit _source is deserialized into
 * @param wrapper   easy-es query wrapper carrying the search conditions
 * @param <T>       result type
 * @return the records of the requested batch plus the scroll cursor id
 * @throws IOException on transport errors
 */
public <T> SearchResult<T> scrollSearchElasticSearchDatas(String indexName,
                                                          int pageNo,
                                                          int pageSize,
                                                          String scrollId,
                                                          Class<T> resultObj,
                                                          LambdaEsQueryWrapper<ES实体类Bean> wrapper) throws IOException {
    // let easy-es turn the wrapper into a native SearchSourceBuilder
    SearchSourceBuilder searchSourceBuilder = rdsBookCommonDetailEsMapper.getSearchSourceBuilder(wrapper);
    searchSourceBuilder.size(pageSize);
    SearchRequest searchRequest = new SearchRequest(indexName);
    searchRequest.source(searchSourceBuilder);
    // how long the scroll context is kept alive
    Scroll scroll = new Scroll(TimeValue.timeValueMinutes(3));
    searchRequest.scroll(scroll);
    SearchResponse searchResponse = null;
    if (StrUtil.isEmpty(scrollId)) {
        // first call: run the initial search, then scroll forward until the requested batch is reached
        searchResponse = executeSearch(searchRequest);
        String tempScrollId = searchResponse.getScrollId();
        SearchScrollRequest searchScrollRequest = new SearchScrollRequest(tempScrollId);
        searchScrollRequest.scroll(scroll);
        for (int i = 0; i < (pageNo - 1); i++) {
            searchResponse = scrollSearch(searchScrollRequest);
        }
        scrollId = tempScrollId;
    } else {
        // follow-up call: just scroll one batch further on the existing cursor
        SearchScrollRequest searchScrollRequest = new SearchScrollRequest(scrollId);
        searchScrollRequest.scroll(scroll);
        searchResponse = scrollSearch(searchScrollRequest);
    }
    // build the result
    SearchResult<T> result = createSearchResult(searchResponse, resultObj);
    result.setScrollId(scrollId);
    return result;
}

/**
 * Executes the initial search.
 */
private SearchResponse executeSearch(SearchRequest searchRequest) {
    SearchResponse searchResponse = null;
    try {
        searchResponse = restHighLevelClient.search(searchRequest, RequestOptions.DEFAULT);
    } catch (Exception e) {
        // exception handling
    }
    return searchResponse;
}

/**
 * Executes one scroll round.
 */
private SearchResponse scrollSearch(SearchScrollRequest searchScrollRequest) {
    SearchResponse searchResponse = null;
    try {
        searchResponse = restHighLevelClient.scroll(searchScrollRequest, RequestOptions.DEFAULT);
    } catch (Exception e) {
        // exception handling
    }
    return searchResponse;
}

/**
 * Maps the response hits into the target result structure.
 *
 * @param response  search response
 * @param resultObj class the hit _source is deserialized into
 */
private <T> SearchResult<T> createSearchResult(SearchResponse response, Class<T> resultObj) {
    SearchResult<T> resultMap = new SearchResult<>();
    SearchHit[] datas = response.getHits().getHits();
    for (SearchHit data : datas) {
        DocBaseEntity<T> temp = new DocBaseEntity<>(data);
        temp.setDatas(JSONUtil.toBean(JSONUtil.parseObj(data.getSourceAsMap()), resultObj));
        resultMap.addData(temp);
    }
    resultMap.setTotal(response.getHits().getTotalHits().value);
    return resultMap;
}

/**
 * Releases the scroll context once the page has been served.
 */
private void clearScrollSession(String scrollId) throws IOException {
    if (scrollId != null) {
        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
        clearScrollRequest.addScrollId(scrollId);
        ClearScrollResponse clearScrollResponse = restHighLevelClient.clearScroll(clearScrollRequest, RequestOptions.DEFAULT);
        clearScrollResponse.isSucceeded();
    }
}
Usage

LambdaEsQueryWrapper<ES实体类Bean> wrapper = new LambdaEsQueryWrapper<>();
wrapper.eqxxx
// ...build the rest of the query conditions
// scroll cursor id
String scrollId = "";
// first fetch: the scroll batch containing the first record of the requested page
SearchResult<ES实体类Bean> firstSearchResult = null;
// second fetch: the following batch, only needed when the page straddles two batches
SearchResult<ES实体类Bean> secondSearchResult = null;
// each scroll round fetches 5000 records; the frontend paging parameters decide which batches are needed
int startBatch = ((searchCommonDetaiVo.getCurrentPage() - 1) * searchCommonDetaiVo.getPageSize()) / 5000 + 1;
int endBatch = (searchCommonDetaiVo.getCurrentPage() * searchCommonDetaiVo.getPageSize() - 1) / 5000 + 1;
try {
    firstSearchResult = scrollSearchElasticSearchDatas("IndexName", startBatch, 5000, scrollId, ES实体类Bean.class, wrapper);
    scrollId = firstSearchResult.getScrollId();
} catch (IOException e) {
    e.printStackTrace();
}
// if the page straddles two batches, scroll one more batch so both halves are available
if (endBatch > startBatch) {
    try {
        // scrollId is already set, so this call simply scrolls forward to the next batch
        secondSearchResult = scrollSearchElasticSearchDatas("IndexName", startBatch + 1, 5000, scrollId, ES实体类Bean.class, wrapper);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
// TODO slice the fetched records according to the frontend paging parameters and return them (see the sketch after this block)
try {
    clearScrollSession(scrollId);
} catch (IOException e) {
    log.info("Failed to clear the scroll context");
    e.printStackTrace();
}