fanli/src/test/java/org/fanli/elastic/ADDUtils.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
fanli/src/test/java/org/fanli/elastic/DeleteUtils.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
fanli/src/test/java/org/fanli/elastic/GetUtils.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
fanli/src/test/java/org/fanli/elastic/QueryDOUtils.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
fanli/src/test/java/org/fanli/elastic/QueryUtils.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
fanli/src/test/java/org/fanli/elastic/QueryUtils_Test.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
fanli/src/test/java/org/fanli/elastic/Test_Query.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 | |
fanli/src/test/java/org/fanli/elastic/UpdateUtils.java | ●●●●● 补丁 | 查看 | 原始文档 | blame | 历史 |
fanli/src/test/java/org/fanli/elastic/ADDUtils.java
New file @@ -0,0 +1,153 @@ package org.fanli.elastic; import java.io.IOException; import java.util.Date; import java.util.HashMap; import java.util.Map; import org.apache.http.HttpHost; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.yeshi.utils.JsonUtil; public class ADDUtils { private static String clusterName = "my-application"; private static String host = "192.168.1.200"; private static Integer port = 9200; // 相当于数据库名称 public static String indexName = "shose"; // 初始化api客户端 public static RestHighLevelClient client = new RestHighLevelClient( RestClient.builder(new HttpHost(host, port, "http"))); public static void add1() { IndexRequest request = new IndexRequest("posts"); request.id("1"); String jsonString = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elasticsearch\"" + "}"; request.source(jsonString, XContentType.JSON); try { IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); } catch (IOException e) { e.printStackTrace(); } } public void add2() { Map<String, Object> jsonMap = new HashMap<>(); jsonMap.put("user", "kimchy"); jsonMap.put("postDate", new Date()); jsonMap.put("message", "trying out Elasticsearch"); IndexRequest request = new IndexRequest("posts").id("1").source(jsonMap); /* 可选参数 */ // 路由 request.routing("routing"); // 超时 request.timeout(TimeValue.timeValueSeconds(1)); request.timeout("1s"); // 刷新策略 
request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); request.setRefreshPolicy("wait_for"); // 版本要求 request.version(2); // 版本类型 request.versionType(VersionType.EXTERNAL); // 操作类型 request.opType(DocWriteRequest.OpType.CREATE); request.opType("create"); // 索引文档之前要执行的线的名称 request.setPipeline("pipeline"); } public static void add3() { // 提供为的文档源,该源Map自动转换为JSON格式 try { XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); { builder.field("user", "kimchy2"); builder.timeField("postDate", new Date()); builder.field("message", "trying "); } builder.endObject(); IndexRequest request = new IndexRequest("posts").id("2").source(builder); IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); } catch (Exception e) { e.printStackTrace(); } // // 作为Object密钥对提供的文档源,被转换为JSON格式 // IndexRequest indexRequest = new IndexRequest("posts").id("1").source("user", "kimchy", "postDate", new Date(), // "message", "trying out Elasticsearch"); } public static void add4() { // Product product = new Product(); // product.setId("995"); // product.setName("测试中的5"); // product.setPrice("494985"); // product.setDetail("测试进欧冠任何5"); // String json = JsonUtil.getSimpleGson().toJson(""); IndexRequest request = new IndexRequest("shose").id("1"); request.source(json, XContentType.JSON); // 同步执行 try { IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT); } catch (IOException e) { e.printStackTrace(); } // // 异步执行 // ActionListener listener = new ActionListener<IndexResponse>() { // @Override // public void onResponse(IndexResponse indexResponse) { // // } // // @Override // public void onFailure(Exception e) { // // } // }; // client.indexAsync(request, RequestOptions.DEFAULT, listener); } public static void main(String[] args) { // add1(); add3(); // add4(); System.out.println("添加成功"); } } fanli/src/test/java/org/fanli/elastic/DeleteUtils.java
New file @@ -0,0 +1,99 @@ package org.fanli.elastic; import java.io.IOException; import org.apache.http.HttpHost; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; public class DeleteUtils { private static String clusterName = "my-application"; private static String host = "192.168.1.200"; private static Integer port = 9200; // 相当于数据库名称 public static String indexName = "shose"; // 初始化api客户端 public static RestHighLevelClient client = new RestHighLevelClient( RestClient.builder(new HttpHost(host, port, "http"))); public static void get1() { DeleteRequest request = new DeleteRequest("posts", "2"); // // 可选参数 // request.timeout(TimeValue.timeValueMinutes(2)); // request.timeout("2m"); // // // 策略 // request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); // request.setRefreshPolicy("wait_for"); // // // 版本 // request.version(2); // request.versionType(VersionType.EXTERNAL); try { DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT); // // // 返回的值DeleteResponse允许检索有关已执行操作的信息, // String index = deleteResponse.getIndex(); // String id = deleteResponse.getId(); // long version = deleteResponse.getVersion(); // ReplicationResponse.ShardInfo shardInfo = deleteResponse.getShardInfo(); // if (shardInfo.getTotal() != shardInfo.getSuccessful()) { // // // 处理成功分片数量少于总分片数量的情况 // } // if (shardInfo.getFailed() > 0) { // for (ReplicationResponse.ShardInfo.Failure failure : // shardInfo.getFailures()) { // String reason = 
failure.reason(); // } // // 处理潜在的故障 // } // // // 可以检查是否找到了该文档: // DeleteRequest request = new DeleteRequest("posts", "does_not_exist"); // DeleteResponse deleteResponse = client.delete( // request, RequestOptions.DEFAULT); // if (deleteResponse.getResult() == DocWriteResponse.Result.NOT_FOUND) { // // } // // try { // DeleteResponse deleteResponse = client.delete( // new DeleteRequest("posts", "1").setIfSeqNo(100).setIfPrimaryTerm(2), // RequestOptions.DEFAULT); // } catch (ElasticsearchException exception) { // if (exception.status() == RestStatus.CONFLICT) { // // } // } // } catch (IOException e) { e.printStackTrace(); } } public static void main(String[] args) { get1(); System.out.println("------------ 测试结束 -------------------------"); } } fanli/src/test/java/org/fanli/elastic/GetUtils.java
New file @@ -0,0 +1,151 @@ package org.fanli.elastic; import java.io.IOException; import java.util.Map; import org.apache.http.HttpHost; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; public class GetUtils { private static String clusterName = "my-application"; private static String host = "192.168.1.200"; private static Integer port = 9200; // 相当于数据库名称 public static String indexName = "shose"; // 初始化api客户端 public static RestHighLevelClient client = new RestHighLevelClient( RestClient.builder(new HttpHost(host, port, "http"))); public static void get1() { // 获取请求 index, _id GetRequest request = new GetRequest("posts", "2"); // 禁用源检索,默认情况下启用 request.fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE); // 设置参数 String[] includes = new String[] { "message", "*Date" }; String[] excludes = Strings.EMPTY_ARRAY; FetchSourceContext fetchSourceContext = new FetchSourceContext(true, includes, excludes); request.fetchSourceContext(fetchSourceContext); request.storedFields("message"); // String[] includes = Strings.EMPTY_ARRAY; // String[] excludes = new String[]{"message"}; // FetchSourceContext fetchSourceContext = // new FetchSourceContext(true, includes, excludes); // request.fetchSourceContext(fetchSourceContext); // ---可选参数---- // request.routing("routing"); // request.preference("preference"); // // 实时标志设置 // request.realtime(false); // // 刷新 // request.refresh(true); // request.version(2); // request.versionType(VersionType.EXTERNAL); // --------- // 同步 try { GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); // String message = getResponse.getField("message").getValue(); // 
返回的内容GetResponse允许检索请求的文档及其元数据和最终存储的字段。 String index = getResponse.getIndex(); String id = getResponse.getId(); // 判断是否找到 if (getResponse.isExists()) { long version = getResponse.getVersion(); System.out.println(version); // 将该文档检索为 String String sourceAsString = getResponse.getSourceAsString(); System.out.println(sourceAsString); // 文档检索为 Map<String, Object> Map<String, Object> sourceAsMap = getResponse.getSourceAsMap(); System.out.println(sourceAsMap); // 将该文档检索为 byte[] byte[] sourceAsBytes = getResponse.getSourceAsBytes(); System.out.println(sourceAsBytes); } else { // 处理找不到文档的情况。请注意,尽管返回的响应具有404状态码,但返回的是有效值GetResponse,而不是引发异常。 // 这样的响应不包含任何源文档,并且其isExists方法返回false。 } } catch (IOException e) { e.printStackTrace(); } // 当针对不存在的索引执行get请求时,响应具有404状态码,并且ElasticsearchException抛出get ,需要按以下方式处理: // 处理由于索引不存在而引发的异常 // GetRequest request = new GetRequest("does_not_exist", "1"); // try { // GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); // } catch (ElasticsearchException e) { // if (e.status() == RestStatus.NOT_FOUND) { // // } // } // 如果请求了特定的文档版本,并且现有文档具有不同的版本号,则会引发版本冲突: // 引发的异常表明已返回版本冲突错误 // try { // GetRequest request = new GetRequest("posts", "1").version(2); // GetResponse getResponse = client.get(request, RequestOptions.DEFAULT); // } catch (ElasticsearchException exception) { // if (exception.status() == RestStatus.CONFLICT) { // // } // } // 异步 ActionListener<GetResponse> listener = new ActionListener<GetResponse>() { @Override public void onResponse(GetResponse getResponse) { } @Override public void onFailure(Exception e) { } }; client.getAsync(request, RequestOptions.DEFAULT, listener); } // 判断是否存在 public static void get2() { // index、id文件编号 GetRequest getRequest = new GetRequest("shose", "20"); getRequest.fetchSourceContext(new FetchSourceContext(false)); // 禁用获取存储的字段。 getRequest.storedFields("_none_"); try { boolean exists = client.exists(getRequest, RequestOptions.DEFAULT); System.out.println(exists); } catch (IOException e) { 
e.printStackTrace(); } } public static void main(String[] args) { get2(); System.out.println("------------ 测试结束 -------------------------"); } } fanli/src/test/java/org/fanli/elastic/QueryDOUtils.java
New file @@ -0,0 +1,307 @@ package org.fanli.elastic; import static org.junit.Assert.assertNull; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.http.HttpHost; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.get.MultiGetItemResponse; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.RethrottleRequest; import org.elasticsearch.client.core.MultiTermVectorsRequest; import org.elasticsearch.client.core.MultiTermVectorsResponse; import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; public class QueryDOUtils { private static String clusterName = "my-application"; private static String host = "192.168.1.200"; private static Integer port = 9200; 
// 相当于数据库名称 public static String indexName = "shose"; // 初始化api客户端 public static RestHighLevelClient client = new RestHighLevelClient( RestClient.builder(new HttpHost(host, port, "http"))); public static void MultiGetRequest() { MultiGetRequest request = new MultiGetRequest(); // Index、Document id request.add(new MultiGetRequest.Item("index", "example_id")); request.add(new MultiGetRequest.Item("index", "another_id")); /* 可选参数 */ // 禁用源检索,默认情况下启用 request.add(new MultiGetRequest.Item("index", "example_id") .fetchSourceContext(FetchSourceContext.DO_NOT_FETCH_SOURCE)); // 1、为特定字段配置源包含 String[] includes = new String[] { "foo", "*r" }; String[] excludes = Strings.EMPTY_ARRAY; FetchSourceContext fetchSourceContext = new FetchSourceContext(true, includes, excludes); request.add(new MultiGetRequest.Item("index", "example_id").fetchSourceContext(fetchSourceContext)); // 2、为特定字段配置源排除 String[] includes2 = Strings.EMPTY_ARRAY; String[] excludes2 = new String[] { "foo", "*r" }; FetchSourceContext fetchSourceContext2 = new FetchSourceContext(true, includes2, excludes2); request.add(new MultiGetRequest.Item("index", "example_id").fetchSourceContext(fetchSourceContext2)); // 配置特定存储字段的检索(要求将字段分别存储在映射中) 检索foo存储的字段(要求将字段单独存储在映射中) request.add(new MultiGetRequest.Item("index", "example_id").storedFields("foo")); try { MultiGetResponse response = client.mget(request, RequestOptions.DEFAULT); MultiGetItemResponse item = response.getResponses()[0]; String value = item.getResponse().getField("foo").getValue(); } catch (IOException e) { e.printStackTrace(); } request.add(new MultiGetRequest.Item("index", "with_routing").routing("some_routing")); request.add( new MultiGetRequest.Item("index", "with_version").versionType(VersionType.EXTERNAL).version(10123L)); // 偏好值 request.preference("some_preference"); // 将实时标志设置为false(true默认情况下) request.realtime(false); // 检索文档之前执行刷新(false默认情况下) request.refresh(true); try { MultiGetResponse response = client.mget(request, RequestOptions.DEFAULT); 
MultiGetItemResponse firstItem = response.getResponses()[0]; // getFailure 返回null,因为没有失败。 assertNull(firstItem.getFailure()); GetResponse firstGet = firstItem.getResponse(); String index = firstItem.getIndex(); String id = firstItem.getId(); if (firstGet.isExists()) { long version = firstGet.getVersion(); // 将该文档检索为 String String sourceAsString = firstGet.getSourceAsString(); // 将该文档检索为 Map<String, Object> Map<String, Object> sourceAsMap = firstGet.getSourceAsMap(); // 将该文档检索为 byte[] byte[] sourceAsBytes = firstGet.getSourceAsBytes(); } else { // 处理找不到文档的情况。请注意,尽管返回的响应具有404状态码,但返回的是有效值GetResponse,而不是引发异常。 // 这样的响应不包含任何源文档,并且其isExists方法返回false。 } } catch (IOException e) { e.printStackTrace(); } } public static void queryUpdate() { // 一个ReindexRequest可以用来从一个或多个索引文件复制到目标指数。 // 它要求在请求之前可能存在或可能不存在的现有源索引和目标索引。Reindex不会尝试设置目标索引。 // 它不会复制源索引的设置。您应该在运行_reindex操作之前设置目标索引,包括设置映射,分片计数,副本等。 ReindexRequest request = new ReindexRequest(); request.setSourceIndices("source1", "source2"); // 添加要复制的来源列表 request.setDestIndex("dest"); // 添加目标索引 // 。设置versionType为external将导致Elasticsearch从源保留版本,创建丢失的所有文档,并更新目标索引中比源索引中具有旧版本的任何文档。 request.setDestVersionType(VersionType.EXTERNAL); // 设置opType为create将会导致_reindex仅在目标索引中创建丢失的文档。所有现有文档都将导致版本冲突。默认opType值为index。 request.setDestOpType("create"); // 默认情况下,版本冲突会中止该_reindex过程,但是您可以使用以下方法来计算它们: request.setConflicts("proceed"); // 添加查询来限制文档。 仅复制字段user设置为kimchy request.setSourceQuery(new TermQueryBuilder("user", "kimchy")); // 限制已处理文档的数量maxDocs。 request.setMaxDocs(10); // 默认情况下_reindex使用的批次为1000。您可以使用更改批次大小sourceBatchSize。 request.setSourceBatchSize(100); // Reindex也可以通过指定来使用摄取功能pipeline。 request.setDestPipeline("my_pipeline"); // 如果要从源索引中获取一组特定的文档,则需要使用sort。如果可能,请选择更具选择性的查询,而不是maxDocs和排序。 request.addSortField("field1", SortOrder.DESC); request.addSortField("field2", SortOrder.ASC); // 支持script修改文档。它还允许您更改文档的元数据。 // setScriptlikes使用user 增大所有文档上的字段kimchy。 request.setScript(new Script(ScriptType.INLINE, "painless", "if (ctx._source.user == 'kimchy') 
{ctx._source.likes++;}", Collections.emptyMap())); // ReindexRequest支持从远程Elasticsearch集群重新索引。使用远程集群时,应在RemoteInfo对象内部指定查询,而不要使用setSourceQuery。 // 如果同时设置了远程信息和源查询,则会在请求期间导致验证错误。这样做的原因是远程Elasticsearch可能无法理解现代查询构建器构建的查询。 // 远程集群支持一直运行到Elasticsearch 0.90,此后查询语言已更改。达到旧版本时,以JSON手动编写查询会更安全。 // request.setRemoteInfo(new RemoteInfo("http", remoteHost, remotePort, null, // new BytesArray(new MatchAllQueryBuilder().toString()), user, password, Collections.emptyMap(), // new TimeValue(100, TimeUnit.MILLISECONDS), new TimeValue(100, TimeUnit.SECONDS))); try { BulkByScrollResponse bulkResponse = client.reindex(request, RequestOptions.DEFAULT); } catch (IOException e) { e.printStackTrace(); } } public static void queryUpdate2() { UpdateByQueryRequest request = new UpdateByQueryRequest("source1", "source2"); request.setConflicts("proceed"); request.setQuery(new TermQueryBuilder("user", "kimchy")); request.setMaxDocs(10); request.setBatchSize(100); request.setPipeline("my_pipeline"); request.setScript(new Script(ScriptType.INLINE, "painless", "if (ctx._source.user == 'kimchy') {ctx._source.likes++;}", Collections.emptyMap())); request.setSlices(2); request.setScroll(TimeValue.timeValueMinutes(10)); request.setRouting("=cat"); request.setTimeout(TimeValue.timeValueMinutes(2)); request.setRefresh(true); request.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); try { BulkByScrollResponse bulkResponse = client.updateByQuery(request, RequestOptions.DEFAULT); // 获取总时间 TimeValue timeTaken = bulkResponse.getTook(); // 检查请求是否超时 boolean timedOut = bulkResponse.isTimedOut(); // 获取处理的文档总数 long totalDocs = bulkResponse.getTotal(); // 已更新的文档数 long updatedDocs = bulkResponse.getUpdated(); // 被删除的文档数 long deletedDocs = bulkResponse.getDeleted(); // 已执行的批次数 long batches = bulkResponse.getBatches(); // 跳过的文档数 long noops = bulkResponse.getNoops(); // 版本冲突数 long versionConflicts = bulkResponse.getVersionConflicts(); // 请求必须重试批量索引操作的次数 long bulkRetries = bulkResponse.getBulkRetries(); // 
请求必须重试搜索操作的次数 long searchRetries = bulkResponse.getSearchRetries(); // 如果该请求当前处于睡眠状态,则该请求已进行自身限制的总时间不包括当前的限制时间 TimeValue throttledMillis = bulkResponse.getStatus().getThrottled(); // 当前节气门睡眠的剩余延迟;如果未睡眠,则为0 TimeValue throttledUntilMillis = bulkResponse.getStatus().getThrottledUntil(); // 搜索阶段失败 List<ScrollableHitSource.SearchFailure> searchFailures = bulkResponse.getSearchFailures(); // 大容量索引操作期间发生故障 List<BulkItemResponse.Failure> bulkFailures = bulkResponse.getBulkFailures(); } catch (IOException e) { e.printStackTrace(); } } // 按查询删除 public static void queryDelete() { DeleteByQueryRequest request = new DeleteByQueryRequest("source1", "source2"); request.setConflicts("proceed"); request.setQuery(new TermQueryBuilder("user", "kimchy")); request.setMaxDocs(10); request.setBatchSize(100); request.setSlices(2); // 设置要使用的切片数 request.setScroll(TimeValue.timeValueMinutes(10)); // 使用scroll参数来控制它使“搜索上下文”保持活动的时间。 request.setRouting("=cat"); request.setTimeout(TimeValue.timeValueMinutes(2)); // 等待通过查询请求执行删除的超时 TimeValue request.setRefresh(true); // 通过查询调用删除后刷新索引 request.setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN); // 设置索引选项 try { BulkByScrollResponse bulkResponse = client.deleteByQuery(request, RequestOptions.DEFAULT); } catch (IOException e) { e.printStackTrace(); } } public static void RethrottleRequest(TaskId taskId) { // 可用于更改正在运行的重新索引,按查询更新或按查询删除任务的当前限制,或完全禁用该任务的限制。它需要更改任务的任务ID。 RethrottleRequest request = new RethrottleRequest(taskId); RethrottleRequest request2 = new RethrottleRequest(taskId, 100.0f); try { // 执行重新索引重新调节请求 client.reindexRethrottle(request, RequestOptions.DEFAULT); // 通过查询更新相同 client.updateByQueryRethrottle(request, RequestOptions.DEFAULT); // 通过查询删除相同 client.deleteByQueryRethrottle(request, RequestOptions.DEFAULT); } catch (Exception e) { } } public static void MultiTermVectorsRequest() { MultiTermVectorsRequest request = new MultiTermVectorsRequest(); TermVectorsRequest tvrequest1 = new TermVectorsRequest("authors", "1"); 
tvrequest1.setFields("user"); request.add(tvrequest1); try { XContentBuilder docBuilder = XContentFactory.jsonBuilder(); docBuilder.startObject().field("user", "guest-user").endObject(); TermVectorsRequest tvrequest2 = new TermVectorsRequest("authors", docBuilder); request.add(tvrequest2); } catch (Exception e) { // TODO: handle exception } TermVectorsRequest tvrequestTemplate = new TermVectorsRequest("authors", "fake_id"); tvrequestTemplate.setFields("user"); String[] ids = { "1", "2" }; MultiTermVectorsRequest request2 = new MultiTermVectorsRequest(ids, tvrequestTemplate); try { MultiTermVectorsResponse response = client.mtermvectors(request, RequestOptions.DEFAULT); List<TermVectorsResponse> tvresponseList = response.getTermVectorsResponses(); if (tvresponseList != null) { for (TermVectorsResponse tvresponse : tvresponseList) { } } } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } public static void main(String[] args) { System.out.println("------------ 测试结束 -------------------------"); } } fanli/src/test/java/org/fanli/elastic/QueryUtils.java
New file @@ -0,0 +1,519 @@ package org.fanli.elastic; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import org.apache.http.HttpHost; import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.rankeval.EvalQueryQuality; import org.elasticsearch.index.rankeval.EvaluationMetric; import org.elasticsearch.index.rankeval.MetricDetail; import org.elasticsearch.index.rankeval.PrecisionAtK; import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.index.rankeval.RankEvalResponse; import org.elasticsearch.index.rankeval.RankEvalSpec; import org.elasticsearch.index.rankeval.RatedDocument; import org.elasticsearch.index.rankeval.RatedRequest; import 
org.elasticsearch.index.rankeval.RatedSearchHit; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.Avg; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.profile.ProfileResult; import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult; import org.elasticsearch.search.profile.query.CollectorResult; import org.elasticsearch.search.profile.query.QueryProfileShardResult; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.SuggestBuilders; import org.elasticsearch.search.suggest.SuggestionBuilder; import org.elasticsearch.search.suggest.term.TermSuggestion; public class QueryUtils { private static String clusterName = "my-application"; private static String host = "192.168.1.200"; private static Integer port = 9200; // 相当于数据库名称 public static String indexName = "shose"; // 初始化api客户端 public static RestHighLevelClient client = new RestHighLevelClient( RestClient.builder(new HttpHost(host, port, "http"))); public static void 
SearchRequest() { // SearchRequest searchRequest = new SearchRequest(); // 如果没有参数,这将与所有索引冲突。 // SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); // searchSourceBuilder.query(QueryBuilders.matchAllQuery()); // searchRequest.source(searchSourceBuilder); // SearchRequest searchRequest = new SearchRequest("posts"); // searchRequest.routing("routing"); // // 设置IndicesOptions控制如何解决不可用的索引以及如何扩展通配符表达式 // searchRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); // // 使用首选项参数,例如,执行搜索以偏爱本地碎片。默认设置是随机分片。 // searchRequest.preference("_local"); SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy")); sourceBuilder.from(0); // 索引以开始搜索的选项。预设为0。 sourceBuilder.size(5); // 返回的搜索命中次数的选项。默认为10 sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // 设置一个可选的超时时间,以控制允许搜索的时间。 SearchRequest searchRequest = new SearchRequest(); searchRequest.indices("posts"); searchRequest.source(sourceBuilder); // 创建与“用户”字段上的文本“ kimchy”匹配的全文匹配查询。 MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("user", "kimchy"); matchQueryBuilder.fuzziness(Fuzziness.AUTO); // 对匹配查询启用模糊匹配 matchQueryBuilder.prefixLength(3); // 在匹配查询中设置前缀长度选项 matchQueryBuilder.maxExpansions(10); // 设置最大扩展选项以控制查询的模糊过程 // QueryBuilder matchQueryBuilder1 = QueryBuilders.matchQuery("user", "kimchy").fuzziness(Fuzziness.AUTO) // .prefixLength(3).maxExpansions(10); // sourceBuilder.query(matchQueryBuilder1); // 降序排列_score(默认) sourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.DESC)); // 也按_id字段升序排序 sourceBuilder.sort(new FieldSortBuilder("_id").order(SortOrder.ASC)); // 搜索请求返回文档的内容,_source但是就像在Rest API中一样,您可以覆盖此行为。例如,您可以_source完全关闭检索: sourceBuilder.fetchSource(false); // 该方法还接受一个或多个通配符模式的数组,以控制以更细粒度的方式包含或排除哪些字段: String[] includeFields = new String[] { "title", "innerObject.*" }; String[] excludeFields = new String[] { "user" }; sourceBuilder.fetchSource(includeFields, excludeFields); // 突出显示搜索结果可以通过设置来实现HighlightBuilder的 // 
// SearchSourceBuilder. Add one or more HighlightBuilder.Field instances to a
		// HighlightBuilder to define different highlighting behaviour per field.
		SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
		HighlightBuilder highlightBuilder = new HighlightBuilder();
		HighlightBuilder.Field highlightTitle = new HighlightBuilder.Field("title");
		highlightTitle.highlighterType("unified");
		highlightBuilder.field(highlightTitle);
		HighlightBuilder.Field highlightUser = new HighlightBuilder.Field("user");
		highlightBuilder.field(highlightUser);
		searchSourceBuilder.highlighter(highlightBuilder);

		// Aggregations: build an AggregationBuilder and set it on a SearchSourceBuilder.
		// Here: a terms aggregation on the company name with an avg sub-aggregation
		// on the age of the company's employees.
		SearchSourceBuilder searchSourceBuilder2 = new SearchSourceBuilder();
		TermsAggregationBuilder aggregation = AggregationBuilders.terms("by_company").field("company.keyword");
		aggregation.subAggregation(AggregationBuilders.avg("average_age").field("age"));
		searchSourceBuilder2.aggregation(aggregation);

		// Suggestions: use one of the SuggestionBuilder implementations from the
		// SuggestBuilders factory, add it to a top-level SuggestBuilder, and set that
		// on a SearchSourceBuilder.
		SearchSourceBuilder searchSourceBuilder3 = new SearchSourceBuilder();
		// Term suggestion for the "user" field with the text "kmichy".
		SuggestionBuilder termSuggestionBuilder = SuggestBuilders.termSuggestion("user").text("kmichy");
		SuggestBuilder suggestBuilder = new SuggestBuilder();
		suggestBuilder.addSuggestion("suggest_user", termSuggestionBuilder);
		searchSourceBuilder3.suggest(suggestBuilder);

		try {
			SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);

			// Details about the request execution itself: HTTP status code, execution
			// time, and whether the request terminated early or timed out.
			RestStatus status = searchResponse.status();
			TimeValue took = searchResponse.getTook();
			Boolean terminatedEarly = searchResponse.isTerminatedEarly();
			boolean timedOut = searchResponse.isTimedOut();

			// Shard-level statistics: total, successful and failed shard counts.
			// Possible failures can be handled by iterating the failure array.
			int totalShards = searchResponse.getTotalShards();
			int successfulShards = searchResponse.getSuccessfulShards();
			int failedShards = searchResponse.getFailedShards();
			for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
				// failures should be handled here
			}

			// To access the returned documents, first get the SearchHits from the response.
			SearchHits hits = searchResponse.getHits();
			// SearchHits provides global information about all hits, such as the total
			// hit count or the maximum score.
			TotalHits totalHits = hits.getTotalHits();
			// the total number of hits, must be interpreted in the context of
			// totalHits.relation
			long numHits = totalHits.value;
			// whether the number of hits is accurate (EQUAL_TO) or a lower bound of the
			// total (GREATER_THAN_OR_EQUAL_TO)
			TotalHits.Relation relation = totalHits.relation;
			float maxScore = hits.getMaxScore();

			// The individual search results nested inside SearchHits can be iterated.
			SearchHit[] searchHits = hits.getHits();
			for (SearchHit hit : searchHits) {
				// Basic information of every hit: index, document id and score.
				String index = hit.getIndex();
				String id = hit.getId();
				float score = hit.getScore();
				String sourceAsString = hit.getSourceAsString();
				Map<String, Object> sourceAsMap = hit.getSourceAsMap();
				String documentTitle = (String) sourceAsMap.get("title");
				List<Object> users = (List<Object>) sourceAsMap.get("user");
				Map<String, Object> innerObject = (Map<String, Object>) sourceAsMap.get("innerObject");
			}

			// Highlighted text fragments can be retrieved from each SearchHit: the hit
			// maps field names to HighlightField instances, each containing one or more
			// highlighted fragments.
			SearchHits hits2 = searchResponse.getHits();
			for (SearchHit hit : hits2.getHits()) {
				// Get the highlights of the "title" field.
				Map<String, HighlightField> highlightFields = hit.getHighlightFields();
				HighlightField highlight = highlightFields.get("title");
				// One or more fragments containing the highlighted field content.
				Text[] fragments = highlight.fragments();
				String fragmentString = fragments[0].string();
			}

			Aggregations aggregations = searchResponse.getAggregations();
			Terms byCompanyAggregation = aggregations.get("by_company"); // get the by_company terms aggregation
			Bucket elasticBucket = byCompanyAggregation.getBucketByKey("Elastic"); // get the bucket with key "Elastic"
			Avg averageAge = elasticBucket.getAggregations().get("average_age"); // get the average_age sub-aggregation
			double avg = averageAge.getValue();

			// When accessing an aggregation by name, assign it to the interface matching
			// the requested aggregation type, otherwise a ClassCastException is thrown
			// (e.g. "by_company" is a terms aggregation, not a range aggregation).
			Aggregation range = aggregations.get("by_company");

			// All aggregations can also be accessed as a map keyed by aggregation name;
			// an explicit cast to the proper aggregation interface is then required.
			Map<String, Aggregation> aggregationMap = aggregations.getAsMap();
			Terms companyAggregation = (Terms) aggregationMap.get("by_company");

			// Top-level aggregations are also available as a list.
			List<Aggregation> aggregationList = aggregations.asList();
			for (Aggregation agg : aggregations) {
				String type = agg.getType();
				if (type.equals(TermsAggregationBuilder.NAME)) {
					Bucket elasticBucket2 = ((Terms) agg).getBucketByKey("Elastic");
					long numberOfDocs = elasticBucket2.getDocCount();
				}
			}

			// Retrieving suggestions: assign them to the proper suggestion type
			// (here TermSuggestion), otherwise a ClassCastException is thrown.
			Suggest suggest = searchResponse.getSuggest();
			TermSuggestion termSuggestion = suggest.getSuggestion("suggest_user");
			for (TermSuggestion.Entry entry : termSuggestion.getEntries()) { // iterate the suggestion entries
				for (TermSuggestion.Entry.Option option : entry) { // iterate the options inside one entry
					String suggestText = option.getText().string();
				}
			}

			// Retrieve the map of ProfileShardResult from the SearchResponse. If the
			// shard key is known it can be looked up directly; otherwise iterating over
			// all profiling results is simpler.
			Map<String, ProfileShardResult> profilingResults = searchResponse.getProfileResults();
			for (Map.Entry<String, ProfileShardResult> profilingResult : profilingResults.entrySet()) {
				// Key identifying which shard the ProfileShardResult belongs to.
				String key = profilingResult.getKey();
				// The ProfileShardResult for that shard.
				ProfileShardResult profileShardResult = profilingResult.getValue();

				// A ProfileShardResult contains one or more query profile results, one
				// per query executed against the underlying Lucene index.
				List<QueryProfileShardResult> queryProfileShardResults = profileShardResult.getQueryProfileResults();
				for (QueryProfileShardResult queryProfileResult : queryProfileShardResults) {
					// Each QueryProfileShardResult gives access to the detailed query
					// tree execution, returned as a list of ProfileResult objects.
					for (ProfileResult profileResult : queryProfileResult.getQueryResults()) {
						String queryName = profileResult.getQueryName(); // name of the Lucene query
						long queryTimeInMillis = profileResult.getTime(); // time spent executing the Lucene query, in ms
						List<ProfileResult> profiledChildren = profileResult.getProfiledChildren(); // profile results of sub-queries, if any
					}

					// The Rest API documentation contains more information about the
					// Lucene collector profiling information.
					CollectorResult collectorResult = queryProfileResult.getCollectorResult(); // profiling of the Lucene collector
					String collectorName = collectorResult.getName(); // name of the Lucene collector
					Long collectorTimeInMillis = collectorResult.getTime(); // time spent executing the Lucene collector, in ms
					List<CollectorResult> profiledChildren = collectorResult.getProfiledChildren(); // profile results of child collectors, if any
				}

				AggregationProfileShardResult aggsProfileResults = profileShardResult.getAggregationProfileResults();
				// Iterate over the aggregation profile results.
				for (ProfileResult profileResult : aggsProfileResults.getProfileResults()) {
					String aggName = profileResult.getQueryName(); // aggregation type (corresponds to the Java class used to execute it)
					long aggTimeInMillis = profileResult.getTime(); // time spent executing the aggregation, in ms
					List<ProfileResult> profiledChildren = profileResult.getProfiledChildren(); // profile results of sub-aggregations, if any
				}
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Multi search: executes several search requests in a single call and checks
	 * the per-item results.
	 */
	public static void SearchRequest2() {
		MultiSearchRequest request = new MultiSearchRequest();
		// Create an empty SearchRequest and fill it like a regular search.
		SearchRequest firstSearchRequest = new SearchRequest();
		SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
		searchSourceBuilder.query(QueryBuilders.matchQuery("user", "kimchy"));
		firstSearchRequest.source(searchSourceBuilder);
		request.add(firstSearchRequest);
		// Build a second SearchRequest and add it to the MultiSearchRequest.
		SearchRequest secondSearchRequest = new SearchRequest();
		searchSourceBuilder = new SearchSourceBuilder();
		searchSourceBuilder.query(QueryBuilders.matchQuery("user", "luca"));
		secondSearchRequest.source(searchSourceBuilder);
		request.add(secondSearchRequest);
		// NOTE: removed an unused `new SearchRequest("posts")` local that was never
		// added to the MultiSearchRequest.
		try {
			MultiSearchResponse response = client.msearch(request, RequestOptions.DEFAULT);
			MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // item of the first search
			assertNull(firstResponse.getFailure()); // completed successfully, so getFailure returns null
			SearchResponse searchResponse = firstResponse.getResponse();
			assertEquals(4, searchResponse.getHits().getTotalHits().value);
			MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // item of the second search
			assertNull(secondResponse.getFailure());
			searchResponse = secondResponse.getResponse();
			assertEquals(1, searchResponse.getHits().getTotalHits().value);
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Field capabilities: inspects the possible types of a field across indices.
	 */
	public static void FieldCapabilitiesRequest() {
		// The fields parameter supports wildcard notation: e.g. "text_*" returns all
		// fields matching the expression.
		FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().fields("user").indices("posts", "authors",
				"contributors");
		request.indicesOptions(IndicesOptions.lenientExpandOpen());
		try {
			FieldCapabilitiesResponse response = client.fieldCaps(request, RequestOptions.DEFAULT);
			// One map entry per possible type of the field, in this case keyword and text.
			Map<String, FieldCapabilities> userResponse = response.getField("user");
			FieldCapabilities textCapabilities = userResponse.get("keyword");
			boolean isSearchable = textCapabilities.isSearchable();
			boolean isAggregatable = textCapabilities.isAggregatable();
			// All indices where the user field has type keyword.
			String[] indices = textCapabilities.indices();
			// Subset of those indices where the user field is not searchable, or null if always searchable.
			String[] nonSearchableIndices = textCapabilities.nonSearchableIndices();
			// Another subset where the user field is not aggregatable, or null if always aggregatable.
			String[] nonAggregatableIndices = textCapabilities.nonAggregatableIndices();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Ranking evaluation: rates the quality of search results against a set of
	 * rated documents using the precision-at-k metric.
	 */
	public static void EvaluationMetric() {
		EvaluationMetric metric = new PrecisionAtK();
		List<RatedDocument> ratedDocs = new ArrayList<>();
		ratedDocs.add(new RatedDocument("posts", "1", 1));
		SearchSourceBuilder searchQuery = new SearchSourceBuilder();
		searchQuery.query(QueryBuilders.matchQuery("user", "kimchy"));
		RatedRequest ratedRequest = new RatedRequest("kimchy_query", ratedDocs, searchQuery);
		List<RatedRequest> ratedRequests = Arrays.asList(ratedRequest);
		RankEvalSpec specification = new RankEvalSpec(ratedRequests, metric);
		RankEvalRequest request = new RankEvalRequest(specification, new String[] { "posts" });
		try {
			RankEvalResponse response = client.rankEval(request, RequestOptions.DEFAULT);
			double evaluationResult = response.getMetricScore(); // overall evaluation result
			assertEquals(1.0 / 3.0, evaluationResult, 0.0);
			Map<String, EvalQueryQuality> partialResults = response.getPartialResults(); // partial results keyed by query id
			EvalQueryQuality evalQuality = partialResults.get("kimchy_query");
			assertEquals("kimchy_query", evalQuality.getId());
			double qualityLevel = evalQuality.metricScore(); // metric score of each partial result
			assertEquals(1.0 / 3.0, qualityLevel, 0.0);
			List<RatedSearchHit> hitsAndRatings = evalQuality.getHitsAndRatings();
			RatedSearchHit ratedSearchHit = hitsAndRatings.get(2);
			// A rated search hit contains the fully fledged SearchHit.
			assertEquals("3", ratedSearchHit.getSearchHit().getId());
			// It also contains an Optional<Integer> rating that is empty when the
			// document received no rating in the request.
			assertFalse(ratedSearchHit.getRating().isPresent());
			MetricDetail metricDetails = evalQuality.getMetricDetails();
			String metricName = metricDetails.getMetricName();
			assertEquals(PrecisionAtK.NAME, metricName); // metric details are named after the metric used in the request
			PrecisionAtK.Detail detail = (PrecisionAtK.Detail) metricDetails;
			// After casting to the metric used in the request, the details give insight
			// into the individual parts of the metric calculation.
			assertEquals(1, detail.getRelevantRetrieved());
			assertEquals(3, detail.getRetrieved());
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Count API: counts documents matching a query and inspects the response.
	 */
	public static void CountRequest() {
		CountRequest countRequest = new CountRequest();
		// Most search parameters are set on the SearchSourceBuilder.
		SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
		// Add a match_all query to the SearchSourceBuilder.
		searchSourceBuilder.query(QueryBuilders.matchAllQuery());
		// Set the SearchSourceBuilder on the CountRequest.
		countRequest.source(searchSourceBuilder);

		// Restrict the request to an index, control how unavailable indices are
		// resolved and how wildcard expressions expand, and prefer local shards
		// (the default is random shards).
		CountRequest countRequest2 = new CountRequest("blog").routing("routing")
				.indicesOptions(IndicesOptions.lenientExpandOpen()).preference("_local");

		// Set the query; it can be any QueryBuilder.
		SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
		sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy"));
		CountRequest countRequest3 = new CountRequest();
		countRequest3.indices("blog", "author");
		countRequest3.source(sourceBuilder);

		try {
			CountResponse countResponse = client.count(countRequest, RequestOptions.DEFAULT);
			// The CountResponse provides the total number of hits and details about the
			// execution itself, e.g. the HTTP status code and early termination.
			long count = countResponse.getCount();
			RestStatus status = countResponse.status();
			Boolean terminatedEarly = countResponse.isTerminatedEarly();
			// Shard-level statistics about the underlying search; possible failures can
			// be handled by iterating the ShardSearchFailure array.
			int totalShards = countResponse.getTotalShards();
			int skippedShards = countResponse.getSkippedShards();
			int successfulShards = countResponse.getSuccessfulShards();
			int failedShards = countResponse.getFailedShards();
			for (ShardSearchFailure failure : countResponse.getShardFailures()) {
				// failures should be handled here
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Simple term query against the "posts" index, printing selected _source
	 * fields of each hit.
	 */
	public static void query() {
		SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
		sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy"));
		sourceBuilder.from(0); // index to start the search from, defaults to 0
		sourceBuilder.size(5); // number of search hits to return, defaults to 10
		sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // optional timeout controlling how long the search may run

		// Wildcard patterns controlling in a fine-grained way which fields are
		// included in or excluded from the returned _source.
		String[] includeFields = new String[] { "title", "innerObject.*" };
		String[] excludeFields = new String[] { "user" };
		sourceBuilder.fetchSource(includeFields, excludeFields);

		SearchRequest searchRequest = new SearchRequest();
		searchRequest.indices("posts");
		searchRequest.source(sourceBuilder);
		try {
			SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
			SearchHits hits = searchResponse.getHits();
			SearchHit[] searchHits = hits.getHits();
			for (SearchHit hit : searchHits) {
				String sourceAsString = hit.getSourceAsString();
				Map<String, Object> sourceAsMap = hit.getSourceAsMap();
				String user = (String) sourceAsMap.get("user");
				String reason = (String) sourceAsMap.get("reason");
				String message = (String) sourceAsMap.get("message");
				String postDate = (String) sourceAsMap.get("postDate");
				// BUG FIX: "_id" is document metadata, never part of _source, so
				// sourceAsMap.get("_id") was always null; read the id from the hit.
				String id = hit.getId();
				System.out.println(id + " " + postDate + " " + reason);
				System.out.println(reason + " " + user);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public static void main(String[] args) {
		query();
		System.out.println("------------ 测试结束 -------------------------");
	}
}
package org.fanli.elastic;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNull;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.http.HttpHost;
import org.apache.lucene.search.TotalHits;
import org.elasticsearch.action.fieldcaps.FieldCapabilities;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.MatchQueryBuilder;
import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.rankeval.EvalQueryQuality;
import org.elasticsearch.index.rankeval.EvaluationMetric;
import org.elasticsearch.index.rankeval.MetricDetail;
import org.elasticsearch.index.rankeval.PrecisionAtK;
import org.elasticsearch.index.rankeval.RankEvalRequest;
import org.elasticsearch.index.rankeval.RankEvalResponse;
import org.elasticsearch.index.rankeval.RankEvalSpec;
import org.elasticsearch.index.rankeval.RatedDocument;
import org.elasticsearch.index.rankeval.RatedRequest;
import org.elasticsearch.index.rankeval.RatedSearchHit;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.Aggregation;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.Aggregations;
import org.elasticsearch.search.aggregations.bucket.terms.Terms;
import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
import org.elasticsearch.search.aggregations.metrics.Avg;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
import org.elasticsearch.search.profile.ProfileResult;
import org.elasticsearch.search.profile.ProfileShardResult;
import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult;
import org.elasticsearch.search.profile.query.CollectorResult;
import org.elasticsearch.search.profile.query.QueryProfileShardResult;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.search.suggest.Suggest;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.search.suggest.SuggestionBuilder;
import org.elasticsearch.search.suggest.term.TermSuggestion;

/**
 * Demo/test harness for the Elasticsearch high-level REST client search APIs:
 * search, multi-search, field capabilities, rank evaluation, count, and simple
 * term/match queries against a development cluster.
 */
public class QueryUtils_Test {

	private static String clusterName = "my-application";
	private static String host = "192.168.1.200";
	private static Integer port = 9200;

	// Equivalent of the database name.
	public static String indexName = "shose";

	// Initialise the API client.
	public static RestHighLevelClient client = new RestHighLevelClient(
			RestClient.builder(new HttpHost(host, port, "http")));

	/**
	 * End-to-end search demo: builds a request with query, sort, source
	 * filtering, highlighting, aggregations and suggestions, then walks through
	 * every part of the response.
	 */
	public static void SearchRequest() {
		SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
		sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy"));
		sourceBuilder.from(0); // index to start the search from, defaults to 0
		sourceBuilder.size(5); // number of search hits to return, defaults to 10
		sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // optional timeout controlling how long the search may run

		SearchRequest searchRequest = new SearchRequest();
		searchRequest.indices("posts");
		searchRequest.source(sourceBuilder);

		// Full-text match query for the text "kimchy" on the "user" field.
		// NOTE(review): this builder is configured but never set on the request;
		// it is kept as an API usage example.
		MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("user", "kimchy");
		matchQueryBuilder.fuzziness(Fuzziness.AUTO); // enable fuzzy matching on the match query
		matchQueryBuilder.prefixLength(3); // set the prefix length option on the match query
		matchQueryBuilder.maxExpansions(10); // set the max expansions option to control the fuzzy process

		// Sort descending by _score (the default) ...
		sourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.DESC));
		// ... and also ascending by the _id field.
		sourceBuilder.sort(new FieldSortBuilder("_id").order(SortOrder.ASC));

		// The search request returns the document _source, but as in the Rest API
		// this can be overridden, e.g. turning _source retrieval off entirely:
		sourceBuilder.fetchSource(false);
		// The method also accepts arrays of wildcard patterns to control in a
		// fine-grained way which fields are included or excluded (this overrides
		// the fetchSource(false) call above):
		String[] includeFields = new String[] { "title", "innerObject.*" };
		String[] excludeFields = new String[] { "user" };
		sourceBuilder.fetchSource(includeFields, excludeFields);

		// Highlighting is achieved by setting a HighlightBuilder on the
		// SearchSourceBuilder; different highlighting behaviour can be defined per
		// field by adding HighlightBuilder.Field instances.
		SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
		HighlightBuilder highlightBuilder = new HighlightBuilder();
		HighlightBuilder.Field highlightTitle = new HighlightBuilder.Field("title");
		highlightTitle.highlighterType("unified");
		highlightBuilder.field(highlightTitle);
		HighlightBuilder.Field highlightUser = new HighlightBuilder.Field("user");
		highlightBuilder.field(highlightUser);
		searchSourceBuilder.highlighter(highlightBuilder);

		// Aggregations: a terms aggregation on the company name with an avg
		// sub-aggregation on the age of the company's employees.
		SearchSourceBuilder searchSourceBuilder2 = new SearchSourceBuilder();
		TermsAggregationBuilder aggregation = AggregationBuilders.terms("by_company").field("company.keyword");
		aggregation.subAggregation(AggregationBuilders.avg("average_age").field("age"));
		searchSourceBuilder2.aggregation(aggregation);

		// Suggestions: use a SuggestionBuilder from the SuggestBuilders factory,
		// add it to a top-level SuggestBuilder, and set that on a SearchSourceBuilder.
		SearchSourceBuilder searchSourceBuilder3 = new SearchSourceBuilder();
		// Term suggestion for the "user" field with the text "kmichy".
		SuggestionBuilder termSuggestionBuilder = SuggestBuilders.termSuggestion("user").text("kmichy");
		SuggestBuilder suggestBuilder = new SuggestBuilder();
		suggestBuilder.addSuggestion("suggest_user", termSuggestionBuilder);
		searchSourceBuilder3.suggest(suggestBuilder);

		try {
			SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);

			// Details about the request execution itself: HTTP status code, execution
			// time, and whether the request terminated early or timed out.
			RestStatus status = searchResponse.status();
			TimeValue took = searchResponse.getTook();
			Boolean terminatedEarly = searchResponse.isTerminatedEarly();
			boolean timedOut = searchResponse.isTimedOut();

			// Shard-level statistics; possible failures can be handled by iterating
			// over the failure array.
			int totalShards = searchResponse.getTotalShards();
			int successfulShards = searchResponse.getSuccessfulShards();
			int failedShards = searchResponse.getFailedShards();
			for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
				// failures should be handled here
			}

			// To access the returned documents, first get the SearchHits.
			SearchHits hits = searchResponse.getHits();
			// SearchHits provides global information about all hits, such as the total
			// hit count or the maximum score.
			TotalHits totalHits = hits.getTotalHits();
			// the total number of hits, must be interpreted in the context of
			// totalHits.relation
			long numHits = totalHits.value;
			// whether the number of hits is accurate (EQUAL_TO) or a lower bound of the
			// total (GREATER_THAN_OR_EQUAL_TO)
			TotalHits.Relation relation = totalHits.relation;
			float maxScore = hits.getMaxScore();

			// The individual search results nested inside SearchHits can be iterated.
			SearchHit[] searchHits = hits.getHits();
			for (SearchHit hit : searchHits) {
				// Basic information of every hit: index, document id and score.
				String index = hit.getIndex();
				String id = hit.getId();
				float score = hit.getScore();
				String sourceAsString = hit.getSourceAsString();
				Map<String, Object> sourceAsMap = hit.getSourceAsMap();
				String documentTitle = (String) sourceAsMap.get("title");
				List<Object> users = (List<Object>) sourceAsMap.get("user");
				Map<String, Object> innerObject = (Map<String, Object>) sourceAsMap.get("innerObject");
			}

			// Highlighted text fragments can be retrieved from each SearchHit: the hit
			// maps field names to HighlightField instances, each containing one or more
			// highlighted fragments.
			SearchHits hits2 = searchResponse.getHits();
			for (SearchHit hit : hits2.getHits()) {
				// Get the highlights of the "title" field.
				Map<String, HighlightField> highlightFields = hit.getHighlightFields();
				HighlightField highlight = highlightFields.get("title");
				// One or more fragments containing the highlighted field content.
				Text[] fragments = highlight.fragments();
				String fragmentString = fragments[0].string();
			}

			Aggregations aggregations = searchResponse.getAggregations();
			Terms byCompanyAggregation = aggregations.get("by_company"); // get the by_company terms aggregation
			Bucket elasticBucket = byCompanyAggregation.getBucketByKey("Elastic"); // get the bucket with key "Elastic"
			Avg averageAge = elasticBucket.getAggregations().get("average_age"); // get the average_age sub-aggregation
			double avg = averageAge.getValue();

			// When accessing an aggregation by name, assign it to the interface matching
			// the requested aggregation type, otherwise a ClassCastException is thrown
			// (e.g. "by_company" is a terms aggregation, not a range aggregation).
			Aggregation range = aggregations.get("by_company");

			// All aggregations can also be accessed as a map keyed by aggregation name;
			// an explicit cast to the proper aggregation interface is then required.
			Map<String, Aggregation> aggregationMap = aggregations.getAsMap();
			Terms companyAggregation = (Terms) aggregationMap.get("by_company");

			// Top-level aggregations are also available as a list.
			List<Aggregation> aggregationList = aggregations.asList();
			for (Aggregation agg : aggregations) {
				String type = agg.getType();
				if (type.equals(TermsAggregationBuilder.NAME)) {
					Bucket elasticBucket2 = ((Terms) agg).getBucketByKey("Elastic");
					long numberOfDocs = elasticBucket2.getDocCount();
				}
			}

			// Retrieving suggestions: assign them to the proper suggestion type
			// (here TermSuggestion), otherwise a ClassCastException is thrown.
			Suggest suggest = searchResponse.getSuggest();
			TermSuggestion termSuggestion = suggest.getSuggestion("suggest_user");
			for (TermSuggestion.Entry entry : termSuggestion.getEntries()) { // iterate the suggestion entries
				for (TermSuggestion.Entry.Option option : entry) { // iterate the options inside one entry
					String suggestText = option.getText().string();
				}
			}

			// Retrieve the map of ProfileShardResult from the SearchResponse. If the
			// shard key is known it can be looked up directly; otherwise iterating over
			// all profiling results is simpler.
			Map<String, ProfileShardResult> profilingResults = searchResponse.getProfileResults();
			for (Map.Entry<String, ProfileShardResult> profilingResult : profilingResults.entrySet()) {
				// Key identifying which shard the ProfileShardResult belongs to.
				String key = profilingResult.getKey();
				// The ProfileShardResult for that shard.
				ProfileShardResult profileShardResult = profilingResult.getValue();

				// A ProfileShardResult contains one or more query profile results, one
				// per query executed against the underlying Lucene index.
				List<QueryProfileShardResult> queryProfileShardResults = profileShardResult.getQueryProfileResults();
				for (QueryProfileShardResult queryProfileResult : queryProfileShardResults) {
					// Each QueryProfileShardResult gives access to the detailed query
					// tree execution, returned as a list of ProfileResult objects.
					for (ProfileResult profileResult : queryProfileResult.getQueryResults()) {
						String queryName = profileResult.getQueryName(); // name of the Lucene query
						long queryTimeInMillis = profileResult.getTime(); // time spent executing the Lucene query, in ms
						List<ProfileResult> profiledChildren = profileResult.getProfiledChildren(); // profile results of sub-queries, if any
					}

					// The Rest API documentation contains more information about the
					// Lucene collector profiling information.
					CollectorResult collectorResult = queryProfileResult.getCollectorResult(); // profiling of the Lucene collector
					String collectorName = collectorResult.getName(); // name of the Lucene collector
					Long collectorTimeInMillis = collectorResult.getTime(); // time spent executing the Lucene collector, in ms
					List<CollectorResult> profiledChildren = collectorResult.getProfiledChildren(); // profile results of child collectors, if any
				}

				AggregationProfileShardResult aggsProfileResults = profileShardResult.getAggregationProfileResults();
				// Iterate over the aggregation profile results.
				for (ProfileResult profileResult : aggsProfileResults.getProfileResults()) {
					String aggName = profileResult.getQueryName(); // aggregation type (corresponds to the Java class used to execute it)
					long aggTimeInMillis = profileResult.getTime(); // time spent executing the aggregation, in ms
					List<ProfileResult> profiledChildren = profileResult.getProfiledChildren(); // profile results of sub-aggregations, if any
				}
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Multi search: executes several search requests in a single call and checks
	 * the per-item results.
	 */
	public static void SearchRequest2() {
		MultiSearchRequest request = new MultiSearchRequest();
		// Create an empty SearchRequest and fill it like a regular search.
		SearchRequest firstSearchRequest = new SearchRequest();
		SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
		searchSourceBuilder.query(QueryBuilders.matchQuery("user", "kimchy"));
		firstSearchRequest.source(searchSourceBuilder);
		request.add(firstSearchRequest);
		// Build a second SearchRequest and add it to the MultiSearchRequest.
		SearchRequest secondSearchRequest = new SearchRequest();
		searchSourceBuilder = new SearchSourceBuilder();
		searchSourceBuilder.query(QueryBuilders.matchQuery("user", "luca"));
		secondSearchRequest.source(searchSourceBuilder);
		request.add(secondSearchRequest);
		// NOTE: removed an unused `new SearchRequest("posts")` local that was never
		// added to the MultiSearchRequest.
		try {
			MultiSearchResponse response = client.msearch(request, RequestOptions.DEFAULT);
			MultiSearchResponse.Item firstResponse = response.getResponses()[0]; // item of the first search
			assertNull(firstResponse.getFailure()); // completed successfully, so getFailure returns null
			SearchResponse searchResponse = firstResponse.getResponse();
			assertEquals(4, searchResponse.getHits().getTotalHits().value);
			MultiSearchResponse.Item secondResponse = response.getResponses()[1]; // item of the second search
			assertNull(secondResponse.getFailure());
			searchResponse = secondResponse.getResponse();
			assertEquals(1, searchResponse.getHits().getTotalHits().value);
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Field capabilities: inspects the possible types of a field across indices.
	 */
	public static void FieldCapabilitiesRequest() {
		// The fields parameter supports wildcard notation: e.g. "text_*" returns all
		// fields matching the expression.
		FieldCapabilitiesRequest request = new FieldCapabilitiesRequest().fields("user").indices("posts", "authors",
				"contributors");
		request.indicesOptions(IndicesOptions.lenientExpandOpen());
		try {
			FieldCapabilitiesResponse response = client.fieldCaps(request, RequestOptions.DEFAULT);
			// One map entry per possible type of the field, in this case keyword and text.
			Map<String, FieldCapabilities> userResponse = response.getField("user");
			FieldCapabilities textCapabilities = userResponse.get("keyword");
			boolean isSearchable = textCapabilities.isSearchable();
			boolean isAggregatable = textCapabilities.isAggregatable();
			// All indices where the user field has type keyword.
			String[] indices = textCapabilities.indices();
			// Subset of those indices where the user field is not searchable, or null if always searchable.
			String[] nonSearchableIndices = textCapabilities.nonSearchableIndices();
			// Another subset where the user field is not aggregatable, or null if always aggregatable.
			String[] nonAggregatableIndices = textCapabilities.nonAggregatableIndices();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Ranking evaluation: rates the quality of search results against a set of
	 * rated documents using the precision-at-k metric.
	 */
	public static void EvaluationMetric() {
		EvaluationMetric metric = new PrecisionAtK();
		List<RatedDocument> ratedDocs = new ArrayList<>();
		ratedDocs.add(new RatedDocument("posts", "1", 1));
		SearchSourceBuilder searchQuery = new SearchSourceBuilder();
		searchQuery.query(QueryBuilders.matchQuery("user", "kimchy"));
		RatedRequest ratedRequest = new RatedRequest("kimchy_query", ratedDocs, searchQuery);
		List<RatedRequest> ratedRequests = Arrays.asList(ratedRequest);
		RankEvalSpec specification = new RankEvalSpec(ratedRequests, metric);
		RankEvalRequest request = new RankEvalRequest(specification, new String[] { "posts" });
		try {
			RankEvalResponse response = client.rankEval(request, RequestOptions.DEFAULT);
			double evaluationResult = response.getMetricScore(); // overall evaluation result
			assertEquals(1.0 / 3.0, evaluationResult, 0.0);
			Map<String, EvalQueryQuality> partialResults = response.getPartialResults(); // partial results keyed by query id
			EvalQueryQuality evalQuality = partialResults.get("kimchy_query");
			assertEquals("kimchy_query", evalQuality.getId());
			double qualityLevel = evalQuality.metricScore(); // metric score of each partial result
			assertEquals(1.0 / 3.0, qualityLevel, 0.0);
			List<RatedSearchHit> hitsAndRatings = evalQuality.getHitsAndRatings();
			RatedSearchHit ratedSearchHit = hitsAndRatings.get(2);
			// A rated search hit contains the fully fledged SearchHit.
			assertEquals("3", ratedSearchHit.getSearchHit().getId());
			// It also contains an Optional<Integer> rating that is empty when the
			// document received no rating in the request.
			assertFalse(ratedSearchHit.getRating().isPresent());
			MetricDetail metricDetails = evalQuality.getMetricDetails();
			String metricName = metricDetails.getMetricName();
			assertEquals(PrecisionAtK.NAME, metricName); // metric details are named after the metric used in the request
			PrecisionAtK.Detail detail = (PrecisionAtK.Detail) metricDetails;
			// After casting to the metric used in the request, the details give insight
			// into the individual parts of the metric calculation.
			assertEquals(1, detail.getRelevantRetrieved());
			assertEquals(3, detail.getRetrieved());
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Count API: counts documents matching a query and inspects the response.
	 */
	public static void CountRequest() {
		CountRequest countRequest = new CountRequest();
		// Most search parameters are set on the SearchSourceBuilder.
		SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
		// Add a match_all query to the SearchSourceBuilder.
		searchSourceBuilder.query(QueryBuilders.matchAllQuery());
		// Set the SearchSourceBuilder on the CountRequest.
		countRequest.source(searchSourceBuilder);

		// Restrict the request to an index, control how unavailable indices are
		// resolved and how wildcard expressions expand, and prefer local shards
		// (the default is random shards).
		CountRequest countRequest2 = new CountRequest("blog").routing("routing")
				.indicesOptions(IndicesOptions.lenientExpandOpen()).preference("_local");

		// Set the query; it can be any QueryBuilder.
		SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
		sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy"));
		CountRequest countRequest3 = new CountRequest();
		countRequest3.indices("blog", "author");
		countRequest3.source(sourceBuilder);

		try {
			CountResponse countResponse = client.count(countRequest, RequestOptions.DEFAULT);
			// The CountResponse provides the total number of hits and details about the
			// execution itself, e.g. the HTTP status code and early termination.
			long count = countResponse.getCount();
			RestStatus status = countResponse.status();
			Boolean terminatedEarly = countResponse.isTerminatedEarly();
			// Shard-level statistics about the underlying search; possible failures can
			// be handled by iterating the ShardSearchFailure array.
			int totalShards = countResponse.getTotalShards();
			int skippedShards = countResponse.getSkippedShards();
			int successfulShards = countResponse.getSuccessfulShards();
			int failedShards = countResponse.getFailedShards();
			for (ShardSearchFailure failure : countResponse.getShardFailures()) {
				// failures should be handled here
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Exact term query on the "id" field of the "shose" index, printing each
	 * hit's _source.
	 */
	public static void termQuery() {
		SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
		sourceBuilder.query(QueryBuilders.termQuery("id", "101"));
		sourceBuilder.from(0); // index to start the search from, defaults to 0
		sourceBuilder.size(5); // number of search hits to return, defaults to 10
		sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // optional timeout controlling how long the search may run
		SearchRequest searchRequest = new SearchRequest();
		searchRequest.indices("shose");
		searchRequest.source(sourceBuilder);
		try {
			SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
			SearchHits hits = searchResponse.getHits();
			SearchHit[] searchHits = hits.getHits();
			for (SearchHit hit : searchHits) {
				String sourceAsString = hit.getSourceAsString();
				System.out.println(sourceAsString);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	/**
	 * Fuzzy full-text match on "name" combined with a second match condition on
	 * "detail", against the "shose" index.
	 *
	 * Sample documents:
	 * {"price":"5115.22","name":"测试第一次","id":"101","detail":"测试"}
	 * {"id":"995","name":"测试中的5","price":"494985","detail":"测试进欧冠任何5"}
	 */
	public static void MatchQuery() {
		// Full-text match query for the text "测试" on the "name" field.
		MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("name", "测试");
		matchQueryBuilder.fuzziness(Fuzziness.AUTO); // enable fuzzy matching on the match query
		matchQueryBuilder.prefixLength(3); // set the prefix length option on the match query
		matchQueryBuilder.maxExpansions(10); // set the max expansions option to control the fuzzy process

		// Second condition on the "detail" field.
		MatchQueryBuilder matchQueryBuilder2 = new MatchQueryBuilder("detail", "测");

		SearchSourceBuilder sourceBuilder = new SearchSourceBuilder();
		// BUG FIX: chaining .query(a).query(b) does NOT combine the two conditions —
		// the second call simply replaces the first. Wrap both in a bool query so
		// that both match conditions actually apply.
		sourceBuilder.query(QueryBuilders.boolQuery().must(matchQueryBuilder).must(matchQueryBuilder2));

		SearchRequest searchRequest = new SearchRequest();
		searchRequest.indices("shose");
		searchRequest.source(sourceBuilder);
		try {
			SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
			SearchHits hits = searchResponse.getHits();
			SearchHit[] searchHits = hits.getHits();
			for (SearchHit hit : searchHits) {
				String sourceAsString = hit.getSourceAsString();
				System.out.println(sourceAsString);
			}
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public static void main(String[] args) {
		MatchQuery();
		System.out.println("------------ 测试结束 -------------------------");
	}
}
New file @@ -0,0 +1,76 @@
package org.fanli.elastic;

import java.util.List;

import org.junit.Test;

import com.yeshi.fanli.dao.elastic.ESOrderDao;
import com.yeshi.fanli.entity.order.ESOrder;
import com.yeshi.fanli.util.BeanUtil;

// Manual integration checks for ESOrderDao (save / update / delete / query).
// NOTE(review): these depend on a live Elasticsearch cluster reachable through
// the Spring context behind BeanUtil — they are not self-contained unit tests,
// which is presumably why most @Test annotations are commented out.
public class Test_Query {

	// Indexes one fixture order; the document id is "<platform>#<tradeId>".
	// @Test
	public void add() {
		try {
			ESOrderDao dao = BeanUtil.getBean(ESOrderDao.class);
			ESOrder order = new ESOrder();
			order.setUid(974767L);
			// order.setUidDirect(974767L);
			// order.setUidIndirect(333808L);
			order.setPlatform(2);
			order.setOrderNo("102594835359");
			order.setTradeId("102594835359-100000650837");
			order.setGoodsName("荣耀畅玩8C两天一充 莱茵护眼 刘海屏 全网通版4GB+32GB 幻夜黑 移动联通电信4G全面屏手机 双卡双待");
			dao.save(order, order.getPlatform() + "#" + order.getTradeId());
			System.out.println("-------------结束-------------");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Builds an order but never calls the dao — looks unfinished; TODO confirm.
	// @Test
	public void update() {
		try {
			ESOrderDao dao = BeanUtil.getBean(ESOrderDao.class);
			ESOrder order = new ESOrder();
			order.setPlatform(1);
			System.out.println("-------------结束-------------");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Deletes a document by its id string.
	// @Test
	public void detele() {
		try {
			ESOrderDao dao = BeanUtil.getBean(ESOrderDao.class);
			dao.delete("1_102205518158-42030576999");
			System.out.println("-------------结束-------------");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Queries orders via dao.query("8C", "974767") and prints uid, goods name
	// and the direct/indirect uids of each returned order.
	@Test
	public void queryMatch() {
		try {
			ESOrderDao dao = BeanUtil.getBean(ESOrderDao.class);
			List<ESOrder> list = dao.query("8C", "974767");
			for (ESOrder esOrder : list) {
				System.out.println(esOrder.getUid() + " " + esOrder.getGoodsName());
				System.out.println(esOrder.getUidDirect() + " " + esOrder.getUidIndirect());
			}
			System.out.println("-------------结束-------------");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
fanli/src/test/java/org/fanli/elastic/UpdateUtils.java
New file @@ -0,0 +1,293 @@
package org.fanli.elastic;

import java.io.IOException;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

import org.apache.http.HttpHost;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.bulk.BulkItemResponse;
import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

// Demo/tutorial class for the High Level REST Client update and bulk APIs.
// NOTE(review): the shared client is never closed and clusterName is unused.
public class UpdateUtils {

	private static String clusterName = "my-application";
	private static String host = "192.168.1.200";
	private static Integer port = 9200;

	// acts like a database name
	public static String indexName = "shose";

	// initialize the API client
	public static RestHighLevelClient client = new RestHighLevelClient(
			RestClient.builder(new HttpHost(host, port, "http")));

	// Tour of the ways an UpdateRequest can be built and configured; nothing
	// here is executed — the requests are only constructed.
	public static void update1() {
		// Index 、Document id
		UpdateRequest request = new UpdateRequest("posts", "1");

		// 1. update with an inline script
		Map<String, Object> parameters = new HashMap<String, Object>();
		parameters.put("count", 4);
		Script inline = new Script(ScriptType.INLINE, "painless", "ctx._source.field += params.count", parameters);
		request.script(inline);

		// 2. update with a stored script
		Script stored = new Script(ScriptType.STORED, null, "increment-field", parameters);
		request.script(stored);

		// 1. partial document source provided as a JSON string
		String jsonString = "{" + "\"updated\":\"2017-01-01\"," + "\"reason\":\"daily update\"" + "}";
		request.doc(jsonString, XContentType.JSON);

		// partial document source provided as a Map
		Map<String, Object> jsonMap = new HashMap<>();
		jsonMap.put("updated", new Date());
		jsonMap.put("reason", "daily update");
		UpdateRequest request2 = new UpdateRequest("posts", "1").doc(jsonMap);

		// partial document source provided as an XContentBuilder
		try {
			XContentBuilder builder = XContentFactory.jsonBuilder();
			builder.startObject();
			{
				builder.timeField("updated", new Date());
				builder.field("reason", "daily update");
			}
			builder.endObject();
			UpdateRequest request3 = new UpdateRequest("posts", "1").doc(builder);
		} catch (Exception e) {
			e.printStackTrace();
		}

		// partial document source provided as key/value pairs
		UpdateRequest request4 = new UpdateRequest("posts", "1").doc("updated", new Date(), "reason", "daily update");

		// upsert document used when the target does not exist yet
		String jsonString2 = "{\"created\":\"2017-01-01\"}";
		request.upsert(jsonString2, XContentType.JSON);

		// configure source inclusion for specific fields
		String[] includes = new String[] { "updated", "r*" };
		String[] excludes = Strings.EMPTY_ARRAY;
		request.fetchSource(new FetchSourceContext(true, includes, excludes));

		// configure source exclusion for specific fields
		String[] includes2 = Strings.EMPTY_ARRAY;
		String[] excludes2 = new String[] { "updated" };
		request.fetchSource(new FetchSourceContext(true, includes2, excludes2));

		// optimistic concurrency control: expected sequence number / primary term
		request.setIfSeqNo(2L);
		request.setIfPrimaryTerm(1L);
		// disable noop detection
		request.detectNoop(false);
		// the script must run whether or not the document exists, i.e. the script
		// takes care of creating a document that does not exist yet
		request.scriptedUpsert(true);
		// the partial document, if the target does not exist yet, must be used as
		// the upsert document
		request.docAsUpsert(true);
		// number of shard copies that must be active before proceeding with the update
		request.waitForActiveShards(2);
		// shard copy count given as an ActiveShardCount: ActiveShardCount.ALL,
		// ActiveShardCount.ONE or ActiveShardCount.DEFAULT (the default)
		request.waitForActiveShards(ActiveShardCount.ALL);
	}

	// Executes a simple partial-document update; the commented block below is
	// kept as reference material for handling UpdateResponse and failures.
	public static void update2() {
		// add fields
		UpdateRequest request = new UpdateRequest("posts", "1");
		String jsonString = "{" + "\"updated\":\"2017-01-01\"," + "\"reason\":\"daily update\"" + "}";
		request.doc(jsonString, XContentType.JSON);
		try {
			UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
			//
			//
			// The returned UpdateResponse allows retrieving information about the
			// executed operation:
			// String index = updateResponse.getIndex();
			// String id = updateResponse.getId();
			// long version = updateResponse.getVersion();
			// if (updateResponse.getResult() == DocWriteResponse.Result.CREATED) {
			// // handle the case where the document was created for the first time (upsert)
			// } else if (updateResponse.getResult() == DocWriteResponse.Result.UPDATED) {
			// // handle the case where the document was updated
			// } else if (updateResponse.getResult() == DocWriteResponse.Result.DELETED) {
			// // handle the case where the document was deleted
			// } else if (updateResponse.getResult() == DocWriteResponse.Result.NOOP) {
			// // handle the case where the document was not affected by the update,
			// // i.e. no operation was performed on it (noop)
			// }
			//
			// When source retrieval was enabled on the UpdateRequest via fetchSource,
			// the response contains the source of the updated document:
			// GetResult result = updateResponse.getGetResult(); // updated document as GetResult
			// if (result.isExists()) {
			// // source of the updated document as a String
			// String sourceAsString = result.sourceAsString();
			// // source of the updated document as a Map<String, Object>
			// Map<String, Object> sourceAsMap = result.sourceAsMap();
			// // source of the updated document as a byte[]
			// byte[] sourceAsBytes = result.source();
			// } else {
			// // handle the case where the document source is absent from the
			// // response (which is the default)
			// }
			//
			// Shard failures can be checked:
			// ReplicationResponse.ShardInfo shardInfo = updateResponse.getShardInfo();
			// if (shardInfo.getTotal() != shardInfo.getSuccessful()) {
			// // handle the case where fewer shards succeeded than the total
			// }
			// if (shardInfo.getFailed() > 0) {
			// for (ReplicationResponse.ShardInfo.Failure failure : shardInfo.getFailures()) {
			// String reason = failure.reason();
			// }
			// // handle potential failures
			// }
			//
			// When an UpdateRequest is executed against a document that does not
			// exist, the response has a 404 status code and an ElasticsearchException
			// is thrown, which needs to be handled as follows:
			// UpdateRequest request2 = new UpdateRequest("posts", "does_not_exist").doc("field", "value");
			// try {
			// UpdateResponse updateResponse2 = client.update(request2, RequestOptions.DEFAULT);
			// } catch (ElasticsearchException e) {
			// if (e.status() == RestStatus.NOT_FOUND) {
			// // handle the exception thrown because the document does not exist
			// }
			// }
			//
			// If there is a version conflict, an ElasticsearchException is thrown:
			// UpdateRequest request3 = new UpdateRequest("posts", "1").doc("field", "value").setIfSeqNo(101L)
			// .setIfPrimaryTerm(200L);
			// try {
			// UpdateResponse updateResponse3 = client.update(request3, RequestOptions.DEFAULT);
			// } catch (ElasticsearchException e) {
			// if (e.status() == RestStatus.CONFLICT) {
			// // the thrown exception indicates a version conflict error was returned
			// }
			// }
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Minimal update: overwrite the "updated"/"reason" fields of posts/1.
	public static void update3() {
		UpdateRequest request = new UpdateRequest("posts", "1");
		String jsonString = "{" + "\"updated\":\"2019-09-09\"," + "\"reason\":\"daily update9\"" + "}";
		request.doc(jsonString, XContentType.JSON);
		try {
			UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	// Bulk API demo: build bulk requests, execute one, and walk the responses.
	public static void Multupdate1() {
		// The bulk API only supports documents encoded in JSON or SMILE;
		// providing documents in any other format results in an error.
		BulkRequest request = new BulkRequest();
		request.add(new IndexRequest("posts").id("1").source(XContentType.JSON, "field", "foo"));
		request.add(new IndexRequest("posts").id("2").source(XContentType.JSON, "field", "bar"));
		request.add(new IndexRequest("posts").id("3").source(XContentType.JSON, "field", "baz"));

		// a bulk request can mix delete, update and index operations
		BulkRequest request2 = new BulkRequest();
		request2.add(new DeleteRequest("posts", "3"));
		request2.add(new UpdateRequest("posts", "2").doc(XContentType.JSON, "other", "test"));
		request2.add(new IndexRequest("posts").id("4").source(XContentType.JSON, "field", "baz"));

		// optional parameters
		request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
		request.setRefreshPolicy("wait_for");
		request.pipeline("pipelineId");
		request.routing("routingId");
		BulkRequest defaulted = new BulkRequest("posts");

		// execute
		try {
			BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
			for (BulkItemResponse bulkItemResponse : bulkResponse) {
				DocWriteResponse itemResponse = bulkItemResponse.getResponse();
				switch (bulkItemResponse.getOpType()) {
				case INDEX:
					// retrieve the response of the operation (success or not);
					// IndexResponse, UpdateResponse and DeleteResponse can all be
					// viewed as DocWriteResponse instances
					// (INDEX intentionally falls through to CREATE — both yield an IndexResponse)
				case CREATE:
					IndexResponse indexResponse = (IndexResponse) itemResponse;
					break;
				case UPDATE:
					UpdateResponse updateResponse = (UpdateResponse) itemResponse;
					// handle the response of an update operation
					break;
				case DELETE:
					// handle the response of a delete operation
					DeleteResponse deleteResponse = (DeleteResponse) itemResponse;
				}
			}
			if (bulkResponse.hasFailures()) {
				// the bulk response provides a quick check whether one or more
				// operations failed
			}
			// otherwise it is necessary to iterate over all operation results to
			// check whether an operation failed and, if so, retrieve the failure:
			for (BulkItemResponse bulkItemResponse : bulkResponse) {
				if (bulkItemResponse.isFailed()) {
					BulkItemResponse.Failure failure = bulkItemResponse.getFailure();
				}
			}
			// listener hooks invoked around each bulk execution when used with a
			// BulkProcessor (constructed here but not wired to a processor)
			BulkProcessor.Listener listener = new BulkProcessor.Listener() {
				@Override
				public void beforeBulk(long executionId, BulkRequest request) {
				}

				@Override
				public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
				}

				@Override
				public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
				}
			};
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public static void main(String[] args) {
		update3();
		System.out.println("------------ 测试结束 -------------------------");
	}
}