diff --git a/.github/workflows/ci_build.yml b/.github/workflows/ci_build.yml
index 073d46522..2544c5706 100644
--- a/.github/workflows/ci_build.yml
+++ b/.github/workflows/ci_build.yml
@@ -2,9 +2,9 @@ name: KnowStreaming Build
 
 on:
   push:
-    branches: [ "master", "ve_3.x", "ve_demo_3.x" ]
+    branches: [ "*" ]
   pull_request:
-    branches: [ "master", "ve_3.x", "ve_demo_3.x" ]
+    branches: [ "*" ]
 
 jobs:
   build:
diff --git a/README.md b/README.md
index d95a05874..ac9415bec 100644
--- a/README.md
+++ b/README.md
@@ -101,7 +101,9 @@
 **点击 [这里](https://doc.knowstreaming.com/product),也可以从官网获取到更多文档**
 
-
+**`产品网址`**
+- [产品官网:https://knowstreaming.com](https://knowstreaming.com)
+- [体验环境:https://demo.knowstreaming.com](https://demo.knowstreaming.com),登录账号:admin/admin
 
@@ -144,7 +146,7 @@ PS: 提问请尽量把问题一次性描述清楚,并告知环境信息情况
 
 **`2、微信群`**
 
-微信加群:添加`mike_zhangliang`、`PenceXie` 、`szzdzhp001`的微信号备注KnowStreaming加群。
+微信加群:添加`PenceXie`、`szzdzhp001`的微信号备注KnowStreaming加群。
 加群之前有劳点一下 star,一个小小的 star 是对KnowStreaming作者们努力建设社区的动力。
diff --git a/Releases_Notes.md b/Releases_Notes.md
index a606ef722..ce9a33d89 100644
--- a/Releases_Notes.md
+++ b/Releases_Notes.md
@@ -1,4 +1,78 @@
+## v3.4.0
+
+**问题修复**
+- [Bugfix]修复 Overview 指标文案错误的问题 ([#1190](https://github.com/didi/KnowStreaming/issues/1190))
+- [Bugfix]修复删除 Kafka 集群后,Connect 集群任务出现 NPE 问题 ([#1129](https://github.com/didi/KnowStreaming/issues/1129))
+- [Bugfix]修复在 Ldap 登录时,设置 auth-user-registration: false 会导致空指针的问题 ([#1117](https://github.com/didi/KnowStreaming/issues/1117))
+- [Bugfix]修复 Ldap 登录,调用 user.getId() 出现 NPE 的问题 ([#1108](https://github.com/didi/KnowStreaming/issues/1108))
+- [Bugfix]修复前端新增角色失败等问题 ([#1107](https://github.com/didi/KnowStreaming/issues/1107))
+- [Bugfix]修复 ZK 四字命令解析错误的问题
+- [Bugfix]修复 zk standalone 模式下,状态获取错误的问题
+- [Bugfix]修复 Broker 元信息解析方法未调用导致接入集群失败的问题 ([#993](https://github.com/didi/KnowStreaming/issues/993))
+- [Bugfix]修复 ConsumerAssignment 类型转换错误的问题
+- [Bugfix]修复对 Connect 集群的 clusterUrl 的动态更新导致配置不生效的问题 ([#1079](https://github.com/didi/KnowStreaming/issues/1079))
+- [Bugfix]修复消费组不支持重置到最旧 Offset 的问题 ([#1059](https://github.com/didi/KnowStreaming/issues/1059))
+- [Bugfix]后端增加查看 User 密码的权限点 ([#1095](https://github.com/didi/KnowStreaming/issues/1095))
+- [Bugfix]修复 Connect-JMX 端口维护信息错误的问题 ([#1146](https://github.com/didi/KnowStreaming/issues/1146))
+- [Bugfix]修复系统管理子应用无法正常启动的问题 ([#1167](https://github.com/didi/KnowStreaming/issues/1167))
+- [Bugfix]修复 Security 模块权限点缺失的问题 ([#1069](https://github.com/didi/KnowStreaming/issues/1069)), ([#1154](https://github.com/didi/KnowStreaming/issues/1154))
+- [Bugfix]修复 Connect-Worker Jmx 不生效的问题 ([#1067](https://github.com/didi/KnowStreaming/issues/1067))
+- [Bugfix]修复权限 ACL 管理中,消费组列表展示错误的问题 ([#1037](https://github.com/didi/KnowStreaming/issues/1037))
+- [Bugfix]修复 Connect 模块没有默认勾选指标的问题 ([#1022](https://github.com/didi/KnowStreaming/issues/1022))
+- [Bugfix]修复 es 索引 create/delete 死循环的问题 ([#1021](https://github.com/didi/KnowStreaming/issues/1021))
+- [Bugfix]修复 Connect-GroupDescription 解析失败的问题 ([#1015](https://github.com/didi/KnowStreaming/issues/1015))
+- [Bugfix]修复 Prometheus 开放接口中,Partition 指标 tag 缺失的问题 ([#1014](https://github.com/didi/KnowStreaming/issues/1014))
+- [Bugfix]修复 Topic 消息展示,offset 为 0 不显示的问题 ([#1192](https://github.com/didi/KnowStreaming/issues/1192))
+- [Bugfix]修复重置 Offset 接口调用过多的问题
+- [Bugfix]Connect 提交任务变更为只保存用户修改的配置,并修复 JSON 模式下配置展示不全的问题 ([#1158](https://github.com/didi/KnowStreaming/issues/1158))
+- [Bugfix]修复消费组 Offset 重置后,提示重置成功,但是前端不刷新数据,Offset 无变化的问题 ([#1090](https://github.com/didi/KnowStreaming/issues/1090))
+- [Bugfix]修复未勾选系统管理查看权限,但是依然可以查看系统管理的问题 ([#1105](https://github.com/didi/KnowStreaming/issues/1105))
+
+
+**产品优化**
+- [Optimize]补充接入集群时,可选的 Kafka 版本列表 ([#1204](https://github.com/didi/KnowStreaming/issues/1204))
+- [Optimize]GroupTopic 信息修改为实时获取 ([#1196](https://github.com/didi/KnowStreaming/issues/1196))
+- [Optimize]增加 AdminClient 观测信息 ([#1111](https://github.com/didi/KnowStreaming/issues/1111))
+- [Optimize]增加 Connector 运行状态指标 ([#1110](https://github.com/didi/KnowStreaming/issues/1110))
+- [Optimize]统一 DB 元信息更新格式 ([#1127](https://github.com/didi/KnowStreaming/issues/1127)), ([#1125](https://github.com/didi/KnowStreaming/issues/1125)), ([#1006](https://github.com/didi/KnowStreaming/issues/1006))
+- [Optimize]日志输出增加支持 MDC,方便用户在 logback.xml 中 json 格式化日志 ([#1032](https://github.com/didi/KnowStreaming/issues/1032))
+- [Optimize]Jmx 相关日志优化 ([#1082](https://github.com/didi/KnowStreaming/issues/1082))
+- 
[Optimize]Topic-Partitions增加主动超时功能 ([#1076](https://github.com/didi/KnowStreaming/issues/1076)) +- [Optimize]Topic-Messages页面后端增加按照Partition和Offset纬度的排序 ([#1075](https://github.com/didi/KnowStreaming/issues/1075)) +- [Optimize]Connect-JSON模式下的JSON格式和官方API的格式不一致 ([#1080](https://github.com/didi/KnowStreaming/issues/1080)), ([#1153](https://github.com/didi/KnowStreaming/issues/1153)), ([#1192](https://github.com/didi/KnowStreaming/issues/1192)) +- [Optimize]登录页面展示的 star 数量修改为最新的数量 +- [Optimize]Group 列表的 maxLag 指标调整为实时获取 ([#1074](https://github.com/didi/KnowStreaming/issues/1074)) +- [Optimize]Connector增加重启、编辑、删除等权限点 ([#1066](https://github.com/didi/KnowStreaming/issues/1066)), ([#1147](https://github.com/didi/KnowStreaming/issues/1147)) +- [Optimize]优化 pom.xml 中,KS版本的标签名 +- [Optimize]优化集群Brokers中, Controller显示存在延迟的问题 ([#1162](https://github.com/didi/KnowStreaming/issues/1162)) +- [Optimize]bump jackson version to 2.13.5 +- [Optimize]权限新增 ACL,自定义权限配置,资源 TransactionalId 优化 ([#1192](https://github.com/didi/KnowStreaming/issues/1192)) +- [Optimize]Connect 样式优化 +- [Optimize]消费组详情控制数据实时刷新 + + +**功能新增** +- [Feature]新增删除 Group 或 GroupOffset 功能 ([#1064](https://github.com/didi/KnowStreaming/issues/1064)), ([#1084](https://github.com/didi/KnowStreaming/issues/1084)), ([#1040](https://github.com/didi/KnowStreaming/issues/1040)), ([#1144](https://github.com/didi/KnowStreaming/issues/1144)) +- [Feature]增加 Truncate 数据功能 ([#1062](https://github.com/didi/KnowStreaming/issues/1062)), ([#1043](https://github.com/didi/KnowStreaming/issues/1043)), ([#1145](https://github.com/didi/KnowStreaming/issues/1145)) +- [Feature]支持指定 Server 的具体 Jmx 端口 ([#965](https://github.com/didi/KnowStreaming/issues/965)) + + +**文档更新** +- [Doc]FAQ 补充 ES 8.x 版本使用说明 ([#1189](https://github.com/didi/KnowStreaming/issues/1189)) +- [Doc]补充启动失败的说明 ([#1126](https://github.com/didi/KnowStreaming/issues/1126)) +- [Doc]补充 ZK 无数据排查说明 ([#1004](https://github.com/didi/KnowStreaming/issues/1004)) +- [Doc]无数据排查文档,补充 ES 集群 Shard 满的异常日志 +- [Doc]README 补充页面无数据排查手册链接 +- [Doc]补充连接特定 Jmx 端口的说明 ([#965](https://github.com/didi/KnowStreaming/issues/965)) +- [Doc]补充 zk_properties 字段的使用说明 ([#1003](https://github.com/didi/KnowStreaming/issues/1003)) + + +--- + + ## v3.3.0 **问题修复** diff --git "a/docs/dev_guide/\346\214\207\346\240\207\350\257\264\346\230\216.md" "b/docs/dev_guide/\346\214\207\346\240\207\350\257\264\346\230\216.md" index 1eb9a94b8..9e6101da5 100644 --- "a/docs/dev_guide/\346\214\207\346\240\207\350\257\264\346\230\216.md" +++ "b/docs/dev_guide/\346\214\207\346\240\207\350\257\264\346\230\216.md" @@ -6,72 +6,72 @@ ### 3.3.1、Cluster 指标 -| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 | -| ------------------------- | -------- | ------------------------------------ | ---------------- | --------------- | -| HealthScore | 分 | 集群总体的健康分 | 全部版本 | 开源版 | -| HealthCheckPassed | 个 | 集群总体健康检查通过数 | 全部版本 | 开源版 | -| HealthCheckTotal | 个 | 集群总体健康检查总数 | 全部版本 | 开源版 | +| 指标名称 | 指标单位 | 指标含义 | kafka 版本 | 企业/开源版指标 | +| ------------------------- | -------- |--------------------------------| ---------------- | --------------- | +| HealthScore | 分 | 集群总体的健康分 | 全部版本 | 开源版 | +| HealthCheckPassed | 个 | 集群总体健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal | 个 | 集群总体健康检查总数 | 全部版本 | 开源版 | | HealthScore_Topics | 分 | 集群 Topics 的健康分 | 全部版本 | 开源版 | -| HealthCheckPassed_Topics | 个 | 集群 Topics 健康检查通过数 | 全部版本 | 开源版 | -| HealthCheckTotal_Topics | 个 | 集群 Topics 健康检查总数 | 全部版本 | 开源版 | +| HealthCheckPassed_Topics | 个 | 集群 Topics 健康检查通过数 | 全部版本 | 开源版 | +| HealthCheckTotal_Topics | 个 | 集群 Topics 
健康检查总数 | 全部版本 | 开源版 |
| HealthScore_Brokers | 分 | 集群 Brokers 的健康分 | 全部版本 | 开源版 |
-| HealthCheckPassed_Brokers | 个 | 集群 Brokers 健康检查通过数 | 全部版本 | 开源版 |
-| HealthCheckTotal_Brokers | 个 | 集群 Brokers 健康检查总数 | 全部版本 | 开源版 |
+| HealthCheckPassed_Brokers | 个 | 集群 Brokers 健康检查通过数 | 全部版本 | 开源版 |
+| HealthCheckTotal_Brokers | 个 | 集群 Brokers 健康检查总数 | 全部版本 | 开源版 |
| HealthScore_Groups | 分 | 集群 Groups 的健康分 | 全部版本 | 开源版 |
-| HealthCheckPassed_Groups | 个 | 集群 Groups 健康检查总数 | 全部版本 | 开源版 |
-| HealthCheckTotal_Groups | 个 | 集群 Groups 健康检查总数 | 全部版本 | 开源版 |
-| HealthScore_Cluster | 分 | 集群自身的健康分 | 全部版本 | 开源版 |
-| HealthCheckPassed_Cluster | 个 | 集群自身健康检查通过数 | 全部版本 | 开源版 |
-| HealthCheckTotal_Cluster | 个 | 集群自身健康检查总数 | 全部版本 | 开源版 |
-| TotalRequestQueueSize | 个 | 集群中总的请求队列数 | 全部版本 | 开源版 |
-| TotalResponseQueueSize | 个 | 集群中总的响应队列数 | 全部版本 | 开源版 |
+| HealthCheckPassed_Groups | 个 | 集群 Groups 健康检查通过数 | 全部版本 | 开源版 |
+| HealthCheckTotal_Groups | 个 | 集群 Groups 健康检查总数 | 全部版本 | 开源版 |
+| HealthScore_Cluster | 分 | 集群自身的健康分 | 全部版本 | 开源版 |
+| HealthCheckPassed_Cluster | 个 | 集群自身健康检查通过数 | 全部版本 | 开源版 |
+| HealthCheckTotal_Cluster | 个 | 集群自身健康检查总数 | 全部版本 | 开源版 |
+| TotalRequestQueueSize | 个 | 集群中总的请求队列数 | 全部版本 | 开源版 |
+| TotalResponseQueueSize | 个 | 集群中总的响应队列数 | 全部版本 | 开源版 |
| EventQueueSize | 个 | 集群中 Controller 的 EventQueue 大小 | 2.0.0 及以上版本 | 开源版 |
-| ActiveControllerCount | 个 | 集群中存活的 Controller 数 | 全部版本 | 开源版 |
-| TotalProduceRequests | 个 | 集群中的 Produce 每秒请求数 | 全部版本 | 开源版 |
-| TotalLogSize | byte | 集群总的已使用的磁盘大小 | 全部版本 | 开源版 |
-| ConnectionsCount | 个 | 集群的连接(Connections)个数 | 全部版本 | 开源版 |
-| Zookeepers | 个 | 集群中存活的 zk 节点个数 | 全部版本 | 开源版 |
+| ActiveControllerCount | 个 | 集群中存活的 Controller 数 | 全部版本 | 开源版 |
+| TotalProduceRequests | 个 | 集群中的 Produce 每秒请求数 | 全部版本 | 开源版 |
+| TotalLogSize | byte | 集群总的已使用的磁盘大小 | 全部版本 | 开源版 |
+| ConnectionsCount | 个 | 集群的连接(Connections)个数 | 全部版本 | 开源版 |
+| Zookeepers | 个 | 集群中存活的 zk 节点个数 | 全部版本 | 开源版 |
| ZookeepersAvailable | 是/否 | ZK 地址是否合法 | 全部版本 | 开源版 |
| Brokers | 个 | 集群的 broker 的总数 | 全部版本 | 开源版 |
-| BrokersAlive | 个 | 集群的 broker 的存活数 | 全部版本 | 开源版 |
-| BrokersNotAlive | 个 | 集群的 broker 的未存活数 | 全部版本 | 开源版 |
+| BrokersAlive | 个 | 集群的 broker 的存活数 | 全部版本 | 开源版 |
+| BrokersNotAlive | 个 | 集群的 broker 的未存活数 | 全部版本 | 开源版 |
| Replicas | 个 | 集群中 Replica 的总数 | 全部版本 | 开源版 |
| Topics | 个 | 集群中 Topic 的总数 | 全部版本 | 开源版 |
-| Partitions | 个 | 集群的 Partitions 总数 | 全部版本 | 开源版 |
+| Partitions | 个 | 集群的 Partitions 总数 | 全部版本 | 开源版 |
| PartitionNoLeader | 个 | 集群中的 PartitionNoLeader 总数 | 全部版本 | 开源版 |
-| PartitionMinISR_S | 个 | 集群中的小于 PartitionMinISR 总数 | 全部版本 | 开源版 |
-| PartitionMinISR_E | 个 | 集群中的等于 PartitionMinISR 总数 | 全部版本 | 开源版 |
-| PartitionURP | 个 | 集群中的未同步的 Partition 总数 | 全部版本 | 开源版 |
-| MessagesIn | 条/s | 集群每条消息写入条数 | 全部版本 | 开源版 |
-| Messages | 条 | 集群总的消息条数 | 全部版本 | 开源版 |
-| LeaderMessages | 条 | 集群中 leader 总的消息条数 | 全部版本 | 开源版 |
-| BytesIn | byte/s | 集群的每秒写入字节数 | 全部版本 | 开源版 |
-| BytesIn_min_5 | byte/s | 集群的每秒写入字节数,5 分钟均值 | 全部版本 | 开源版 |
-| BytesIn_min_15 | byte/s | 集群的每秒写入字节数,15 分钟均值 | 全部版本 | 开源版 |
-| BytesOut | byte/s | 集群的每秒流出字节数 | 全部版本 | 开源版 |
-| BytesOut_min_5 | byte/s | 集群的每秒流出字节数,5 分钟均值 | 全部版本 | 开源版 |
-| BytesOut_min_15 | byte/s | 集群的每秒流出字节数,15 分钟均值 | 全部版本 | 开源版 |
+| PartitionMinISR_S | 个 | 集群中的小于 PartitionMinISR 总数 | 全部版本 | 开源版 |
+| PartitionMinISR_E | 个 | 集群中的等于 PartitionMinISR 总数 | 全部版本 | 开源版 |
+| PartitionURP | 个 | 集群中的未同步的 Partition 总数 | 全部版本 | 开源版 |
+| MessagesIn | 条/s | 集群每秒消息写入条数 | 全部版本 | 开源版 |
+| Messages | 条 | 集群总的消息条数 | 全部版本 | 开源版 |
+| LeaderMessages | 条 | 集群中 leader 总的消息条数 | 全部版本 | 开源版 |
+| BytesIn | byte/s 
| 集群的每秒写入字节数 | 全部版本 | 开源版 | +| BytesIn_min_5 | byte/s | 集群的每秒写入字节数,5 分钟均值 | 全部版本 | 开源版 | +| BytesIn_min_15 | byte/s | 集群的每秒写入字节数,15 分钟均值 | 全部版本 | 开源版 | +| BytesOut | byte/s | 集群的每秒流出字节数 | 全部版本 | 开源版 | +| BytesOut_min_5 | byte/s | 集群的每秒流出字节数,5 分钟均值 | 全部版本 | 开源版 | +| BytesOut_min_15 | byte/s | 集群的每秒流出字节数,15 分钟均值 | 全部版本 | 开源版 | | Groups | 个 | 集群中 Group 的总数 | 全部版本 | 开源版 | | GroupActives | 个 | 集群中 ActiveGroup 的总数 | 全部版本 | 开源版 | | GroupEmptys | 个 | 集群中 EmptyGroup 的总数 | 全部版本 | 开源版 | | GroupRebalances | 个 | 集群中 RebalanceGroup 的总数 | 全部版本 | 开源版 | | GroupDeads | 个 | 集群中 DeadGroup 的总数 | 全部版本 | 开源版 | -| Alive | 是/否 | 集群是否存活,1:存活;0:没有存活 | 全部版本 | 开源版 | -| AclEnable | 是/否 | 集群是否开启 Acl,1:是;0:否 | 全部版本 | 开源版 | -| Acls | 个 | ACL 数 | 全部版本 | 开源版 | -| AclUsers | 个 | ACL-KafkaUser 数 | 全部版本 | 开源版 | -| AclTopics | 个 | ACL-Topic 数 | 全部版本 | 开源版 | -| AclGroups | 个 | ACL-Group 数 | 全部版本 | 开源版 | +| Alive | 是/否 | 集群是否存活,1:存活;0:没有存活 | 全部版本 | 开源版 | +| AclEnable | 是/否 | 集群是否开启 Acl,1:是;0:否 | 全部版本 | 开源版 | +| Acls | 个 | ACL 数 | 全部版本 | 开源版 | +| AclUsers | 个 | ACL-KafkaUser 数 | 全部版本 | 开源版 | +| AclTopics | 个 | ACL-Topic 数 | 全部版本 | 开源版 | +| AclGroups | 个 | ACL-Group 数 | 全部版本 | 开源版 | | Jobs | 个 | 集群任务总数 | 全部版本 | 开源版 | | JobsRunning | 个 | 集群 running 任务总数 | 全部版本 | 开源版 | | JobsWaiting | 个 | 集群 waiting 任务总数 | 全部版本 | 开源版 | | JobsSuccess | 个 | 集群 success 任务总数 | 全部版本 | 开源版 | | JobsFailed | 个 | 集群 failed 任务总数 | 全部版本 | 开源版 | -| LoadReBalanceEnable | 是/否 | 是否开启均衡, 1:是;0:否 | 全部版本 | 企业版 | -| LoadReBalanceCpu | 是/否 | CPU 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | -| LoadReBalanceNwIn | 是/否 | BytesIn 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | -| LoadReBalanceNwOut | 是/否 | BytesOut 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | -| LoadReBalanceDisk | 是/否 | Disk 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceEnable | 是/否 | 是否开启均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceCpu | 是/否 | CPU 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceNwIn | 是/否 | BytesIn 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceNwOut | 是/否 | BytesOut 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | +| LoadReBalanceDisk | 是/否 | Disk 是否均衡, 1:是;0:否 | 全部版本 | 企业版 | ### 3.3.2、Broker 指标 diff --git "a/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" "b/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" index 675a9dabd..5763026d1 100644 --- "a/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" +++ "b/docs/install_guide/\347\211\210\346\234\254\345\215\207\347\272\247\346\211\213\345\206\214.md" @@ -53,6 +53,11 @@ INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `l INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming'); INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming'); INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming'); + + +-- 多集群管理权限2023-07-18新增 +INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2052', 'Security-User查看密码', '1593', '1', '2', 'Security-User查看密码', '0', 'know-streaming'); +INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2052', '0', 'know-streaming'); ``` ### 升级至 `3.3.0` 版本 diff --git a/docs/user_guide/faq.md b/docs/user_guide/faq.md index 
b66523ff4..de20e3dbd 100644
--- a/docs/user_guide/faq.md
+++ b/docs/user_guide/faq.md
@@ -7,7 +7,7 @@
   - [1、支持哪些 Kafka 版本?](#1支持哪些-kafka-版本)
   - [1、2.x 版本和 3.0 版本有什么差异?](#12x-版本和-30-版本有什么差异)
   - [3、页面流量信息等无数据?](#3页面流量信息等无数据)
-  - [8.4、`Jmx`连接失败如何解决?](#84jmx连接失败如何解决)
+  - [4、`Jmx`连接失败如何解决?](#4jmx连接失败如何解决)
   - [5、有没有 API 文档?](#5有没有-api-文档)
   - [6、删除 Topic 成功后,为何过段时间又出现了?](#6删除-topic-成功后为何过段时间又出现了)
   - [7、如何在不登录的情况下,调用接口?](#7如何在不登录的情况下调用接口)
@@ -21,6 +21,8 @@
   - [15、测试时使用Testcontainers的说明](#15测试时使用testcontainers的说明)
   - [16、JMX连接失败怎么办](#16jmx连接失败怎么办)
   - [17、zk监控无数据问题](#17zk监控无数据问题)
+  - [18、启动失败,报NoClassDefFoundError如何解决](#18启动失败报noclassdeffounderror如何解决)
+  - [19、依赖ElasticSearch 8.0以上版本部署后指标信息无法正常显示如何解决](#19依赖elasticsearch-80以上版本部署后指标信息无法正常显示如何解决)
 
 ## 1、支持哪些 Kafka 版本?
@@ -57,7 +59,7 @@
 
-## 8.4、`Jmx`连接失败如何解决?
+## 4、`Jmx`连接失败如何解决?
 
 - 参看 [Jmx 连接配置&问题解决](https://doc.knowstreaming.com/product/9-attachment#91jmx-%E8%BF%9E%E6%8E%A5%E5%A4%B1%E8%B4%A5%E9%97%AE%E9%A2%98%E8%A7%A3%E5%86%B3) 说明。
@@ -278,3 +280,42 @@ zookeeper集群正常,但Ks上zk页面所有监控指标无数据,`KnowStrea
 ```
 4lw.commands.whitelist=*
 ```
+
+## 18、启动失败,报NoClassDefFoundError如何解决
+
+**错误现象:**
+```log
+# 启动失败,报 nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton
+
+2023-08-11 22:54:29.842 [main] ERROR class=org.springframework.boot.SpringApplication||Application run failed
+org.springframework.beans.factory.BeanCreationException: Error creating bean with name 'quartzScheduler' defined in class path resource [com/didiglobal/logi/job/LogIJobAutoConfiguration.class]: Bean instantiation via factory method failed; nested exception is org.springframework.beans.BeanInstantiationException: Failed to instantiate [com.didiglobal.logi.job.core.Scheduler]: Factory method 'quartzScheduler' threw exception; nested exception is java.lang.NoClassDefFoundError: Could not initialize class com.didiglobal.logi.job.core.WorkerSingleton$Singleton
+at org.springframework.beans.factory.support.ConstructorResolver.instantiate(ConstructorResolver.java:657)
+```
+
+**问题原因:**
+1. `KnowStreaming` 依赖的 `Logi-Job` 初始化 `WorkerSingleton$Singleton` 失败。
+2. `WorkerSingleton$Singleton` 初始化的过程中,会去获取一些操作系统的信息,如果获取时出现了异常,则会导致 `WorkerSingleton$Singleton` 初始化失败。
+
+**临时建议:**
+
+`Logi-Job` 问题的修复时间不好控制。我们之前测试验证过,在 `Windows`、`Mac`、`CentOS` 这几个操作系统下基本上都可以正常运行。
+
+所以,如果有条件的话,可以暂时先使用这几个系统部署 `KnowStreaming`。
+
+如果在 `Windows`、`Mac`、`CentOS` 这几个操作系统下也出现了启动失败的问题,可以重试 2-3 次看是否依然失败,或者换一台机器试试。
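Editor's note on the error signature above: in the JVM, a class whose static initializer throws is permanently marked as failed. The first access raises `ExceptionInInitializerError` carrying the real cause; every later access only raises `NoClassDefFoundError: Could not initialize class ...`, which is what the KnowStreaming startup log shows. So when diagnosing this, search earlier in the log for the first `ExceptionInInitializerError`. A minimal sketch of the mechanism follows; `HypotheticalWorkerSingleton` is an illustrative stand-in, not Logi-Job's actual `WorkerSingleton$Singleton` code.

```java
// Minimal reproduction of the "Could not initialize class" pattern.
public class ClassInitFailureDemo {

    static class HypotheticalWorkerSingleton {
        // Static initializer: if this throws, the class is marked as failed forever.
        static final String OS_INFO = loadOsInfo();

        static String loadOsInfo() {
            // Stands in for the OS-info lookup failing on an unusual platform.
            throw new IllegalStateException("failed to read OS info");
        }
    }

    public static void main(String[] args) {
        try {
            // First access triggers static init and throws
            // ExceptionInInitializerError with the root cause attached.
            System.out.println(HypotheticalWorkerSingleton.OS_INFO);
        } catch (Throwable t) {
            System.out.println("first access:  " + t);
        }
        try {
            // Every later access only reports
            // "NoClassDefFoundError: Could not initialize class ..."
            // and the original cause is no longer attached.
            System.out.println(HypotheticalWorkerSingleton.OS_INFO);
        } catch (Throwable t) {
            System.out.println("second access: " + t);
        }
    }
}
```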
+## 19、依赖ElasticSearch 8.0以上版本部署后指标信息无法正常显示如何解决
+
+**错误现象**
+```log
+Warnings: [299 Elasticsearch-8.9.1-a813d015ef1826148d9d389bd1c0d781c6e349f0 "Legacy index templates are deprecated in favor of composable templates."]
+```
+**问题原因**
+1. ES 8.0 和 ES 7.0 版本存在 Template 模式的差异,建议使用 /_index_template 端点来管理模板;
+2. ES Java Client 在该版本下行为异常,表现为读取数据为空;
+
+**解决方法**
+将 `es_template_create.sh` 脚本中所有的 `/_template` 替换为 `/_index_template` 后执行即可。
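Editor's note: the endpoint rename is the substance of this fix. ES 8 removed legacy templates; a composable template is created via `PUT /_index_template/{name}` and nests `settings`/`mappings` under a `template` object. The sketch below shows the shape of the call the modified script ends up making, expressed with `java.net.http` (Java 11+); the host, template name, and body are illustrative placeholders, not the actual content of `es_template_create.sh`.

```java
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

// Sketch: legacy templates used PUT /_template/{name}; composable templates
// (required by ES 8.x) use PUT /_index_template/{name} with a "template" object.
public class IndexTemplateMigrationSketch {
    public static void main(String[] args) throws Exception {
        // Placeholders: adjust host, template name, and body to the real script.
        String esAddress = "http://localhost:9200";
        String templateBody =
                "{\n" +
                "  \"index_patterns\": [\"ks_kafka_cluster_metric*\"],\n" +
                "  \"template\": {\n" +
                "    \"settings\": { \"number_of_shards\": 1 },\n" +
                "    \"mappings\": { \"properties\": { \"timestamp\": { \"type\": \"date\" } } }\n" +
                "  }\n" +
                "}";

        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create(esAddress + "/_index_template/ks_kafka_cluster_metric"))
                .header("Content-Type", "application/json")
                .PUT(HttpRequest.BodyPublishers.ofString(templateBody))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.statusCode() + " " + response.body());
    }
}
```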
diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java
index 191afc6bb..6e1440ef5 100644
--- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java
+++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/connector/impl/ConnectorManagerImpl.java
@@ -12,6 +12,7 @@
 import com.xiaojukeji.know.streaming.km.common.bean.vo.connect.connector.ConnectorStateVO;
 import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant;
 import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
+import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
 import org.apache.kafka.connect.runtime.AbstractStatus;
@@ -30,6 +31,9 @@ public class ConnectorManagerImpl implements ConnectorManager {
     @Autowired
     private ConnectorService connectorService;
 
+    @Autowired
+    private OpConnectorService opConnectorService;
+
     @Autowired
     private WorkerConnectorService workerConnectorService;
 
@@ -44,24 +48,24 @@ public Result updateConnectorConfig(Long connectClusterId, String connecto
             return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, "Connector参数错误");
         }
 
-        return connectorService.updateConnectorConfig(connectClusterId, connectorName, configs, operator);
+        return opConnectorService.updateConnectorConfig(connectClusterId, connectorName, configs, operator);
     }
 
     @Override
     public Result createConnector(ConnectorCreateDTO dto, String operator) {
         dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName());
 
-        Result createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
+        Result createResult = opConnectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
         if (createResult.failed()) {
             return Result.buildFromIgnoreData(createResult);
         }
 
-        Result ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(dto.getConnectClusterId(), dto.getConnectorName());
+        Result ksConnectorResult = connectorService.getConnectorFromKafka(dto.getConnectClusterId(), dto.getConnectorName());
         if (ksConnectorResult.failed()) {
             return Result.buildFromRSAndMsg(ResultStatus.SUCCESS, "创建成功,但是获取元信息失败,页面元信息会存在1分钟延迟");
         }
 
-        connectorService.addNewToDB(ksConnectorResult.getData());
+        opConnectorService.addNewToDB(ksConnectorResult.getData());
 
         return Result.buildSuc();
     }
@@ -69,12 +73,12 @@ public Result createConnector(ConnectorCreateDTO dto, String operator) {
     public Result createConnector(ConnectorCreateDTO dto, String heartbeatName, String checkpointName, String operator) {
         dto.getSuitableConfig().put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, dto.getConnectorName());
 
-        Result createResult = connectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
+        Result createResult = opConnectorService.createConnector(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator);
         if (createResult.failed()) {
             return Result.buildFromIgnoreData(createResult);
         }
 
-        Result ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(dto.getConnectClusterId(), dto.getConnectorName());
+        Result ksConnectorResult = connectorService.getConnectorFromKafka(dto.getConnectClusterId(), dto.getConnectorName());
         if (ksConnectorResult.failed()) {
             return Result.buildFromRSAndMsg(ResultStatus.SUCCESS, "创建成功,但是获取元信息失败,页面元信息会存在1分钟延迟");
         }
@@ -83,7 +87,7 @@ public Result createConnector(ConnectorCreateDTO dto, String heartbeatName
         connector.setCheckpointConnectorName(checkpointName);
         connector.setHeartbeatConnectorName(heartbeatName);
 
-        connectorService.addNewToDB(connector);
+        opConnectorService.addNewToDB(connector);
 
         return Result.buildSuc();
     }
diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java
index de10b0f00..750220a7f 100644
--- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java
+++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/connect/mm2/impl/MirrorMakerManagerImpl.java
@@ -37,6 +37,7 @@
 import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
+import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService;
@@ -67,6 +68,9 @@ public class MirrorMakerManagerImpl implements MirrorMakerManager {
     @Autowired
     private ConnectorService connectorService;
 
+    @Autowired
+    private OpConnectorService opConnectorService;
+
     @Autowired
     private WorkerConnectorService workerConnectorService;
 
@@ -156,20 +160,20 @@ public Result deleteMirrorMaker(Long connectClusterId, String sourceConnec
         Result rv = Result.buildSuc();
 
         if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) {
-            rv = connectorService.deleteConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
+            rv = opConnectorService.deleteConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator);
         }
         if (rv.failed()) {
             return rv;
         }
 
         if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) {
-            rv = connectorService.deleteConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
+            rv = opConnectorService.deleteConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator);
         }
         if (rv.failed()) {
             return rv;
         }
 
-        return connectorService.deleteConnector(connectClusterId, sourceConnectorName, operator);
+        return opConnectorService.deleteConnector(connectClusterId, sourceConnectorName, operator);
     }
 
     @Override
@@ -181,20 +185,20 @@ public Result modifyMirrorMakerConfig(MirrorMakerCreateDTO dto, String ope
         Result rv = Result.buildSuc();
 
         if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName()) && dto.getCheckpointConnectorConfigs() != null) {
-            rv = connectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getCheckpointConnectorName(), dto.getCheckpointConnectorConfigs(), operator);
+            rv = 
opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getCheckpointConnectorName(), dto.getCheckpointConnectorConfigs(), operator); } if (rv.failed()) { return rv; } if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName()) && dto.getHeartbeatConnectorConfigs() != null) { - rv = connectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getHeartbeatConnectorName(), dto.getHeartbeatConnectorConfigs(), operator); + rv = opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), connectorPO.getHeartbeatConnectorName(), dto.getHeartbeatConnectorConfigs(), operator); } if (rv.failed()) { return rv; } - return connectorService.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator); + return opConnectorService.updateConnectorConfig(dto.getConnectClusterId(), dto.getConnectorName(), dto.getSuitableConfig(), operator); } @Override @@ -206,20 +210,20 @@ public Result restartMirrorMaker(Long connectClusterId, String sourceConne Result rv = Result.buildSuc(); if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) { - rv = connectorService.restartConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator); + rv = opConnectorService.restartConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator); } if (rv.failed()) { return rv; } if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) { - rv = connectorService.restartConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator); + rv = opConnectorService.restartConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator); } if (rv.failed()) { return rv; } - return connectorService.restartConnector(connectClusterId, sourceConnectorName, operator); + return opConnectorService.restartConnector(connectClusterId, sourceConnectorName, operator); } @Override @@ -231,20 +235,20 @@ public Result stopMirrorMaker(Long connectClusterId, String sourceConnecto Result rv = Result.buildSuc(); if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) { - rv = connectorService.stopConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator); + rv = opConnectorService.stopConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator); } if (rv.failed()) { return rv; } if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) { - rv = connectorService.stopConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator); + rv = opConnectorService.stopConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator); } if (rv.failed()) { return rv; } - return connectorService.stopConnector(connectClusterId, sourceConnectorName, operator); + return opConnectorService.stopConnector(connectClusterId, sourceConnectorName, operator); } @Override @@ -256,20 +260,20 @@ public Result resumeMirrorMaker(Long connectClusterId, String sourceConnec Result rv = Result.buildSuc(); if (!ValidateUtils.isBlank(connectorPO.getCheckpointConnectorName())) { - rv = connectorService.resumeConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator); + rv = opConnectorService.resumeConnector(connectClusterId, connectorPO.getCheckpointConnectorName(), operator); } if (rv.failed()) { return rv; } if (!ValidateUtils.isBlank(connectorPO.getHeartbeatConnectorName())) { - rv = connectorService.resumeConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator); + rv 
= opConnectorService.resumeConnector(connectClusterId, connectorPO.getHeartbeatConnectorName(), operator); } if (rv.failed()) { return rv; } - return connectorService.resumeConnector(connectClusterId, sourceConnectorName, operator); + return opConnectorService.resumeConnector(connectClusterId, sourceConnectorName, operator); } @Override diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java index ea6465a38..60c9b0667 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/GroupManager.java @@ -26,7 +26,7 @@ PaginationResult pagingGroupMembers(Long clusterPhyId, String searchGroupKeyword, PaginationBaseDTO dto); - PaginationResult pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto); + PaginationResult pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) throws Exception; PaginationResult pagingClusterGroupsOverview(Long clusterPhyId, ClusterGroupSummaryDTO dto); diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java index 753768dfc..55d7219c4 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/group/impl/GroupManagerImpl.java @@ -118,10 +118,15 @@ public PaginationResult pagingGroupMembers(Long clusterPhy } @Override - public PaginationResult pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) { + public PaginationResult pagingGroupTopicMembers(Long clusterPhyId, String groupName, PaginationBaseDTO dto) throws Exception { long startTimeUnitMs = System.currentTimeMillis(); - Group group = groupService.getGroupFromDB(clusterPhyId, groupName); + ClusterPhy clusterPhy = clusterPhyService.getClusterByCluster(clusterPhyId); + if (clusterPhy == null) { + return PaginationResult.buildFailure(MsgConstant.getClusterPhyNotExist(clusterPhyId), dto); + } + + Group group = groupService.getGroupFromKafka(clusterPhy, groupName); //没有topicMember则直接返回 if (group == null || ValidateUtils.isEmptyList(group.getTopicMembers())) { diff --git a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java index 22d204ea5..424594472 100644 --- a/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java +++ b/km-biz/src/main/java/com/xiaojukeji/know/streaming/km/biz/topic/impl/OpTopicManagerImpl.java @@ -7,6 +7,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.dto.topic.TopicExpansionDTO; import com.xiaojukeji.know.streaming.km.common.bean.entity.broker.Broker; import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; +import com.xiaojukeji.know.streaming.km.common.bean.entity.param.config.KafkaTopicConfigParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicCreateParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.param.topic.TopicPartitionExpandParam; @@ -17,17 +18,17 @@ import 
com.xiaojukeji.know.streaming.km.common.bean.entity.topic.Topic; import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; -import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils; -import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil; -import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; +import com.xiaojukeji.know.streaming.km.common.utils.*; import com.xiaojukeji.know.streaming.km.common.utils.kafka.KafkaReplicaAssignUtil; import com.xiaojukeji.know.streaming.km.core.service.broker.BrokerService; import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService; import com.xiaojukeji.know.streaming.km.core.service.partition.PartitionService; import com.xiaojukeji.know.streaming.km.core.service.topic.OpTopicService; +import com.xiaojukeji.know.streaming.km.core.service.topic.TopicConfigService; import com.xiaojukeji.know.streaming.km.core.service.topic.TopicService; import kafka.admin.AdminUtils; import kafka.admin.BrokerMetadata; +import org.apache.kafka.common.config.TopicConfig; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Transactional; @@ -61,6 +62,9 @@ public class OpTopicManagerImpl implements OpTopicManager { @Autowired private PartitionService partitionService; + @Autowired + private TopicConfigService topicConfigService; + @Override public Result createTopic(TopicCreateDTO dto, String operator) { log.info("method=createTopic||param={}||operator={}.", dto, operator); @@ -160,10 +164,27 @@ public Result expandTopic(TopicExpansionDTO dto, String operator) { @Override public Result truncateTopic(Long clusterPhyId, String topicName, String operator) { + // 增加delete配置 + Result> rt = this.addDeleteConfigIfNotExist(clusterPhyId, topicName, operator); + if (rt.failed()) { + log.error("method=truncateTopic||clusterPhyId={}||topicName={}||operator={}||result={}||msg=get config from kafka failed", clusterPhyId, topicName, operator, rt); + return Result.buildFromIgnoreData(rt); + } + // 清空Topic Result rv = opTopicService.truncateTopic(new TopicTruncateParam(clusterPhyId, topicName, KafkaConstant.TOPICK_TRUNCATE_DEFAULT_OFFSET), operator); if (rv.failed()) { - return rv; + log.error("method=truncateTopic||clusterPhyId={}||topicName={}||originConfig={}||operator={}||result={}||msg=truncate topic failed", clusterPhyId, topicName, rt.getData().v2(), operator, rv); + // config被修改了,则错误提示需要提醒一下,否则直接返回错误 + return rt.getData().v1() ? 
Result.buildFailure(rv.getCode(), rv.getMessage() + "\t\n" + String.format("Topic的CleanupPolicy已被修改,需要手动恢复为%s", rt.getData().v2())) : rv;
+        }
+
+        // 恢复原CleanupPolicy配置
+        rv = this.recoverConfigIfChanged(clusterPhyId, topicName, rt.getData().v1(), rt.getData().v2(), operator);
+        if (rv.failed()) {
+            log.error("method=truncateTopic||clusterPhyId={}||topicName={}||originConfig={}||operator={}||result={}||msg=truncate topic success but recover config failed", clusterPhyId, topicName, rt.getData().v2(), operator, rv);
+            // config被修改了,则错误提示需要提醒一下,否则直接返回错误
+            return Result.buildFailure(rv.getCode(), String.format("Topic清空操作已成功,但是恢复CleanupPolicy配置失败,需要手动恢复为%s。", rt.getData().v2()) + "\t\n" + rv.getMessage());
         }
 
         return Result.buildSuc();
@@ -171,6 +192,44 @@ public Result truncateTopic(Long clusterPhyId, String topicName, String op
 
     /**************************************************** private method ****************************************************/
 
+    private Result> addDeleteConfigIfNotExist(Long clusterPhyId, String topicName, String operator) {
+        // 获取Topic配置
+        Result> configMapResult = topicConfigService.getTopicConfigFromKafka(clusterPhyId, topicName);
+        if (configMapResult.failed()) {
+            return Result.buildFromIgnoreData(configMapResult);
+        }
+
+        String cleanupPolicyValue = configMapResult.getData().getOrDefault(TopicConfig.CLEANUP_POLICY_CONFIG, "");
+        List cleanupPolicyValueList = CommonUtils.string2StrList(cleanupPolicyValue);
+        if (cleanupPolicyValueList.size() == 1 && cleanupPolicyValueList.contains(TopicConfig.CLEANUP_POLICY_DELETE)) {
+            // 不需要修改
+            return Result.buildSuc(new Tuple<>(Boolean.FALSE, cleanupPolicyValue));
+        }
+
+        Map changedConfigMap = new HashMap<>(1);
+        changedConfigMap.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE);
+
+        Result rv = topicConfigService.modifyTopicConfig(new KafkaTopicConfigParam(clusterPhyId, topicName, changedConfigMap), operator);
+        if (rv.failed()) {
+            // 修改失败
+            return Result.buildFromIgnoreData(rv);
+        }
+
+        return Result.buildSuc(new Tuple<>(Boolean.TRUE, cleanupPolicyValue));
+    }
+
+    private Result recoverConfigIfChanged(Long clusterPhyId, String topicName, Boolean changed, String originValue, String operator) {
+        if (!changed) {
+            // 没有修改,直接返回
+            return Result.buildSuc();
+        }
+
+        // 恢复配置
+        Map changedConfigMap = new HashMap<>(1);
+        changedConfigMap.put(TopicConfig.CLEANUP_POLICY_CONFIG, originValue);
+
+        return topicConfigService.modifyTopicConfig(new KafkaTopicConfigParam(clusterPhyId, topicName, changedConfigMap), operator);
+    }
 
     private Seq buildBrokerMetadataSeq(Long clusterPhyId, final List selectedBrokerIdList) {
         // 选取Broker列表
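Editor's note on the truncate flow above: it works around a broker rule, since DeleteRecords requests are rejected for topics whose `cleanup.policy` does not include `delete`. The code therefore forces `delete`, truncates, then restores the original policy. Below is a minimal sketch of the same three-step sequence expressed directly against Kafka's `Admin` API; KnowStreaming itself routes through `topicConfigService`/`opTopicService`. The bootstrap address and topic are placeholders, and the assumption that `TOPICK_TRUNCATE_DEFAULT_OFFSET` is `-1` (interpreted by the broker as the high watermark) is the editor's, not confirmed by this diff.

```java
import org.apache.kafka.clients.admin.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.config.TopicConfig;

import java.util.*;

// Sketch of the truncate sequence: force cleanup.policy=delete,
// delete records, then restore the original policy.
public class TruncateTopicSketch {
    public static void main(String[] args) throws Exception {
        String topic = "demo-topic";          // placeholder values
        int partitionCount = 3;

        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, topic);

            // 1. Force cleanup.policy=delete, since DeleteRecords is rejected on
            //    compacted topics (mirrors addDeleteConfigIfNotExist above).
            AlterConfigOp setDelete = new AlterConfigOp(
                    new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE),
                    AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(
                    Collections.singletonMap(resource, Collections.singletonList(setDelete))).all().get();

            // 2. Truncate every partition; offset -1 is presumably what
            //    TOPICK_TRUNCATE_DEFAULT_OFFSET holds ("up to the high watermark").
            Map<TopicPartition, RecordsToDelete> toDelete = new HashMap<>();
            for (int p = 0; p < partitionCount; p++) {
                toDelete.put(new TopicPartition(topic, p), RecordsToDelete.beforeOffset(-1L));
            }
            admin.deleteRecords(toDelete).all().get();

            // 3. Restore the policy (mirrors recoverConfigIfChanged); a real
            //    implementation would reuse the value read before step 1 instead
            //    of hard-coding compact here.
            AlterConfigOp restore = new AlterConfigOp(
                    new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT),
                    AlterConfigOp.OpType.SET);
            admin.incrementalAlterConfigs(
                    Collections.singletonMap(resource, Collections.singletonList(restore))).all().get();
        }
    }
}
```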
diff --git a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/connect/ConnectConnectorMetricCollector.java b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/connect/ConnectConnectorMetricCollector.java
index 4da6d8fde..c49e1688e 100644
--- a/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/connect/ConnectConnectorMetricCollector.java
+++ b/km-collector/src/main/java/com/xiaojukeji/know/streaming/km/collector/metric/connect/ConnectConnectorMetricCollector.java
@@ -44,7 +44,7 @@ public List collectConnectMetrics(ConnectCluster connectCluste
         Long connectClusterId = connectCluster.getId();
         List items = versionControlService.listVersionControlItem(this.getClusterVersion(connectCluster), collectorType().getCode());
 
-        Result> connectorList = connectorService.listConnectorsFromCluster(connectClusterId);
+        Result> connectorList = connectorService.listConnectorsFromCluster(connectCluster);
 
         FutureWaitUtil future = this.getFutureUtilByClusterPhyId(connectClusterId);
 
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectClusterMetrics.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectClusterMetrics.java
index fe710391e..f7c508187 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectClusterMetrics.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectClusterMetrics.java
@@ -1,7 +1,6 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;
 
 import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
-import lombok.AllArgsConstructor;
 import lombok.Data;
 import lombok.NoArgsConstructor;
 import lombok.ToString;
@@ -12,20 +11,18 @@
  */
 @Data
 @NoArgsConstructor
-@AllArgsConstructor
 @ToString
 public class ConnectClusterMetrics extends BaseMetrics {
-    private Long connectClusterId;
+    protected Long connectClusterId;
 
-    public ConnectClusterMetrics(Long clusterPhyId, Long connectClusterId){
+    public ConnectClusterMetrics(Long clusterPhyId, Long connectClusterId ){
         super(clusterPhyId);
         this.connectClusterId = connectClusterId;
     }
 
-    public static ConnectClusterMetrics initWithMetric(Long connectClusterId, String metric, Float value) {
-        ConnectClusterMetrics brokerMetrics = new ConnectClusterMetrics(connectClusterId, connectClusterId);
-        brokerMetrics.putMetric(metric, value);
-        return brokerMetrics;
+    public ConnectClusterMetrics(Long connectClusterId, String metricName, Float metricValue) {
+        this(null, connectClusterId);
+        this.putMetric(metricName, metricValue);
     }
 
     @Override
diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectWorkerMetrics.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectWorkerMetrics.java
index 78d9fe063..de4936e56 100644
--- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectWorkerMetrics.java
+++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectWorkerMetrics.java
@@ -1,7 +1,5 @@
 package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect;
 
-import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics;
-import lombok.AllArgsConstructor;
 import lombok.Data;
 import lombok.NoArgsConstructor;
 import lombok.ToString;
@@ -11,25 +9,19 @@
  * @date 2022/11/2
  */
 @Data
-@AllArgsConstructor
 @NoArgsConstructor
 @ToString
-public class ConnectWorkerMetrics extends BaseMetrics {
-
-    private Long connectClusterId;
-
+public class ConnectWorkerMetrics extends ConnectClusterMetrics {
     private String workerId;
 
-    public static ConnectWorkerMetrics initWithMetric(Long connectClusterId, String workerId, String metric, Float value) {
-        ConnectWorkerMetrics connectWorkerMetrics = new ConnectWorkerMetrics();
-        connectWorkerMetrics.setConnectClusterId(connectClusterId);
-        connectWorkerMetrics.setWorkerId(workerId);
-        connectWorkerMetrics.putMetric(metric, value);
-        return connectWorkerMetrics;
+    public ConnectWorkerMetrics(Long connectClusterId, String workerId, String metricName, Float metricValue) {
+        super(null, connectClusterId);
+        this.workerId = workerId;
+        this.putMetric(metricName, metricValue);
     }
 
     @Override
     public 
String unique() { - return "KCC@" + clusterPhyId + "@" + connectClusterId + "@" + workerId; + return "KCW@" + clusterPhyId + "@" + connectClusterId + "@" + workerId; } } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorMetrics.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorMetrics.java index 08540ed5b..b497efb87 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorMetrics.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorMetrics.java @@ -1,6 +1,5 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect; -import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics; import lombok.Data; import lombok.NoArgsConstructor; import lombok.ToString; @@ -12,24 +11,21 @@ @Data @NoArgsConstructor @ToString -public class ConnectorMetrics extends BaseMetrics { - private Long connectClusterId; +public class ConnectorMetrics extends ConnectClusterMetrics { + protected String connectorName; - private String connectorName; - - private String connectorNameAndClusterId; + protected String connectorNameAndClusterId; public ConnectorMetrics(Long connectClusterId, String connectorName) { - super(null); + super(null, connectClusterId); this.connectClusterId = connectClusterId; this.connectorName = connectorName; this.connectorNameAndClusterId = connectorName + "#" + connectClusterId; } - public static ConnectorMetrics initWithMetric(Long connectClusterId, String connectorName, String metricName, Float value) { - ConnectorMetrics metrics = new ConnectorMetrics(connectClusterId, connectorName); - metrics.putMetric(metricName, value); - return metrics; + public ConnectorMetrics(Long connectClusterId, String connectorName, String metricName, Float metricValue) { + this(connectClusterId, connectorName); + this.putMetric(metricName, metricValue); } @Override diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorTaskMetrics.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorTaskMetrics.java index eb0dc42de..fc28c97e9 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorTaskMetrics.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/entity/metrics/connect/ConnectorTaskMetrics.java @@ -1,6 +1,5 @@ package com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect; -import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.BaseMetrics; import lombok.Data; import lombok.NoArgsConstructor; import lombok.ToString; @@ -12,11 +11,7 @@ @Data @NoArgsConstructor @ToString -public class ConnectorTaskMetrics extends BaseMetrics { - private Long connectClusterId; - - private String connectorName; - +public class ConnectorTaskMetrics extends ConnectorMetrics { private Integer taskId; public ConnectorTaskMetrics(Long connectClusterId, String connectorName, Integer taskId) { @@ -25,14 +20,13 @@ public ConnectorTaskMetrics(Long connectClusterId, String connectorName, Integer this.taskId = taskId; } - public static ConnectorTaskMetrics initWithMetric(Long connectClusterId, String connectorName, Integer taskId, String metricName, Float value) { - ConnectorTaskMetrics metrics = new ConnectorTaskMetrics(connectClusterId, connectorName, 
taskId); - metrics.putMetric(metricName,value); - return metrics; + public ConnectorTaskMetrics(Long connectClusterId, String connectorName, Integer taskId, String metricName, Float metricValue) { + this(connectClusterId, connectorName, taskId); + this.putMetric(metricName, metricValue); } @Override public String unique() { - return "KCOR@" + connectClusterId + "@" + connectorName + "@" + taskId; + return "KCORT@" + connectClusterId + "@" + connectorName + "@" + taskId; } } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/event/cluster/connect/ClusterPhyDeletedEvent.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/event/cluster/connect/ClusterPhyDeletedEvent.java new file mode 100644 index 000000000..29e19bc6b --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/bean/event/cluster/connect/ClusterPhyDeletedEvent.java @@ -0,0 +1,16 @@ +package com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect; + +import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyBaseEvent; +import lombok.Getter; + +/** + * 集群删除事件 + * @author zengqiao + * @date 23/08/15 + */ +@Getter +public class ClusterPhyDeletedEvent extends ClusterPhyBaseEvent { + public ClusterPhyDeletedEvent(Object source, Long clusterPhyId) { + super(source, clusterPhyId); + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ConnectConverter.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ConnectConverter.java index 6dcc30e49..c20add65c 100644 --- a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ConnectConverter.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/converter/ConnectConverter.java @@ -16,6 +16,8 @@ import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant; import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils; import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import com.xiaojukeji.know.streaming.km.common.utils.Triple; +import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import java.util.ArrayList; import java.util.HashMap; @@ -24,6 +26,9 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME; +import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME; + public class ConnectConverter { public static ConnectorBasicCombineExistVO convert2BasicVO(ConnectCluster connectCluster, ConnectorPO connectorPO) { ConnectorBasicCombineExistVO vo = new ConnectorBasicCombineExistVO(); @@ -153,6 +158,66 @@ public static KSConnector convert2KSConnector(Long kafkaClusterPhyId, Long conne return ksConnector; } + public static List convertAndSupplyMirrorMakerInfo(ConnectCluster connectCluster, List, KSConnectorStateInfo>> connectorFullInfoList) { + // + Map sourceMap = new HashMap<>(); + + // + Map heartbeatMap = new HashMap<>(); + Map checkpointMap = new HashMap<>(); + + // 获取每个类型的connector的map信息 + connectorFullInfoList.forEach(connector -> { + Map mm2Map = null; + if (KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) { + mm2Map = sourceMap; + } else if 
(KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) { + mm2Map = heartbeatMap; + } else if (KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) { + mm2Map = checkpointMap; + } + + String targetBootstrapServers = connector.v1().getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME); + String sourceBootstrapServers = connector.v1().getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME); + + if (ValidateUtils.anyBlank(targetBootstrapServers, sourceBootstrapServers) || mm2Map == null) { + return; + } + + if (KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE.equals(connector.v1().getConfig().get(KafkaConnectConstant.CONNECTOR_CLASS_FILED_NAME))) { + // source 类型的格式和 heartbeat & checkpoint 的不一样 + mm2Map.put(connector.v1().getName(), targetBootstrapServers + "@" + sourceBootstrapServers); + } else { + mm2Map.put(targetBootstrapServers + "@" + sourceBootstrapServers, connector.v1().getName()); + } + }); + + + List connectorList = new ArrayList<>(); + connectorFullInfoList.forEach(connector -> { + // 转化并添加到list中 + KSConnector ksConnector = ConnectConverter.convert2KSConnector( + connectCluster.getKafkaClusterPhyId(), + connectCluster.getId(), + connector.v1(), + connector.v3(), + connector.v2() + ); + connectorList.add(ksConnector); + + // 补充mm2信息 + String targetAndSource = sourceMap.get(ksConnector.getConnectorName()); + if (ValidateUtils.isBlank(targetAndSource)) { + return; + } + + ksConnector.setHeartbeatConnectorName(heartbeatMap.getOrDefault(targetAndSource, "")); + ksConnector.setCheckpointConnectorName(checkpointMap.getOrDefault(targetAndSource, "")); + }); + + return connectorList; + } + private static String genConnectorKey(Long connectorId, String connectorName){ return connectorId + "#" + connectorName; } diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/connect/ConnectStatusEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/connect/ConnectStatusEnum.java new file mode 100644 index 000000000..235cead61 --- /dev/null +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/connect/ConnectStatusEnum.java @@ -0,0 +1,50 @@ +package com.xiaojukeji.know.streaming.km.common.enums.connect; + +import org.apache.kafka.connect.runtime.AbstractStatus; + +/** + * connector运行状态 + * @see AbstractStatus + */ +public enum ConnectStatusEnum { + UNASSIGNED(0, "UNASSIGNED"), + + RUNNING(1,"RUNNING"), + + PAUSED(2,"PAUSED"), + + FAILED(3, "FAILED"), + + DESTROYED(4, "DESTROYED"), + + UNKNOWN(-1, "UNKNOWN") + + ; + + ConnectStatusEnum(int status, String value) { + this.status = status; + this.value = value; + } + + private final int status; + + private final String value; + + public static ConnectStatusEnum getByValue(String value) { + for (ConnectStatusEnum statusEnum: ConnectStatusEnum.values()) { + if (statusEnum.value.equals(value)) { + return statusEnum; + } + } + + return ConnectStatusEnum.UNKNOWN; + } + + public int getStatus() { + return status; + } + + public String getValue() { + return value; + } +} diff --git a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionEnum.java b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionEnum.java index 5c5948488..d08e75beb 100644 --- 
a/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionEnum.java +++ b/km-common/src/main/java/com/xiaojukeji/know/streaming/km/common/enums/version/VersionEnum.java @@ -73,9 +73,27 @@ public enum VersionEnum { * 3.x.x */ V_3_0_0("3.0.0", normailze("3.0.0")), + V_3_1_0("3.1.0", normailze("3.1.0")), + V_3_1_1("3.1.1", normailze("3.1.1")), + V_3_1_2("3.1.2", normailze("3.1.2")), + V_3_2_0("3.2.0", normailze("3.2.0")), + V_3_2_1("3.2.1", normailze("3.2.1")), + V_3_2_3("3.2.3", normailze("3.2.3")), + V_3_3_0("3.3.0", normailze("3.3.0")), + V_3_3_1("3.3.1", normailze("3.3.1")), + V_3_3_2("3.3.2", normailze("3.3.2")), + + V_3_4_0("3.4.0", normailze("3.4.0")), + V_3_4_1("3.4.1", normailze("3.4.1")), + + V_3_5_0("3.5.0", normailze("3.5.0")), + V_3_5_1("3.5.1", normailze("3.5.1")), + + V_3_6_0("3.6.0", normailze("3.6.0")), + V_MAX("x.x.x.x", Long.MAX_VALUE), diff --git a/km-console/packages/config-manager-fe/config/webpack.common.js b/km-console/packages/config-manager-fe/config/webpack.common.js index cef88e615..7521137bc 100644 --- a/km-console/packages/config-manager-fe/config/webpack.common.js +++ b/km-console/packages/config-manager-fe/config/webpack.common.js @@ -16,6 +16,13 @@ const babelOptions = { cacheDirectory: true, babelrc: false, presets: [require.resolve('@babel/preset-env'), require.resolve('@babel/preset-typescript'), require.resolve('@babel/preset-react')], + overrides: [ + // TODO:编译时需要做的事情更多,应该只针对目标第三方库 + { + include: './node_modules', + sourceType: 'unambiguous' + } + ], plugins: [ [require.resolve('@babel/plugin-proposal-decorators'), { legacy: true }], [require.resolve('@babel/plugin-proposal-class-properties'), { loose: true }], diff --git a/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx b/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx index eb0b82e9a..d50c77cfd 100644 --- a/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx +++ b/km-console/packages/config-manager-fe/src/pages/UserManage/RoleTabContent.tsx @@ -96,7 +96,7 @@ const RoleDetailAndUpdate = forwardRef((props, ref): JSX.Element => { arr.push(permissions[i].id); } }); - formData.permissionIdList = formData.permissionIdList.flat(); + formData.permissionIdList = formData.permissionIdList.flat().filter((item) => item !== undefined); setConfirmLoading(true); request(api.editRole, { method: type === RoleOperate.Add ? 
'POST' : 'PUT', @@ -250,7 +250,7 @@ const RoleDetailAndUpdate = forwardRef((props, ref): JSX.Element => { getApi('/group-offsets'), getGroupOverview: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/groups-overview`), - + deleteGroupOffset: () => getApi('/group-offsets'), // topics列表 getTopicsList: (clusterPhyId: number) => getApi(`/clusters/${clusterPhyId}/topics-overview`), getReassignmentList: () => getApi(`/reassignment/topics-overview`), @@ -108,6 +108,7 @@ const api = { getTopicState: (clusterPhyId: number, topicName: string) => getApi(`/clusters/${clusterPhyId}/topics/${topicName}/state`), getTopicMetadata: (clusterPhyId: number, topicName: string) => getApi(`/clusters/${clusterPhyId}/topics/${topicName}/metadata-combine-exist`), + deleteTopicData: () => getApi(`/topics/truncate-topic`), // 最新的指标值 getMetricPointsLatest: (clusterPhyId: number) => getApi(`/physical-clusters/${clusterPhyId}/latest-metrics`), diff --git a/km-console/packages/layout-clusters-fe/src/app.tsx b/km-console/packages/layout-clusters-fe/src/app.tsx index edffd98a1..66d3aa8b5 100755 --- a/km-console/packages/layout-clusters-fe/src/app.tsx +++ b/km-console/packages/layout-clusters-fe/src/app.tsx @@ -21,6 +21,7 @@ import { getLicenseInfo } from './constants/common'; import api from './api'; import ClusterContainer from './pages/index'; import ksLogo from './assets/ks-logo.png'; +import {ClustersPermissionMap} from "./pages/CommonConfig"; interface ILocaleMap { [index: string]: any; @@ -79,6 +80,9 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => { const userInfo = localStorage.getItem('userInfo'); const [curActiveAppName, setCurActiveAppName] = useState(''); const [versionInfo, setVersionInfo] = useState(); + const [global] = AppContainer.useGlobalValue(); + const quickEntries=[]; + useEffect(() => { if (pathname.startsWith('/config')) { setCurActiveAppName('config'); @@ -105,6 +109,23 @@ const AppContent = (props: { setlanguage: (language: string) => void }) => { }); }, []); + if (global.hasPermission && global.hasPermission(ClustersPermissionMap.CLUSTERS_MANAGE_VIEW)){ + quickEntries.push({ + icon: , + txt: '多集群管理', + ident: '', + active: curActiveAppName === 'cluster', + }); + } + if (global.hasPermission && global.hasPermission(ClustersPermissionMap.SYS_MANAGE_VIEW)){ + quickEntries.push({ + icon: , + txt: '系统管理', + ident: 'config', + active: curActiveAppName === 'config', + }); + } + return ( void }) => { ), username: userInfo ? 
JSON.parse(userInfo)?.userName : '', icon: , - quickEntries: [ - { - icon: , - txt: '多集群管理', - ident: '', - active: curActiveAppName === 'cluster', - }, - { - icon: , - txt: '系统管理', - ident: 'config', - active: curActiveAppName === 'config', - }, - ], + quickEntries: quickEntries, isFixed: false, userDropMenuItems: [ diff --git a/km-console/packages/layout-clusters-fe/src/components/CardBar/ConnectDetailCard.tsx b/km-console/packages/layout-clusters-fe/src/components/CardBar/ConnectDetailCard.tsx index f3f9f5453..615490958 100644 --- a/km-console/packages/layout-clusters-fe/src/components/CardBar/ConnectDetailCard.tsx +++ b/km-console/packages/layout-clusters-fe/src/components/CardBar/ConnectDetailCard.tsx @@ -49,7 +49,7 @@ const ConnectDetailCard = (props: { record: any }) => { return ( <> { - + {Utils.firstCharUppercase(type) || '-'} } @@ -64,7 +64,7 @@ const ConnectDetailCard = (props: { record: any }) => { return ( <> { - + {Utils.firstCharUppercase(state) || '-'} } diff --git a/km-console/packages/layout-clusters-fe/src/components/utils.ts b/km-console/packages/layout-clusters-fe/src/components/utils.ts new file mode 100644 index 000000000..3c88d6fb8 --- /dev/null +++ b/km-console/packages/layout-clusters-fe/src/components/utils.ts @@ -0,0 +1,10 @@ +import { useCallback, useState } from 'react'; + +export function useForceRefresh() { + const [refreshKey, setRefresh] = useState(0); + const forceRefresh: () => void = useCallback(() => { + setRefresh((x) => x + 1); + }, []); + + return [refreshKey, forceRefresh]; +} diff --git a/km-console/packages/layout-clusters-fe/src/pages/CommonConfig.tsx b/km-console/packages/layout-clusters-fe/src/pages/CommonConfig.tsx index 4d01da979..06ed6d5df 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/CommonConfig.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/CommonConfig.tsx @@ -7,6 +7,9 @@ import { goLogin } from '@src/constants/axiosConfig'; export enum ClustersPermissionMap { CLUSTERS_MANAGE = '多集群管理', CLUSTERS_MANAGE_VIEW = '多集群管理查看', + //仅用作隐藏掉系统管理菜单 + SYS_MANAGE = '系统管理', + SYS_MANAGE_VIEW = '系统管理查看', // Cluster CLUSTER_ADD = '接入集群', CLUSTER_DEL = '删除集群', @@ -30,6 +33,9 @@ export enum ClustersPermissionMap { TOPIC_CANCEL_REPLICATOR = 'Topic-详情-取消Topic复制', // Consumers CONSUMERS_RESET_OFFSET = 'Consumers-重置Offset', + GROUP_DELETE = 'Group-删除', + GROUP_TOPIC_DELETE = 'GroupOffset-Topic纬度删除', + GROUP_PARTITION_DELETE = 'GroupOffset-Partition纬度删除', // Test TEST_CONSUMER = 'Test-Consumer', TEST_PRODUCER = 'Test-Producer', @@ -39,6 +45,19 @@ export enum ClustersPermissionMap { MM2_DELETE = 'MM2-删除', MM2_RESTART = 'MM2-重启', MM2_STOP_RESUME = 'MM2-暂停&恢复', + // Connector + CONNECTOR_ADD = 'Connector-新增', + CONNECTOR_CHANGE_CONFIG = 'Connector-编辑', + CONNECTOR_DELETE = 'Connector-删除', + CONNECTOR_RESTART = 'Connector-重启', + CONNECTOR_STOP_RESUME = 'Connector-暂停&恢复', + // Security + SECURITY_ACL_ADD = 'Security-ACL新增', + SECURITY_ACL_DELETE = 'Security-ACL删除', + SECURITY_USER_ADD = 'Security-User新增', + SECURITY_USER_DELETE = 'Security-User删除', + SECURITY_USER_EDIT_PASSWORD = 'Security-User修改密码', + SECURITY_USER_VIEW_PASSWORD = 'Security-User查看密码', } export interface PermissionNode { @@ -88,6 +107,11 @@ const CommonConfig = () => { clustersPermissions && clustersPermissions.childList.forEach((node: PermissionNode) => node.has && userPermissions.push(node.permissionName)); + // 获取用户在系统管理拥有的权限 + const configPermissions = userPermissionTree.find((sys: PermissionNode) => sys.permissionName === ClustersPermissionMap.SYS_MANAGE); + 
configPermissions && + configPermissions.childList.forEach((node: PermissionNode) => node.has && userPermissions.push(node.permissionName)); + const hasPermission = (permissionName: ClustersPermissionMap) => permissionName && userPermissions.includes(permissionName); setGlobal((curState: any) => ({ ...curState, permissions: allPermissions, userPermissions, hasPermission, userInfo })); diff --git a/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnector.tsx b/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnector.tsx index 8724a0291..fa9f857dd 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnector.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnector.tsx @@ -189,7 +189,14 @@ const StepFormFirst = (props: SubFormProps) => { const result: FormConnectorConfigs = { pluginConfig: {}, }; + + // 获取一份默认配置 + const defaultPluginConfig: any = {}; + pluginConfig.configs.forEach(({ definition }) => { + // 获取一份默认配置 + defaultPluginConfig[definition.name] = definition?.defaultValue; + if (!getExistFormItems(pluginType).includes(definition.name)) { const pluginConfigs = result.pluginConfig; const group = definition.group || 'Others'; @@ -205,7 +212,7 @@ const StepFormFirst = (props: SubFormProps) => { Object.keys(result).length && form.setFieldsValue({ - configs: result, + configs: { ...result, defaultPluginConfig, editConnectorConfig: result.connectorConfig }, }); }) .finally(() => props.setSubmitLoading(false)); @@ -816,6 +823,8 @@ const StepFormFifth = (props: SubFormProps) => { ) : type.toUpperCase() === 'BOOLEAN' ? ( + ) : type.toUpperCase() === 'PASSWORD' ? ( + ) : ( )} @@ -947,7 +956,7 @@ export default forwardRef( success?: { connectClusterId: number; connectorName: string; - configs: { + config: { [key: string]: any; }; }; @@ -955,6 +964,7 @@ export default forwardRef( }) => void ) => { const promises: Promise[] = []; + const compareConfig = stepsFormRef.current[0].getFieldValue('configs'); // 获取步骤一的form信息 Object.values(stepsFormRef.current).forEach((form, i) => { const promise = form .validateFields() @@ -985,11 +995,22 @@ export default forwardRef( const [k, ...v] = l.split('='); result[k] = v.join('='); }); + + const editConnectorConfig = operateInfo.type === 'edit' ? 
compareConfig.editConnectorConfig : {}; // in edit mode, start from the saved connector config
+          const newCompareConfig = { ...compareConfig.defaultPluginConfig, ...editConnectorConfig, ...result }; // merged form values to submit
+          Object.keys(newCompareConfig).forEach((item) => {
+            if (
+              newCompareConfig[item] === compareConfig.defaultPluginConfig[item] ||
+              newCompareConfig[item]?.toString() === compareConfig.defaultPluginConfig[item]?.toString()
+            ) {
+              delete newCompareConfig[item]; // drop values that still equal the plugin defaults
+            }
+          });
           callback({
             success: {
               connectClusterId: res[0].connectClusterId,
               connectorName: result['name'],
-              configs: result,
+              config: newCompareConfig,
             },
           });
         },
@@ -1013,7 +1034,7 @@
             curClusterName = cluster.label;
           }
         });
-        (jsonRef as any)?.onOpen(operateInfo.type, curClusterName, info.success.configs);
+        (jsonRef as any)?.onOpen(operateInfo.type, curClusterName, info.success.config);
         onClose();
       }
     });
@@ -1026,9 +1047,9 @@
         setCurrentStep(info.error);
       } else {
         setSubmitLoading(true);
-        Object.entries(info.success.configs).forEach(([key, val]) => {
+        Object.entries(info.success.config).forEach(([key, val]) => {
           if (val === null) {
-            delete info.success.configs[key];
+            delete info.success.config[key];
           }
         });
         Utils.put(api.validateConnectorConfig, info.success).then(
diff --git a/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnectorUseJSON.tsx b/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnectorUseJSON.tsx
index b93513460..37d646e9d 100644
--- a/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnectorUseJSON.tsx
+++ b/km-console/packages/layout-clusters-fe/src/pages/Connect/AddConnectorUseJSON.tsx
@@ -1,7 +1,7 @@
 import api from '@src/api';
 import CodeMirrorFormItem from '@src/components/CodeMirrorFormItem';
 import customMessage from '@src/components/Message';
-import { Button, Divider, Drawer, Form, message, Space, Utils } from 'knowdesign';
+import { Button, Divider, Drawer, Form, message, Space, Utils, Select } from 'knowdesign';
 import React, { forwardRef, useEffect, useImperativeHandle, useState } from 'react';
 import { useParams } from 'react-router-dom';
 import { ConnectCluster, ConnectorPlugin, ConnectorPluginConfig, OperateInfo } from './AddConnector';
@@ -9,9 +9,8 @@ import { ConnectCluster, ConnectorPlugin, ConnectorPluginConfig, OperateInfo } f
 const PLACEHOLDER = `配置格式如下
 {
-  "connectClusterName": "", // Connect Cluster 名称
-  "configs": { // 具体配置项
-    "name": "",
+  "name": "", // Connector 名称
+  "config": { // 具体配置项
     "connector.class": "",
     "tasks.max": 1,
     ...
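// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the patch.] The hunks above
// reshape the connector submit payload from { connectClusterName, configs } to
// { connectClusterId, connectorName, config }, where `config` keeps only the
// values that differ from the plugin defaults. The default-stripping step in
// isolation (the helper name `stripDefaults` is hypothetical):
function stripDefaults(merged: Record<string, any>, defaults: Record<string, any>): Record<string, any> {
  const out: Record<string, any> = {};
  Object.keys(merged).forEach((key) => {
    // treat a value as unchanged if it matches the default strictly or after
    // string coercion, mirroring the newCompareConfig comparison above
    const unchanged =
      merged[key] === defaults[key] ||
      merged[key]?.toString() === defaults[key]?.toString();
    if (!unchanged) {
      out[key] = merged[key];
    }
  });
  return out;
}
// Assumed usage: Utils.put(api.validateConnectorConfig, { connectClusterId,
// connectorName, config: stripDefaults(formValues, pluginDefaults) });
// submitting only the user's overrides keeps the stored config minimal and
// lets new plugin defaults take effect after an upgrade.
// ---------------------------------------------------------------------------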
@@ -43,11 +42,16 @@ export default forwardRef((props: any, ref) => { const onOpen = (type: 'create' | 'edit', connectClusterName?: string, defaultConfigs?: { [key: string]: any }) => { if (defaultConfigs) { setDefaultConfigs({ ...defaultConfigs, connectClusterName }); + const connectorName = connectClusterName; + const connectClusterId = connectClusters.find((cluster) => cluster.label === connectClusterName).value; form.setFieldsValue({ + connectClusterId, + connectorName, configs: JSON.stringify( { - connectClusterName, - configs: defaultConfigs, + // connectClusterName, + name: defaultConfigs.name, + config: { ...defaultConfigs, name: undefined }, }, null, 2 @@ -63,13 +67,14 @@ export default forwardRef((props: any, ref) => { form.validateFields().then( (data) => { const postData = JSON.parse(data.configs); - postData.connectorName = postData.configs.name; - postData.connectClusterId = connectClusters.find((cluster) => cluster.label === postData.connectClusterName).value; - delete postData.connectClusterName; - - Object.entries(postData.configs).forEach(([key, val]) => { + postData.connectorName = postData.name; + postData.connectClusterId = data.connectClusterId; + postData.config.name = postData.name; + // delete postData.connectClusterName; + delete postData.name; + Object.entries(postData.config).forEach(([key, val]) => { if (val === null) { - delete postData.configs[key]; + delete postData.config[key]; } }); Utils.put(api.validateConnectorConfig, postData).then( @@ -161,6 +166,26 @@ export default forwardRef((props: any, ref) => { } >
+ + + +
+ + + ); +}; diff --git a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/Detail.tsx b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/Detail.tsx index 34001fa03..6438a139d 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/Detail.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/Detail.tsx @@ -1,12 +1,13 @@ import React, { useState, useEffect } from 'react'; import { useParams, useHistory } from 'react-router-dom'; -import { Drawer, ProTable, Utils } from 'knowdesign'; +import { Button, Space, Divider, Drawer, ProTable, Utils, notification } from 'knowdesign'; import { IconFont } from '@knowdesign/icons'; import API from '@src/api/index'; import { defaultPagination, hashDataParse } from '@src/constants/common'; import { getGtoupTopicColumns } from './config'; import { ExpandedRow } from './ExpandedRow'; import ResetOffsetDrawer from './ResetOffsetDrawer'; +import { useForceRefresh } from '@src/components/utils'; const { request } = Utils; export interface MetricLine { @@ -63,6 +64,7 @@ const GroupDetail = (props: any) => { const [openKeys, setOpenKeys] = useState(); const [resetOffsetVisible, setResetOffsetVisible] = useState(false); const [resetOffsetArg, setResetOffsetArg] = useState({}); + const [refreshKey, forceRefresh] = useForceRefresh(); const genData = async ({ pageNo, pageSize, groupName }: any) => { if (urlParams?.clusterId === undefined) return; @@ -110,6 +112,23 @@ const GroupDetail = (props: any) => { groupName: record?.groupName, }); }; + // 删除消费组Topic + const deleteOffset = (record: any) => { + const params = { + clusterPhyId: +urlParams?.clusterId, + deleteType: 1, // 0:group纬度,1:Topic纬度,2:Partition纬度 + groupName: record.groupName, + topicName: record.topicName, + }; + Utils.delete(API.deleteGroupOffset(), { data: params }).then((data: any) => { + if (data === null) { + notification.success({ + message: '删除Topic成功!', + }); + genData({ pageNo: 1, pageSize: pagination.pageSize, groupName: hashData.groupName }); + } + }); + }; const onTableChange = (pagination: any, filters: any, sorter: any) => { genData({ pageNo: pagination.current, pageSize: pagination.pageSize, filters, sorter, groupName: hashData.groupName }); @@ -160,7 +179,7 @@ const GroupDetail = (props: any) => { // // 获取Consumer列表 表格模式 // getTopicGroupMetric(hashData); // }); - }, [hashDataParse(location.hash).groupName]); + }, [hashDataParse(location.hash).groupName, refreshKey]); return ( { // // // } + extra={ + + void}> + + + + + } > { showHeader: false, rowKey: 'key', loading: loading, - columns: getGtoupTopicColumns({ resetOffset }), + columns: getGtoupTopicColumns({ resetOffset, deleteOffset }), dataSource: topicData, paginationProps: { ...pagination }, // noPagination: true, @@ -209,6 +236,7 @@ const GroupDetail = (props: any) => { chartData={chartData} groupName={hashDataParse(location.hash).groupName} loading={loadingObj} + refreshKey={refreshKey} /> ), // expandedRowRender, @@ -241,7 +269,12 @@ const GroupDetail = (props: any) => { }, }} /> - + ); }; diff --git a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ExpandedRow.tsx b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ExpandedRow.tsx index 3e9519437..3c2c24d00 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ExpandedRow.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ExpandedRow.tsx @@ -41,7 +41,7 @@ const metricWithType = [ { metricName: 'Lag', metricType: 102 }, ]; -export const 
ExpandedRow: any = ({ record, groupName }: any) => { +export const ExpandedRow: any = ({ record, groupName, refreshKey }: any) => { const params: any = useParams<{ clusterId: string; }>(); @@ -193,7 +193,7 @@ export const ExpandedRow: any = ({ record, groupName }: any) => { endTime: timeRange[1], topNu: 0, }; - Utils.post(API.getTopicGroupMetricHistory(clusterId), params).then((data: Array) => { + Utils.post(API.getTopicGroupMetricHistory(clusterId), params, { timeout: 300000 }).then((data: Array) => { // ! 替换接口返回 setAllGroupMetricsData(data); }); @@ -210,10 +210,6 @@ export const ExpandedRow: any = ({ record, groupName }: any) => { getTopicGroupMetric({ pagination, sorter }); }; - // useEffect(() => { - // getTopicGroupMetric(); - // }, [sortObj]); - useEffect(() => { const hashData = hashDataParse(location.hash); // if (!hashData.groupName) return; @@ -242,7 +238,7 @@ export const ExpandedRow: any = ({ record, groupName }: any) => { // 获取Consumer列表 表格模式 getTopicGroupMetric({}); }); - }, [hashDataParse(location.hash).groupName]); + }, [hashDataParse(location.hash).groupName, refreshKey]); useEffect(() => { if (partitionList.length === 0) return; diff --git a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx index 75cc390a0..1e53f350c 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/ResetOffsetDrawer.tsx @@ -44,7 +44,7 @@ const CustomSelectResetTime = (props: { value?: string; onChange?: (val: Number }; export default (props: any) => { - const { record, visible, setVisible } = props; + const { record, visible, setVisible, resetOffsetFn } = props; const routeParams = useParams<{ clusterId: string; }>(); @@ -106,6 +106,8 @@ export default (props: any) => { message: '重置offset成功', }); setVisible(false); + // 发布重置offset成功的消息 + resetOffsetFn(); } else { notification.error({ message: '重置offset失败', diff --git a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/config.tsx index 8edd92802..977006e24 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/config.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/config.tsx @@ -1,8 +1,9 @@ /* eslint-disable @typescript-eslint/explicit-module-boundary-types */ import React from 'react'; -import { AppContainer } from 'knowdesign'; +import { AppContainer, Button, Popconfirm } from 'knowdesign'; import TagsWithHide from '@src/components/TagsWithHide'; import { ClustersPermissionMap } from '../CommonConfig'; +import Delete from './Delete'; export const runningStatusEnum: any = { 1: 'Doing', @@ -21,7 +22,8 @@ export const defaultPagination = { }; export const getGroupColumns = (arg?: any) => { - const columns = [ + const [global] = AppContainer.useGlobalValue(); + const columns: any = [ { title: 'ConsumerGroup', dataIndex: 'name', @@ -63,6 +65,23 @@ export const getGroupColumns = (arg?: any) => { render: (t: number) => (t ? t.toLocaleString() : '-'), }, ]; + if (global.hasPermission && global.hasPermission(ClustersPermissionMap.GROUP_DELETE)) { + columns.push({ + title: '操作', + dataIndex: 'options', + key: 'options', + width: 200, + filterTitle: true, + fixed: 'right', + render: (_t: any, r: any) => { + return ( +
+ +
+ ); + }, + }); + } return columns; }; @@ -98,16 +117,33 @@ export const getGtoupTopicColumns = (arg?: any) => { render: (t: number) => (t ? t.toLocaleString() : '-'), }, ]; - if (global.hasPermission && global.hasPermission(ClustersPermissionMap.CONSUMERS_RESET_OFFSET)) { + if (global.hasPermission) { columns.push({ title: '操作', dataIndex: 'desc', key: 'desc', - width: 150, + width: 200, render: (value: any, record: any) => { return (
- arg.resetOffset(record)}>重置Offset + {global.hasPermission(ClustersPermissionMap.CONSUMERS_RESET_OFFSET) ? ( + arg.resetOffset(record)}>重置Offset + ) : ( + <> + )} + {global.hasPermission(ClustersPermissionMap.GROUP_TOPIC_DELETE) ? ( + arg.deleteOffset(record)} + okText="是" + cancelText="否" + > + + + ) : ( + <> + )}
        );
      },
diff --git a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/index.tsx
index 8e0bb6fba..b54a5878a 100644
--- a/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/index.tsx
+++ b/km-console/packages/layout-clusters-fe/src/pages/ConsumerGroup/index.tsx
@@ -58,6 +58,11 @@ const BrokerList: React.FC = (props: any) => {
     genData({ pageNo: pagination.current, pageSize: pagination.pageSize, filters, sorter });
   };
 
+  // refresh the group list after a group has been deleted
+  const deleteTask = () => {
+    genData({ pageNo: 1, pageSize: pagination.pageSize });
+  };
+
   useEffect(() => {
     genData({
       pageNo: 1,
@@ -115,7 +120,7 @@ const BrokerList: React.FC = (props: any) => {
         showHeader: false,
         rowKey: 'group_list',
         loading: loading,
-        columns: getGroupColumns(),
+        columns: getGroupColumns(deleteTask),
         dataSource: data,
         paginationProps: { ...pagination },
         attrs: {
diff --git a/km-console/packages/layout-clusters-fe/src/pages/Login/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/Login/index.tsx
index 998022311..f07e20334 100644
--- a/km-console/packages/layout-clusters-fe/src/pages/Login/index.tsx
+++ b/km-console/packages/layout-clusters-fe/src/pages/Login/index.tsx
@@ -13,7 +13,7 @@ const carouselList = [
Github: - 5.8K + 6.8K + Star的实时流处理平台
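// ---------------------------------------------------------------------------
// [Editor's note: illustrative sketch, not part of the patch.] A recurring
// pattern in the hunks above (ConsumerGroup, and later SecurityACLs and
// SecurityUsers) is gating UI entry points behind the new permission points:
// the "操作" column is only pushed onto the table when the current user holds
// the permission, so unauthorized users never see the action at all.
// Simplified stand-in types; `buildGroupColumns` is a hypothetical name:
const buildGroupColumns = (hasPermission: (p: string) => boolean, onDelete: (record: any) => void): any[] => {
  const columns: any[] = [
    { title: 'ConsumerGroup', dataIndex: 'name' },
    // ...metric columns elided...
  ];
  // 'Group-删除' is the GROUP_DELETE permission point from ClustersPermissionMap
  if (hasPermission('Group-删除')) {
    columns.push({
      title: '操作',
      dataIndex: 'options',
      fixed: 'right',
      // the delete entry (Popconfirm + button, elided) lives only inside this
      // conditionally added column and calls onDelete(record) on confirm
      render: (_: any, record: any) => null,
    });
  }
  return columns;
};
// Note: hiding the column is a UX measure; the backend checks on the delete
// endpoints remain the actual access-control boundary.
// ---------------------------------------------------------------------------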
diff --git a/km-console/packages/layout-clusters-fe/src/pages/MirrorMaker2/index.less b/km-console/packages/layout-clusters-fe/src/pages/MirrorMaker2/index.less index 826649028..d73f5625a 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MirrorMaker2/index.less +++ b/km-console/packages/layout-clusters-fe/src/pages/MirrorMaker2/index.less @@ -185,7 +185,7 @@ .operate-connector-drawer-use-json { .CodeMirror.cm-s-default { - height: calc(100vh - 146px); + height: calc(100vh - 196px); } .dcloud-form-item { margin-bottom: 0 !important; diff --git a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx index 28992f47d..fec1a8a6b 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/MutliClusterPage/AccessCluster.tsx @@ -522,28 +522,22 @@ const ConnectorForm = (props: { const params = { ...values, id: initFieldsValue?.id, - jmxProperties: values.jmxProperties ? `{ "jmxProperties": "${values.jmxProperties}" }` : undefined, + jmxProperties: values.jmxProperties ? `{ "jmxPort": "${values.jmxProperties}" }` : undefined, }; - Utils.put(api.batchConnectClusters, [params]) - .then((res) => { - // setSelectedTabKey(undefined); - getConnectClustersList(); - notification.success({ - message: '修改Connect集群成功', - }); - }) - .catch((error) => { - notification.success({ - message: '修改Connect集群失败', - }); + Utils.put(api.batchConnectClusters, [params]).then((res) => { + // setSelectedTabKey(undefined); + getConnectClustersList(); + notification.success({ + message: '修改Connect集群成功', }); + }); }; const onCancel = () => { setSelectedTabKey(undefined); try { const jmxPortInfo = JSON.parse(initFieldsValue.jmxProperties) || {}; - form.setFieldsValue({ ...initFieldsValue, jmxProperties: jmxPortInfo.jmxProperties }); + form.setFieldsValue({ ...initFieldsValue, jmxProperties: jmxPortInfo.jmxPort }); } catch { form.setFieldsValue({ ...initFieldsValue }); } @@ -552,7 +546,7 @@ const ConnectorForm = (props: { useLayoutEffect(() => { try { const jmxPortInfo = JSON.parse(initFieldsValue.jmxProperties) || {}; - form.setFieldsValue({ ...initFieldsValue, jmxProperties: jmxPortInfo.jmxProperties }); + form.setFieldsValue({ ...initFieldsValue, jmxProperties: jmxPortInfo.jmxPort }); } catch { form.setFieldsValue({ ...initFieldsValue }); } diff --git a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx index a325a9bce..1cc3a9099 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/EditDrawer.tsx @@ -132,16 +132,35 @@ const AddDrawer = forwardRef((_, ref) => { form.validateFields().then((formData) => { const submitData = []; const { configType, principle, kafkaUser } = formData; - if (configType === 'custom') { // 1. 自定义权限 - const { resourceType, resourcePatternType, aclPermissionType, aclOperation, aclClientHost } = formData; + // TODO: 需要和后端联调 + const { + resourceType, + resourcePatternType, + aclPermissionType, + aclOperation, + aclClientHost, + cluster, + topicName, + topicPatternType, + groupName, + groupPatternType, + transactionalId, + transactionalIdPatternType, + } = formData; submitData.push({ clusterId, kafkaUser: principle === 'all' ? 
'*' : kafkaUser, resourceType, - resourcePatternType, - resourceName: '*', + resourcePatternType: cluster + ? 3 + : topicPatternType + ? topicPatternType + : groupPatternType + ? groupPatternType + : transactionalIdPatternType, + resourceName: cluster ? cluster : topicName ? topicName : groupName ? groupName : transactionalId, aclPermissionType, aclOperation, aclClientHost, @@ -281,6 +300,42 @@ const AddDrawer = forwardRef((_, ref) => { {({ getFieldValue }) => { + const SelectFormItems = (props: { type: string }) => { + const { type } = props; + return ( + ({ + validator: (rule: any, value: string) => { + if (!value) { + return Promise.reject(`${type}Name 不能为空`); + } + if (type === 'topic' && getFieldValue(`${type}PatternType`) === ACL_PATTERN_TYPE['Literal']) { + return Utils.request(api.getTopicMetadata(clusterId as any, value)).then((res: any) => { + return res?.exist ? Promise.resolve() : Promise.reject('该 Topic 不存在'); + }); + } + return Promise.resolve(); + }, + }), + ]} + > + { + if (option?.value.includes(value)) { + return true; + } + return false; + }} + options={type === 'topic' ? topicMetaData : groupMetaData} + placeholder={`请输入 ${type}Name`} + /> + + ); + }; const PatternTypeFormItems = (props: { type: string }) => { const { type } = props; const UpperCaseType = type[0].toUpperCase() + type.slice(1); @@ -311,37 +366,43 @@ const AddDrawer = forwardRef((_, ref) => { {({ getFieldValue }) => getFieldValue(`${type}Principle`) === 'special' ? ( - ({ - validator: (rule: any, value: string) => { - if (!value) { - return Promise.reject(`${UpperCaseType}Name 不能为空`); - } - if (type === 'topic' && getFieldValue(`${type}PatternType`) === ACL_PATTERN_TYPE['Literal']) { - return Utils.request(api.getTopicMetadata(clusterId as any, value)).then((res: any) => { - return res?.exist ? Promise.resolve() : Promise.reject('该 Topic 不存在'); - }); + type !== 'transactionalId' ? ( + ({ + validator: (rule: any, value: string) => { + if (!value) { + return Promise.reject(`${UpperCaseType}Name 不能为空`); + } + if (type === 'topic' && getFieldValue(`${type}PatternType`) === ACL_PATTERN_TYPE['Literal']) { + return Utils.request(api.getTopicMetadata(clusterId as any, value)).then((res: any) => { + return res?.exist ? Promise.resolve() : Promise.reject('该 Topic 不存在'); + }); + } + return Promise.resolve(); + }, + }), + ]} + > + { + if (option?.value.includes(value)) { + return true; } - return Promise.resolve(); - }, - }), - ]} - > - { - if (option?.value.includes(value)) { - return true; - } - return false; - }} - options={type === 'topic' ? topicMetaData : groupMetaData} - placeholder={`请输入 ${type}Name`} - /> - + return false; + }} + options={type === 'topic' ? 
topicMetaData : groupMetaData} + placeholder={`请输入 ${type}Name`} + /> + + ) : ( + + + + ) ) : null } @@ -363,7 +424,7 @@ const AddDrawer = forwardRef((_, ref) => { Deny - { Literal Prefixed - + */} { }))} /> + + {({ getFieldValue }) => { + const type = getFieldValue('resourceType'); + if (type === ACL_RESOURCE_TYPE['Cluster']) { + //TODO需要和后端获取集群和事务接口联调 + return ( + + + + ); + } else if (type === ACL_RESOURCE_TYPE['TransactionalId']) { + return ; + } else if (type === ACL_RESOURCE_TYPE['Topic']) { + return ; + } else if (type === ACL_RESOURCE_TYPE['Group']) { + return ; + } + return null; + }} + {({ getFieldValue }) => { form.resetFields(['aclOperation']); diff --git a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx index 9919afdda..46befe501 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/SecurityACLs/index.tsx @@ -14,6 +14,7 @@ import AddACLDrawer, { RESOURCE_TO_OPERATIONS_MAP, RESOURCE_MAP_KEYS, } from './EditDrawer'; +import { ClustersPermissionMap } from '../CommonConfig'; import './index.less'; const { confirm } = Modal; @@ -105,7 +106,7 @@ const SecurityACLs = (): JSX.Element => { }; const columns = () => { - const baseColumns = [ + const baseColumns: any = [ { title: 'Principal', dataIndex: 'kafkaUser', @@ -143,7 +144,9 @@ const SecurityACLs = (): JSX.Element => { title: 'Host', dataIndex: 'aclClientHost', }, - { + ]; + if (global.hasPermission && global.hasPermission(ClustersPermissionMap.SECURITY_ACL_DELETE)) { + baseColumns.push({ title: '操作', dataIndex: '', width: 120, @@ -156,8 +159,8 @@ const SecurityACLs = (): JSX.Element => { ); }, - }, - ]; + }); + } return baseColumns; }; @@ -238,15 +241,19 @@ const SecurityACLs = (): JSX.Element => {
-
- -
+ {global.hasPermission && global.hasPermission(ClustersPermissionMap.SECURITY_ACL_ADD) ? ( +
+ +
+ ) : ( + <> + )} { const maxPos = chars.length; let str = ''; @@ -85,7 +85,7 @@ const PasswordContent = (props: { clusterId: string; name: string }) => { const { clusterId, name } = props; const [loading, setLoading] = useState(false); const [pw, setPw] = useState(initialShowPassword); - + const [global] = AppContainer.useGlobalValue(); const switchPwStatus = () => { if (!loading) { setLoading(true); @@ -113,9 +113,13 @@ const PasswordContent = (props: { clusterId: string; name: string }) => {
{pw}
- - {loading ? : pw === initialShowPassword ? : } - + {global.hasPermission(ClustersPermissionMap.SECURITY_USER_VIEW_PASSWORD) ? ( + + {loading ? : pw === initialShowPassword ? : } + + ) : ( + <> + )} ); }; @@ -323,7 +327,7 @@ const SecurityUsers = (): JSX.Element => { }; const columns = () => { - const baseColumns = [ + const baseColumns: any = [ { title: 'KafkaUser', dataIndex: 'name', @@ -348,30 +352,39 @@ const SecurityUsers = (): JSX.Element => { return ; }, }, - { + ]; + if (global.hasPermission) { + baseColumns.push({ title: '操作', dataIndex: '', width: 240, render(record: UsersProps) { return ( <> - - + {global.hasPermission(ClustersPermissionMap.SECURITY_USER_EDIT_PASSWORD) ? ( + + ) : ( + <> + )} + {global.hasPermission(ClustersPermissionMap.SECURITY_USER_DELETE) ? ( + + ) : ( + <> + )} ); }, - }, - ]; - + }); + } return baseColumns; }; @@ -454,13 +467,17 @@ const SecurityUsers = (): JSX.Element => { setSearchKeywordsInput(e.target.value); }} /> - + {global.hasPermission && global.hasPermission(ClustersPermissionMap.SECURITY_USER_ADD) ? ( + + ) : ( + <> + )} diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx index 9b3a12a09..f2bb1713b 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/BrokersDetail.tsx @@ -8,6 +8,7 @@ import { useParams } from 'react-router-dom'; import TagsWithHide from '@src/components/TagsWithHide'; import SwitchTab from '@src/components/SwitchTab'; import RenderEmpty from '@src/components/RenderEmpty'; +import { useForceRefresh } from '@src/components/utils'; interface PropsType { hashData: any; @@ -401,11 +402,18 @@ export default (props: PropsType) => { const { hashData } = props; const [showMode, setShowMode] = useState('card'); + const [refreshKey, forceRefresh] = useForceRefresh(); return ( <> -
+
+ void} + > + +
diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConsumerGroupDetail.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConsumerGroupDetail.tsx
index bd502b1a2..0183dca54 100644
--- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConsumerGroupDetail.tsx
+++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ConsumerGroupDetail.tsx
@@ -10,6 +10,7 @@
 import { ClustersPermissionMap } from '../CommonConfig';
 import ResetOffsetDrawer from './ResetOffsetDrawer';
 import SwitchTab from '@src/components/SwitchTab';
 import ContentWithCopy from '@src/components/CopyContent';
+import PubSub from 'pubsub-js';
 
 const { Option } = Select;
@@ -335,6 +336,15 @@ export default (props: any) => {
     });
   }, [visible]);
 
+  // subscribe to the reset-offset success message and refresh the consumer list;
+  // wrapped in useEffect with an unsubscribe cleanup so re-renders do not pile
+  // up duplicate subscriptions
+  useEffect(() => {
+    const token = PubSub.subscribe('TopicDetail-ResetOffset', (message: string, data: any) => {
+      getTopicGroupMetric({ hashData: data });
+    });
+    return () => PubSub.unsubscribe(token);
+  }, []);
+
   useEffect(() => {
     if (partitionList.length === 0) return;
     getTopicGroupMetricHistory(partitionList, hashData);
diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx
index ed948e8c0..25674eaa6 100644
--- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx
+++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/ResetOffsetDrawer.tsx
@@ -4,6 +4,7 @@
 import { useParams } from 'react-router-dom';
 import EditTable from '../TestingProduce/component/EditTable';
 import Api from '@src/api/index';
 import moment from 'moment';
+import PubSub from 'pubsub-js';
 
 const CustomSelectResetTime = (props: { value?: string; onChange?: (val: Number | String) => void }) => {
   const { value, onChange } = props;
@@ -106,6 +107,13 @@ export default (props: any) => {
           message: '重置offset成功',
         });
         setResetOffsetVisible(false);
+        // publish the reset-offset success message
+        PubSub.publish('TopicDetail-ResetOffset',
+          {
+            groupName: record.groupName,
+            topicName: record.topicName
+          }
+        );
       } else {
         notification.error({
           message: '重置offset失败',
diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/config.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/config.tsx
index aa1e25d3e..70055b2da 100644
--- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/config.tsx
+++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/config.tsx
@@ -81,7 +81,8 @@ export const getTopicMessagesColmns = () => {
     {
       title: 'Offset',
       dataIndex: 'offset',
       key: 'offset',
-      render: (t: number) => (t ? t.toLocaleString() : '-'),
+      sorter: true,
+      render: (t: number) => (+t || +t === 0 ?
t.toLocaleString() : '-'), // TODO: 千分位展示 }, { title: 'Timestamp', diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.less b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.less index f13249ab0..ea8470f81 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.less +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.less @@ -26,6 +26,7 @@ .left { display: flex; + align-items: center; .info-box { display: flex; height: 36px; diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.tsx index ea009bd44..169f81769 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicDetail/index.tsx @@ -15,9 +15,21 @@ import Replicator from './Replicator'; import './index.less'; import TopicDetailHealthCheck from '@src/components/CardBar/TopicDetailHealthCheck'; import { hashDataParse } from '@src/constants/common'; +import { useForceRefresh } from '@src/components/utils'; const { TabPane } = Tabs; +const Reload = (props: any) => { + return ( + void} + > + + + ); +}; + const OperationsSlot: any = { // eslint-disable-next-line react/display-name // ['Partitions']: (arg: any) => { @@ -70,17 +82,20 @@ const OperationsSlot: any = { // eslint-disable-next-line react/display-name ['ConsumerGroups']: (arg: any) => { return ( - + <> + + + ); }, }; @@ -94,6 +109,7 @@ const TopicDetail = (props: any) => { const [searchValue, setSearchValue] = useState(''); const [visible, setVisible] = useState(false); const [hashData, setHashData] = useState({}); + const [refreshKey, forceRefresh] = useForceRefresh(); const callback = (key: any) => { setSearchValue(''); @@ -184,7 +200,7 @@ const TopicDetail = (props: any) => { onChange={callback} tabBarExtraContent={ OperationsSlot[positionType] && - OperationsSlot[positionType]({ ...props, setSearchKeywords, setSearchValue, searchValue, positionType }) + OperationsSlot[positionType]({ ...props, setSearchKeywords, setSearchValue, searchValue, positionType, forceRefresh }) } destroyInactiveTabPane > @@ -196,7 +212,7 @@ const TopicDetail = (props: any) => { {positionType === 'ConsumerGroups' && ( - + )} diff --git a/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx b/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx index 4c3f88573..0a00fa049 100644 --- a/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx +++ b/km-console/packages/layout-clusters-fe/src/pages/TopicList/index.tsx @@ -1,7 +1,22 @@ /* eslint-disable react/display-name */ import React, { useState, useEffect } from 'react'; import { useHistory, useParams } from 'react-router-dom'; -import { AppContainer, Input, ProTable, Select, Switch, Tooltip, Utils, Dropdown, Menu, Button, Divider, Tag } from 'knowdesign'; +import { + AppContainer, + Input, + ProTable, + Select, + Switch, + Tooltip, + Utils, + Dropdown, + Menu, + Button, + Divider, + Tag, + Popconfirm, + notification, +} from 'knowdesign'; import { IconFont } from '@knowdesign/icons'; import Create from './Create'; import './index.less'; @@ -85,6 +100,21 @@ const AutoPage = (props: any) => { setTopicListLoading(false); }); }; + const deleteTopicData = (record: any) => { + console.log(record, 'record'); + const params = { + clusterId: Number(routeParams.clusterId), + topicName: record.topicName, + }; + Utils.post(Api.deleteTopicData(), 
params).then((data: any) => { + if (data === null) { + notification.success({ + message: '清除数据成功', + }); + getTopicsList(); + } + }); + }; useEffect(() => { getTopicsList(); }, [sortObj, showInternalTopics, searchKeywords, pageIndex, pageSize]); @@ -247,7 +277,7 @@ const AutoPage = (props: any) => { dataIndex: 'desc', key: 'desc', fixed: 'right', - width: 140, + width: 200, render: (value: any, record: any) => { return (
@@ -257,6 +287,19 @@ const AutoPage = (props: any) => { <> )} {global.hasPermission(ClustersPermissionMap.TOPIC_DEL) ? : <>} + {global.hasPermission(ClustersPermissionMap.TOPIC_DEL) ? ( // TODO:替换为清除数据的权限 + deleteTopicData(record)} + okText="是" + cancelText="否" + > + + + ) : ( + <> + )}
); }, diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/KafkaAclService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/KafkaAclService.java index 3e50771b5..9e9735a96 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/KafkaAclService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/KafkaAclService.java @@ -1,15 +1,13 @@ package com.xiaojukeji.know.streaming.km.core.service.acl; -import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO; +import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService; import org.apache.kafka.common.acl.AclBinding; import org.apache.kafka.common.resource.ResourceType; import java.util.List; -public interface KafkaAclService { - Result> getAclFromKafka(Long clusterPhyId); - +public interface KafkaAclService extends MetaDataService { List getKafkaAclFromDB(Long clusterPhyId); Integer countKafkaAclFromDB(Long clusterPhyId); @@ -17,10 +15,5 @@ public interface KafkaAclService { Integer countResTypeAndDistinctFromDB(Long clusterPhyId, ResourceType resourceType); Integer countKafkaUserAndDistinctFromDB(Long clusterPhyId); - - List getKafkaResTypeAclFromDB(Long clusterPhyId, Integer resType); - List getTopicAclFromDB(Long clusterPhyId, String topicName); - - List getGroupAclFromDB(Long clusterPhyId, String groupName); } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/OpKafkaAclService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/OpKafkaAclService.java index 7dd59c754..f6129326b 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/OpKafkaAclService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/OpKafkaAclService.java @@ -3,10 +3,6 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.param.acl.ACLAtomParam; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO; -import org.apache.kafka.common.resource.ResourceType; - -import java.util.Date; -import java.util.List; public interface OpKafkaAclService { /** @@ -19,14 +15,5 @@ public interface OpKafkaAclService { */ Result deleteKafkaAcl(ACLAtomParam aclAtomParam, String operator); - /** - * 删除ACL - */ - Result deleteKafkaAclByResName(ResourceType resourceType, String resourceName, String operator); - Result insertAndIgnoreDuplicate(KafkaAclPO kafkaAclPO); - - void batchUpdateAcls(Long clusterPhyId, List poList); - - int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime); } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java index 8f1473cda..65258044a 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/KafkaAclServiceImpl.java @@ -11,6 +11,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; import com.xiaojukeji.know.streaming.km.common.constant.KafkaConstant; +import com.xiaojukeji.know.streaming.km.common.converter.KafkaAclConverter; import 
com.xiaojukeji.know.streaming.km.common.enums.cluster.ClusterAuthTypeEnum;
 import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum;
 import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException;
@@ -18,8 +19,6 @@
 import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
 import com.xiaojukeji.know.streaming.km.core.service.cluster.ClusterPhyService;
 import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService;
-import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService;
-import com.xiaojukeji.know.streaming.km.persistence.cache.LoadedClusterPhyCache;
 import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient;
 import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient;
 import com.xiaojukeji.know.streaming.km.persistence.mysql.KafkaAclDAO;
@@ -36,11 +35,13 @@
 import org.apache.kafka.common.security.auth.KafkaPrincipal;
 import org.apache.kafka.common.utils.SecurityUtils;
 import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.dao.DuplicateKeyException;
 import org.springframework.stereotype.Service;
 
 import javax.annotation.PostConstruct;
-import java.util.ArrayList;
-import java.util.List;
+import java.util.*;
+import java.util.function.Function;
+import java.util.stream.Collectors;
 
 import scala.jdk.javaapi.CollectionConverters;
@@ -77,18 +78,49 @@ private void init() {
     }
 
     @Override
-    public Result<List<AclBinding>> getAclFromKafka(Long clusterPhyId) {
-        if (LoadedClusterPhyCache.getByPhyId(clusterPhyId) == null) {
-            return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getClusterPhyNotExist(clusterPhyId));
-        }
-
+    public Result<List<AclBinding>> getDataFromKafka(ClusterPhy clusterPhy) {
         try {
-            return (Result<List<AclBinding>>) versionControlService.doHandler(getVersionItemType(), getMethodName(clusterPhyId, ACL_GET_FROM_KAFKA), new ClusterPhyParam(clusterPhyId));
+            Result<List<AclBinding>> dataResult = (Result<List<AclBinding>>) versionControlService.doHandler(getVersionItemType(), getMethodName(clusterPhy.getId(), ACL_GET_FROM_KAFKA), new ClusterPhyParam(clusterPhy.getId()));
+            if (dataResult.failed()) {
+                return Result.buildFromIgnoreData(dataResult);
+            }
+
+            return Result.buildSuc(dataResult.getData());
         } catch (VCHandlerNotExistException e) {
             return Result.buildFailure(e.getResultStatus());
         }
     }
 
+    @Override
+    public void writeToDB(Long clusterPhyId, List<AclBinding> dataList) {
+        Map<String, KafkaAclPO> dbPOMap = this.getKafkaAclFromDB(clusterPhyId).stream().collect(Collectors.toMap(KafkaAclPO::getUniqueField, Function.identity()));
+
+        long now = System.currentTimeMillis();
+        for (AclBinding aclBinding: dataList) {
+            KafkaAclPO newPO = KafkaAclConverter.convert2KafkaAclPO(clusterPhyId, aclBinding, now);
+            KafkaAclPO oldPO = dbPOMap.remove(newPO.getUniqueField());
+            if (oldPO == null) {
+                // newly added ACL
+                this.insertAndIgnoreDuplicate(newPO);
+            }
+
+            // existing entries need no update
+        }
+
+        // delete ACLs that no longer exist in the cluster
+        for (KafkaAclPO dbPO: dbPOMap.values()) {
+            kafkaAclDAO.deleteById(dbPO);
+        }
+    }
+
+    @Override
+    public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
+        LambdaQueryWrapper<KafkaAclPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
+        lambdaQueryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId);
+
+        return kafkaAclDAO.delete(lambdaQueryWrapper);
+    }
+
     @Override
     public List<KafkaAclPO> getKafkaAclFromDB(Long clusterPhyId) {
         LambdaQueryWrapper<KafkaAclPO> queryWrapper = new LambdaQueryWrapper<>();
@@ -116,7 +148,7 @@ public Integer countResTypeAndDistinctFromDB(Long clusterPhyId, ResourceType res
             return 0;
         }
 
-        return (int)poList.stream().map(elem ->
elem.getResourceName()).distinct().count(); + return (int)poList.stream().map(KafkaAclPO::getResourceName).distinct().count(); } @Override @@ -130,15 +162,7 @@ public Integer countKafkaUserAndDistinctFromDB(Long clusterPhyId) { return 0; } - return (int)poList.stream().map(elem -> elem.getPrincipal()).distinct().count(); - } - - @Override - public List getKafkaResTypeAclFromDB(Long clusterPhyId, Integer resType) { - LambdaQueryWrapper queryWrapper = new LambdaQueryWrapper<>(); - queryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId); - queryWrapper.eq(KafkaAclPO::getResourceType, resType); - return kafkaAclDAO.selectList(queryWrapper); + return (int)poList.stream().map(KafkaAclPO::getPrincipal).distinct().count(); } @Override @@ -152,15 +176,6 @@ public List getTopicAclFromDB(Long clusterPhyId, String topicName) { return kafkaAclDAO.selectList(queryWrapper); } - @Override - public List getGroupAclFromDB(Long clusterPhyId, String groupName) { - LambdaQueryWrapper queryWrapper = new LambdaQueryWrapper<>(); - queryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId); - queryWrapper.eq(KafkaAclPO::getResourceType, ResourceType.GROUP.code()); - queryWrapper.eq(KafkaAclPO::getResourceName, groupName); - return kafkaAclDAO.selectList(queryWrapper); - } - /**************************************************** private method ****************************************************/ private Result> getAclByZKClient(VersionItemParam itemParam){ @@ -170,7 +185,7 @@ private Result> getAclByZKClient(VersionItemParam itemParam){ for (ZkAclStore store: CollectionConverters.asJava(ZkAclStore.stores())) { Result> rl = this.getSpecifiedTypeAclByZKClient(param.getClusterPhyId(), store.patternType()); if (rl.failed()) { - return rl; + return Result.buildFromIgnoreData(rl); } aclList.addAll(rl.getData()); @@ -229,4 +244,19 @@ private Result> getSpecifiedTypeAclByZKClient(Long clusterPhyId return Result.buildSuc(kafkaAclList); } + + private Result insertAndIgnoreDuplicate(KafkaAclPO kafkaAclPO) { + try { + kafkaAclDAO.insert(kafkaAclPO); + + return Result.buildSuc(); + } catch (DuplicateKeyException dke) { + // 直接写入,如果出现key冲突则直接忽略,因为key冲突时,表示该数据已完整存在,不需要替换任何数据 + return Result.buildSuc(); + } catch (Exception e) { + log.error("method=insertAndIgnoreDuplicate||kafkaAclPO={}||errMsg=exception", kafkaAclPO, e); + + return Result.buildFromRSAndMsg(ResultStatus.MYSQL_OPERATE_FAILED, e.getMessage()); + } + } } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/OpKafkaAclServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/OpKafkaAclServiceImpl.java index a8fab1f1b..c3915cded 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/OpKafkaAclServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/acl/impl/OpKafkaAclServiceImpl.java @@ -20,7 +20,6 @@ import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService; import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService; import com.xiaojukeji.know.streaming.km.core.service.version.BaseKafkaVersionControlService; -import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminClient; import com.xiaojukeji.know.streaming.km.persistence.kafka.KafkaAdminZKClient; import com.xiaojukeji.know.streaming.km.persistence.mysql.KafkaAclDAO; @@ -32,7 +31,6 @@ import org.apache.kafka.common.acl.*; import 
org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourcePatternFilter; -import org.apache.kafka.common.resource.ResourceType; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.dao.DuplicateKeyException; @@ -41,8 +39,6 @@ import javax.annotation.PostConstruct; import java.util.*; -import java.util.function.Function; -import java.util.stream.Collectors; import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionEnum.*; @@ -169,11 +165,6 @@ public Result deleteKafkaAcl(ACLAtomParam aclAtomParam, String operator) { return rv; } - @Override - public Result deleteKafkaAclByResName(ResourceType resourceType, String resourceName, String operator) { - return Result.buildSuc(); - } - @Override public Result insertAndIgnoreDuplicate(KafkaAclPO kafkaAclPO) { try { @@ -190,34 +181,6 @@ public Result insertAndIgnoreDuplicate(KafkaAclPO kafkaAclPO) { } } - @Override - public void batchUpdateAcls(Long clusterPhyId, List poList) { - LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); - lambdaQueryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId); - - Map dbPOMap = kafkaAclDAO.selectList(lambdaQueryWrapper).stream().collect(Collectors.toMap(KafkaAclPO::getUniqueField, Function.identity())); - for (KafkaAclPO po: poList) { - KafkaAclPO dbPO = dbPOMap.remove(po.getUniqueField()); - if (dbPO == null) { - // 新增的ACL - this.insertAndIgnoreDuplicate(po); - } - } - - // 删除已经不存在的 - for (KafkaAclPO dbPO: dbPOMap.values()) { - kafkaAclDAO.deleteById(dbPO); - } - } - - @Override - public int deleteByUpdateTimeBeforeInDB(Long clusterPhyId, Date beforeTime) { - LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); - lambdaQueryWrapper.eq(KafkaAclPO::getClusterPhyId, clusterPhyId); - lambdaQueryWrapper.le(KafkaAclPO::getUpdateTime, beforeTime); - return kafkaAclDAO.delete(lambdaQueryWrapper); - } - /**************************************************** private method ****************************************************/ private Result deleteInDB(KafkaAclPO kafkaAclPO) { diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java index 43a0557cb..6f9c5cfa1 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/cluster/impl/ClusterPhyServiceImpl.java @@ -8,6 +8,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus; import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.ClusterPhyAddedEvent; +import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent; import com.xiaojukeji.know.streaming.km.common.bean.po.cluster.ClusterPhyPO; import com.xiaojukeji.know.streaming.km.common.component.SpringTool; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; @@ -146,6 +147,9 @@ public Result removeClusterPhyById(Long clusterPhyId, String operator) { String.format("删除集群:%s",clusterPhy.toString())); opLogWrapService.saveOplogAndIgnoreException(oplogDTO); + // 发布删除集群事件 + SpringTool.publish(new ClusterPhyDeletedEvent(this, clusterPhyId)); + return Result.buildSuc(); } catch 
(Exception e) { log.error("method=removeClusterPhyById||clusterPhyId={}||operator={}||msg=remove cluster failed||errMsg=exception!", diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/ConnectClusterService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/ConnectClusterService.java index e6ad39297..621a56430 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/ConnectClusterService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/ConnectClusterService.java @@ -4,14 +4,16 @@ import com.xiaojukeji.know.streaming.km.common.bean.dto.connect.cluster.ConnectClusterDTO; import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster; import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectClusterMetadata; +import com.xiaojukeji.know.streaming.km.common.bean.entity.kafka.KSGroupDescription; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; +import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService; import java.util.List; /** * Connect-Cluster */ -public interface ConnectClusterService { +public interface ConnectClusterService extends MetaDataService { Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata); List listByKafkaCluster(Long kafkaClusterPhyId); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterMetricServiceImpl.java index 5ed5af645..1444d3ac1 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterMetricServiceImpl.java @@ -24,9 +24,9 @@ import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterMetricService; import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService; import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService; -import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService; +import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectMetricService; import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient; -import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.ConnectClusterMetricESDAO; +import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.cluster.ConnectClusterMetricESDAO; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.util.CollectionUtils; @@ -43,7 +43,7 @@ * @author didi */ @Service -public class ConnectClusterMetricServiceImpl extends BaseConnectorMetricService implements ConnectClusterMetricService { +public class ConnectClusterMetricServiceImpl extends BaseConnectMetricService implements ConnectClusterMetricService { protected static final ILog LOGGER = LogFactory.getLog(ConnectClusterMetricServiceImpl.class); public static final String CONNECT_CLUSTER_METHOD_GET_WORKER_METRIC_AVG = "getWorkerMetricAvg"; @@ -86,8 +86,7 @@ public Result collectConnectClusterMetricsFromKafkaWithCa String connectClusterMetricKey = 
CollectedMetricsLocalCache.genConnectClusterMetricCacheKey(connectClusterPhyId, metric); Float keyValue = CollectedMetricsLocalCache.getConnectClusterMetrics(connectClusterMetricKey); if (keyValue != null) { - ConnectClusterMetrics connectClusterMetrics = ConnectClusterMetrics.initWithMetric(connectClusterPhyId,metric,keyValue); - return Result.buildSuc(connectClusterMetrics); + return Result.buildSuc(new ConnectClusterMetrics(connectClusterPhyId, metric, keyValue)); } Result ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, metric); @@ -209,8 +208,7 @@ private Result getConnectWorkerMetricByJMX(Long connectClu try { //2、获取jmx指标 String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxInfo.getJmxObjectName()), jmxInfo.getJmxAttribute()).toString(); - ConnectWorkerMetrics connectWorkerMetrics = ConnectWorkerMetrics.initWithMetric(connectClusterId, workerId, metric, Float.valueOf(value)); - return Result.buildSuc(connectWorkerMetrics); + return Result.buildSuc(new ConnectWorkerMetrics(connectClusterId, workerId, metric, Float.valueOf(value))); } catch (Exception e) { LOGGER.error("method=getConnectWorkerMetricsByJMX||connectClusterId={}||workerId={}||metrics={}||jmx={}||msg={}", connectClusterId, workerId, metric, jmxInfo.getJmxObjectName(), e.getClass().getName()); @@ -231,8 +229,8 @@ private List listTopNConnectClusterIdList(Long clusterPhyId, Integer topN) .collect(Collectors.toList()); } - protected List metricMap2VO(Long connectClusterId, - Map>> map){ + private List metricMap2VO(Long connectClusterId, + Map>> map){ List multiLinesVOS = new ArrayList<>(); if (map == null || map.isEmpty()) { // 如果为空,则直接返回 diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java index 86879662d..c0908b333 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/cluster/impl/ConnectClusterServiceImpl.java @@ -38,6 +38,14 @@ public class ConnectClusterServiceImpl implements ConnectClusterService { @Autowired private OpLogWrapService opLogWrapService; + @Override + public int deleteInDBByKafkaClusterId(Long clusterPhyId) { + LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); + lambdaQueryWrapper.eq(ConnectClusterPO::getKafkaClusterPhyId, clusterPhyId); + + return connectClusterDAO.deleteById(lambdaQueryWrapper); + } + @Override public Long replaceAndReturnIdInDB(ConnectClusterMetadata metadata) { ConnectClusterPO oldPO = this.getPOFromDB(metadata.getKafkaClusterPhyId(), metadata.getGroupName()); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/ConnectorService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/ConnectorService.java index 220e4e895..05fe2cf95 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/ConnectorService.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/ConnectorService.java @@ -4,49 +4,30 @@ import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector; import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo; import 
com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo; +import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService; import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO; import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum; import java.util.List; -import java.util.Properties; -import java.util.Set; /** * 查看Connector */ -public interface ConnectorService { - Result createConnector(Long connectClusterId, String connectorName, Properties configs, String operator); - +public interface ConnectorService extends MetaDataService { /** * 获取所有的连接器名称列表 */ - Result> listConnectorsFromCluster(Long connectClusterId); + Result> listConnectorsFromCluster(ConnectCluster connectCluster); /** * 获取单个连接器信息 */ Result getConnectorInfoFromCluster(Long connectClusterId, String connectorName); - Result> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName); - Result getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName); - Result getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName); - - Result resumeConnector(Long connectClusterId, String connectorName, String operator); - - Result restartConnector(Long connectClusterId, String connectorName, String operator); - - Result stopConnector(Long connectClusterId, String connectorName, String operator); - - Result deleteConnector(Long connectClusterId, String connectorName, String operator); - - Result updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator); - - void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List connectorList, Set allConnectorNameSet); - - void addNewToDB(KSConnector connector); + Result getConnectorFromKafka(Long connectClusterId, String connectorName); List listByKafkaClusterIdFromDB(Long kafkaClusterPhyId); @@ -57,6 +38,4 @@ public interface ConnectorService { ConnectorPO getConnectorFromDB(Long connectClusterId, String connectorName); ConnectorTypeEnum getConnectorType(Long connectClusterId, String connectorName); - - void completeMirrorMakerInfo(ConnectCluster connectCluster, List connectorList); } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/OpConnectorService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/OpConnectorService.java new file mode 100644 index 000000000..f94c7c080 --- /dev/null +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/OpConnectorService.java @@ -0,0 +1,26 @@ +package com.xiaojukeji.know.streaming.km.core.service.connect.connector; + +import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector; +import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; + +import java.util.Properties; + +/** + * 操作Connector + */ +public interface OpConnectorService { + Result createConnector(Long connectClusterId, String connectorName, Properties configs, String operator); + + Result resumeConnector(Long connectClusterId, String connectorName, String operator); + + Result restartConnector(Long connectClusterId, String connectorName, String operator); + + Result stopConnector(Long connectClusterId, String connectorName, String operator); + + Result deleteConnector(Long
connectClusterId, String connectorName, String operator); + + Result updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator); + + void addNewToDB(KSConnector connector); +} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorMetricServiceImpl.java index 8792875d3..ffcc16ab4 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorMetricServiceImpl.java @@ -18,6 +18,7 @@ import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricLineVO; import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.line.MetricMultiLinesVO; import com.xiaojukeji.know.streaming.km.common.bean.vo.metrics.point.MetricPointVO; +import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectStatusEnum; import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum; import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum; import com.xiaojukeji.know.streaming.km.common.exception.VCHandlerNotExistException; @@ -32,7 +33,7 @@ import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerConnectorService; import com.xiaojukeji.know.streaming.km.core.service.connect.worker.WorkerService; import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService; -import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService; +import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectMetricService; import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient; import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.connector.ConnectorMetricESDAO; import org.springframework.beans.factory.annotation.Autowired; @@ -52,7 +53,7 @@ * @author didi */ @Service -public class ConnectorMetricServiceImpl extends BaseConnectorMetricService implements ConnectorMetricService { +public class ConnectorMetricServiceImpl extends BaseConnectMetricService implements ConnectorMetricService { protected static final ILog LOGGER = LogFactory.getLog(ConnectorMetricServiceImpl.class); public static final String CONNECTOR_METHOD_DO_NOTHING = "doNothing"; @@ -67,6 +68,8 @@ public class ConnectorMetricServiceImpl extends BaseConnectorMetricService imple public static final String CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE = "getMetricHealthScore"; + public static final String CONNECTOR_METHOD_GET_METRIC_RUNNING_STATUS = "getMetricRunningStatus"; + @Autowired private ConnectorMetricESDAO connectorMetricESDAO; @@ -98,11 +101,12 @@ protected List listMetricPOFields() { @Override protected void initRegisterVCHandler() { registerVCHandler(CONNECTOR_METHOD_DO_NOTHING, this::doNothing); - registerVCHandler(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum); - registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, this::getConnectorTaskMetricsAvg); - registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, this::getConnectorTaskMetricsMax); - registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, this::getConnectorTaskMetricsSum); - registerVCHandler(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE, this::getMetricHealthScore); + 
registerVCHandler(CONNECTOR_METHOD_GET_CONNECT_WORKER_METRIC_SUM, this::getConnectWorkerMetricSum); + registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_AVG, this::getConnectorTaskMetricsAvg); + registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_MAX, this::getConnectorTaskMetricsMax); + registerVCHandler(CONNECTOR_METHOD_GET_CONNECTOR_TASK_METRICS_SUM, this::getConnectorTaskMetricsSum); + registerVCHandler(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE, this::getMetricHealthScore); + registerVCHandler(CONNECTOR_METHOD_GET_METRIC_RUNNING_STATUS, this::getMetricRunningStatus); } @Override @@ -111,8 +115,7 @@ public Result collectConnectClusterMetricsFromKafkaWithCacheFi Float keyValue = CollectedMetricsLocalCache.getConnectorMetrics(connectorMetricKey); if (null != keyValue) { - ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterPhyId, connectorName, metric, keyValue); - return Result.buildSuc(connectorMetrics); + return Result.buildSuc(new ConnectorMetrics(connectClusterPhyId, connectorName, metric, keyValue)); } Result ret = this.collectConnectClusterMetricsFromKafka(connectClusterPhyId, connectorName, metric); @@ -216,6 +219,20 @@ private Result getMetricHealthScore(VersionItemParam metricPar return Result.buildSuc(metrics); } + private Result getMetricRunningStatus(VersionItemParam metricParam) { + ConnectorMetricParam param = (ConnectorMetricParam) metricParam; + Long connectClusterId = param.getConnectClusterId(); + String connectorName = param.getConnectorName(); + String metricName = param.getMetricName(); + + ConnectorPO connector = connectorService.getConnectorFromDB(connectClusterId, connectorName); + if (connector == null) { + return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metricName, (float)ConnectStatusEnum.UNKNOWN.getStatus())); + } + + return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metricName, (float)ConnectStatusEnum.getByValue(connector.getState()).getStatus())); + } + private Result getConnectWorkerMetricSum(VersionItemParam metricParam) { ConnectorMetricParam param = (ConnectorMetricParam) metricParam; Long connectClusterId = param.getConnectClusterId(); @@ -240,12 +257,16 @@ private Result getConnectWorkerMetricSum(VersionItemParam metr if (!isCollected) { return Result.buildFailure(NOT_EXIST); } - return Result.buildSuc(ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum)); + + return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, sum)); } //kafka.connect:type=connect-worker-metrics,connector="{connector}" 指标 private Result getConnectorMetric(Long connectClusterId, String workerId, String connectorName, String metric, ConnectorTypeEnum connectorType) { VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric); + if (null == jmxInfo) { + return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST); + } if (jmxInfo.getType() != null) { if (connectorType == null) { @@ -257,9 +278,6 @@ private Result getConnectorMetric(Long connectClusterId, Strin } } - if (null == jmxInfo) { - return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST); - } String jmxObjectName = String.format(jmxInfo.getJmxObjectName(), connectorName); JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId); @@ -270,8 +288,7 @@ private Result getConnectorMetric(Long connectClusterId, Strin try { //2、获取jmx指标 String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), 
jmxInfo.getJmxAttribute()).toString(); - ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, Float.valueOf(value)); - return Result.buildSuc(connectorMetrics); + return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, Float.valueOf(value))); } catch (InstanceNotFoundException e) { // 忽略该错误,该错误出现的原因是该指标在JMX中不存在 return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName)); @@ -296,8 +313,7 @@ private Result getConnectorTaskMetricsAvg(VersionItemParam met } Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get(); - ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum / ret.getData().size()); - return Result.buildSuc(connectorMetrics); + return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, sum / ret.getData().size())); } private Result getConnectorTaskMetricsMax(VersionItemParam metricParam){ @@ -313,8 +329,7 @@ private Result getConnectorTaskMetricsMax(VersionItemParam met } Float max = ret.getData().stream().max((a, b) -> a.getMetric(metric).compareTo(b.getMetric(metric))).get().getMetric(metric); - ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, max); - return Result.buildSuc(connectorMetrics); + return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, max)); } private Result getConnectorTaskMetricsSum(VersionItemParam metricParam){ @@ -330,8 +345,7 @@ private Result getConnectorTaskMetricsSum(VersionItemParam met } Float sum = ret.getData().stream().map(elem -> elem.getMetric(metric)).reduce(Float::sum).get(); - ConnectorMetrics connectorMetrics = ConnectorMetrics.initWithMetric(connectClusterId, connectorName, metric, sum); - return Result.buildSuc(connectorMetrics); + return Result.buildSuc(new ConnectorMetrics(connectClusterId, connectorName, metric, sum)); } @@ -358,6 +372,9 @@ private Result> getConnectorTaskMetricList(Long conne private Result getConnectorTaskMetric(Long connectClusterId, String workerId, String connectorName, Integer taskId, String metric, ConnectorTypeEnum connectorType) { VersionConnectJmxInfo jmxInfo = getJMXInfo(connectClusterId, metric); + if (null == jmxInfo) { + return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST); + } if (jmxInfo.getType() != null) { if (connectorType == null) { @@ -369,9 +386,6 @@ private Result getConnectorTaskMetric(Long connectClusterI } } - if (null == jmxInfo) { - return Result.buildFailure(VC_ITEM_JMX_NOT_EXIST); - } String jmxObjectName=String.format(jmxInfo.getJmxObjectName(), connectorName, taskId); JmxConnectorWrap jmxConnectorWrap = connectJMXClient.getClientWithCheck(connectClusterId, workerId); @@ -382,8 +396,7 @@ private Result getConnectorTaskMetric(Long connectClusterI try { //2、获取jmx指标 String value = jmxConnectorWrap.getAttribute(new ObjectName(jmxObjectName), jmxInfo.getJmxAttribute()).toString(); - ConnectorTaskMetrics connectorTaskMetrics = ConnectorTaskMetrics.initWithMetric(connectClusterId, connectorName, taskId, metric, Float.valueOf(value)); - return Result.buildSuc(connectorTaskMetrics); + return Result.buildSuc(new ConnectorTaskMetrics(connectClusterId, connectorName, taskId, metric, Float.valueOf(value))); } catch (Exception e) { LOGGER.error("method=getConnectorTaskMetric||connectClusterId={}||workerId={}||connectorName={}||taskId={}||metrics={}||jmx={}||msg={}", connectClusterId, workerId, 
connectorName, taskId, metric, jmxObjectName, e.getClass().getName()); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java index 133355a84..74c298b50 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/ConnectorServiceImpl.java @@ -3,7 +3,6 @@ import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; import com.didiglobal.logi.log.ILog; import com.didiglobal.logi.log.LogFactory; -import com.didiglobal.logi.security.common.dto.oplog.OplogDTO; import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster; import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector; import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo; @@ -13,19 +12,14 @@ import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO; import com.xiaojukeji.know.streaming.km.common.component.RestTool; import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; -import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant; import com.xiaojukeji.know.streaming.km.common.converter.ConnectConverter; import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectorTypeEnum; -import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum; -import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum; -import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum; -import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils; import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import com.xiaojukeji.know.streaming.km.common.utils.Triple; +import com.xiaojukeji.know.streaming.km.common.utils.Tuple; import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService; import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService; -import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService; -import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO; import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo; import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo; @@ -34,14 +28,9 @@ import org.springframework.stereotype.Service; import java.util.*; -import java.util.stream.Collectors; - -import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME; -import static com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant.MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME; -import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR; @Service -public class ConnectorServiceImpl extends BaseVersionControlService implements ConnectorService { +public class ConnectorServiceImpl implements ConnectorService { private static final ILog LOGGER = LogFactory.getLog(ConnectorServiceImpl.class); @Autowired @@ -53,79 +42,14 @@ 
public class ConnectorServiceImpl extends BaseVersionControlService implements C @Autowired private ConnectClusterService connectClusterService; - @Autowired - private OpLogWrapService opLogWrapService; - private static final String LIST_CONNECTORS_URI = "/connectors"; private static final String GET_CONNECTOR_INFO_PREFIX_URI = "/connectors"; private static final String GET_CONNECTOR_TOPICS_URI = "/connectors/%s/topics"; private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status"; - private static final String CREATE_CONNECTOR_URI = "/connectors"; - private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume"; - private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart"; - private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause"; - private static final String DELETE_CONNECTOR_URI = "/connectors/%s"; - private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config"; - @Override - protected VersionItemTypeEnum getVersionItemType() { - return SERVICE_OP_CONNECT_CONNECTOR; - } - - @Override - public Result createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) { + public Result> listConnectorsFromCluster(ConnectCluster connectCluster) { try { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - - // 构造参数 - Properties props = new Properties(); - props.put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, connectorName); - props.put("config", configs); - - ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent( - connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI, - props, - ConnectorInfo.class - ); - - opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( - operator, - OperationEnum.ADD.getDesc(), - ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), - MsgConstant.getConnectorBizStr(connectClusterId, connectorName), - ConvertUtil.obj2Json(configs) - )); - - KSConnectorInfo connector = new KSConnectorInfo(); - connector.setConnectClusterId(connectClusterId); - connector.setConfig(connectorInfo.config()); - connector.setName(connectorInfo.name()); - connector.setTasks(connectorInfo.tasks()); - connector.setType(connectorInfo.type()); - - return Result.buildSuc(connector); - } catch (Exception e) { - LOGGER.error( - "method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception", - connectClusterId, connectorName, configs, operator, e - ); - - return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); - } - } - - @Override - public Result> listConnectorsFromCluster(Long connectClusterId) { - try { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - List nameList = restTool.getArrayObjectWithJsonContent( connectCluster.getSuitableRequestUrl() + LIST_CONNECTORS_URI, new HashMap<>(), @@ -135,8 +59,8 @@ public Result> listConnectorsFromCluster(Long connectClusterId) { return Result.buildSuc(nameList); } catch (Exception e) { LOGGER.error( - "method=listConnectorsFromCluster||connectClusterId={}||errMsg=exception", - connectClusterId, e + 
"method=listConnectorsFromCluster||connectClusterId={}||connectClusterSuitableUrl={}||errMsg=exception", + connectCluster.getId(), connectCluster.getSuitableRequestUrl(), e ); return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); @@ -153,16 +77,6 @@ public Result getConnectorInfoFromCluster(Long connectClusterId return this.getConnectorInfoFromCluster(connectCluster, connectorName); } - @Override - public Result> getConnectorTopicsFromCluster(Long connectClusterId, String connectorName) { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - - return this.getConnectorTopicsFromCluster(connectCluster, connectorName); - } - @Override public Result getConnectorStateInfoFromCluster(Long connectClusterId, String connectorName) { ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); @@ -174,270 +88,26 @@ public Result getConnectorStateInfoFromCluster(Long connec } @Override - public Result getAllConnectorInfoFromCluster(Long connectClusterId, String connectorName) { + public Result getConnectorFromKafka(Long connectClusterId, String connectorName) { ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); if (ValidateUtils.isNull(connectCluster)) { return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); } - Result connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName); - if (connectorResult.failed()) { - LOGGER.error( - "method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}", - connectClusterId, connectorName, connectorResult - ); - - return Result.buildFromIgnoreData(connectorResult); - } - - Result> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName); - if (topicNameListResult.failed()) { - LOGGER.error( - "method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}", - connectClusterId, connectorName, connectorResult - ); - } - - Result stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName); - if (stateInfoResult.failed()) { - LOGGER.error( - "method=getAllConnectorInfoFromCluster||connectClusterId={}||connectorName={}||result={}", - connectClusterId, connectorName, connectorResult - ); + Result, KSConnectorStateInfo>> fullInfoResult = this.getConnectorFullInfoFromKafka(connectCluster, connectorName); + if (fullInfoResult.failed()) { + return Result.buildFromIgnoreData(fullInfoResult); } return Result.buildSuc(ConnectConverter.convert2KSConnector( connectCluster.getKafkaClusterPhyId(), connectCluster.getId(), - connectorResult.getData(), - stateInfoResult.getData(), - topicNameListResult.getData() + fullInfoResult.getData().v1(), + fullInfoResult.getData().v3(), + fullInfoResult.getData().v2() )); } - @Override - public Result resumeConnector(Long connectClusterId, String connectorName, String operator) { - try { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - - restTool.putJsonForObject( - connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName), - new HashMap<>(), - 
String.class - ); - - this.updateStatus(connectCluster, connectClusterId, connectorName); - - opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( - operator, - OperationEnum.ENABLE.getDesc(), - ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), - MsgConstant.getConnectorBizStr(connectClusterId, connectorName), - "" - )); - - return Result.buildSuc(); - } catch (Exception e) { - LOGGER.error( - "class=ConnectorServiceImpl||method=resumeConnector||connectClusterId={}||errMsg=exception", - connectClusterId, e - ); - - return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); - } - } - - @Override - public Result restartConnector(Long connectClusterId, String connectorName, String operator) { - try { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - - restTool.postObjectWithJsonContent( - connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName), - new HashMap<>(), - String.class - ); - - this.updateStatus(connectCluster, connectClusterId, connectorName); - - opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( - operator, - OperationEnum.RESTART.getDesc(), - ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), - MsgConstant.getConnectorBizStr(connectClusterId, connectorName), - "" - )); - - return Result.buildSuc(); - } catch (Exception e) { - LOGGER.error( - "method=restartConnector||connectClusterId={}||errMsg=exception", - connectClusterId, e - ); - - return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); - } - } - - @Override - public Result stopConnector(Long connectClusterId, String connectorName, String operator) { - try { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - - restTool.putJsonForObject( - connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName), - new HashMap<>(), - String.class - ); - - this.updateStatus(connectCluster, connectClusterId, connectorName); - - opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( - operator, - OperationEnum.DISABLE.getDesc(), - ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), - MsgConstant.getConnectorBizStr(connectClusterId, connectorName), - "" - )); - - return Result.buildSuc(); - } catch (Exception e) { - LOGGER.error( - "method=stopConnector||connectClusterId={}||errMsg=exception", - connectClusterId, e - ); - - return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); - } - } - - @Override - public Result deleteConnector(Long connectClusterId, String connectorName, String operator) { - try { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - - restTool.deleteWithParamsAndHeader( - connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName), - new HashMap<>(), - new HashMap<>(), - String.class - ); - - opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( - operator, - OperationEnum.DELETE.getDesc(), - 
ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), - MsgConstant.getConnectorBizStr(connectClusterId, connectorName), - "" - )); - - this.deleteConnectorInDB(connectClusterId, connectorName); - - return Result.buildSuc(); - } catch (Exception e) { - LOGGER.error( - "method=deleteConnector||connectClusterId={}||errMsg=exception", - connectClusterId, e - ); - - return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); - } - } - - @Override - public Result updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) { - try { - ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); - if (ValidateUtils.isNull(connectCluster)) { - return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); - } - - ConnectorInfo connectorInfo = restTool.putJsonForObject( - connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName), - configs, - org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo.class - ); - - this.updateStatus(connectCluster, connectClusterId, connectorName); - - opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( - operator, - OperationEnum.EDIT.getDesc(), - ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), - MsgConstant.getConnectorBizStr(connectClusterId, connectorName), - ConvertUtil.obj2Json(configs) - )); - - return Result.buildSuc(); - } catch (Exception e) { - LOGGER.error( - "method=updateConnectorConfig||connectClusterId={}||errMsg=exception", - connectClusterId, e - ); - - return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); - } - } - - @Override - public void batchReplace(Long kafkaClusterPhyId, Long connectClusterId, List connectorList, Set allConnectorNameSet) { - List poList = this.listByConnectClusterIdFromDB(connectClusterId); - - Map oldPOMap = new HashMap<>(); - poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem)); - - for (KSConnector connector: connectorList) { - try { - ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName()); - if (oldPO == null) { - oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class); - connectorDAO.insert(oldPO); - } else { - ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class); - newPO.setId(oldPO.getId()); - connectorDAO.updateById(newPO); - } - } catch (DuplicateKeyException dke) { - // ignore - } - } - - try { - oldPOMap.values().forEach(elem -> { - if (allConnectorNameSet.contains(elem.getConnectorName())) { - // 当前connector还存在 - return; - } - - // 当前connector不存在了,则进行删除 - connectorDAO.deleteById(elem.getId()); - }); - } catch (Exception e) { - // ignore - } - } - - @Override - public void addNewToDB(KSConnector connector) { - try { - connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class)); - } catch (DuplicateKeyException dke) { - // ignore - } - } - @Override public List listByKafkaClusterIdFromDB(Long kafkaClusterPhyId) { LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); @@ -482,53 +152,98 @@ public ConnectorTypeEnum getConnectorType(Long connectClusterId, String connecto } @Override - public void completeMirrorMakerInfo(ConnectCluster connectCluster, List connectorList) { - List sourceConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_SOURCE_CONNECTOR_TYPE)).collect(Collectors.toList()); - if (sourceConnectorList.isEmpty()) { - return; + public Result, List>> 
getDataFromKafka(ConnectCluster connectCluster) { + Result> nameListResult = this.listConnectorsFromCluster(connectCluster); + if (nameListResult.failed()) { + return Result.buildFromIgnoreData(nameListResult); + } + + // 逐个获取 + List, KSConnectorStateInfo>> connectorFullInfoList = new ArrayList<>(); + for (String connectorName: nameListResult.getData()) { + Result, KSConnectorStateInfo>> ksConnectorResult = this.getConnectorFullInfoFromKafka(connectCluster, connectorName); + if (ksConnectorResult.failed()) { + continue; + } + + connectorFullInfoList.add(ksConnectorResult.getData()); } - List heartBeatConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_HEARTBEAT_CONNECTOR_TYPE)).collect(Collectors.toList()); - List checkpointConnectorList = connectorList.stream().filter(elem -> elem.getConnectorClassName().equals(KafkaConnectConstant.MIRROR_MAKER_CHECKPOINT_CONNECTOR_TYPE)).collect(Collectors.toList()); + // 返回结果 + return Result.buildSuc(new Tuple<>( + new HashSet<>(nameListResult.getData()), + ConnectConverter.convertAndSupplyMirrorMakerInfo(connectCluster, connectorFullInfoList)) // 转换并补充mm2相关信息 + ); + } + + @Override + public void writeToDB(Long connectClusterId, Set fullNameSet, List dataList) { + List poList = this.listByConnectClusterIdFromDB(connectClusterId); + + Map oldPOMap = new HashMap<>(); + poList.forEach(elem -> oldPOMap.put(elem.getConnectorName(), elem)); + + for (KSConnector connector: dataList) { + try { + ConnectorPO oldPO = oldPOMap.remove(connector.getConnectorName()); + if (oldPO == null) { + oldPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class); + connectorDAO.insert(oldPO); + continue; + } - Map heartbeatMap = this.buildMirrorMakerMap(connectCluster, heartBeatConnectorList); - Map checkpointMap = this.buildMirrorMakerMap(connectCluster, checkpointConnectorList); + ConnectorPO newPO = ConvertUtil.obj2Obj(connector, ConnectorPO.class); + newPO.setId(oldPO.getId()); + if (!ValidateUtils.isBlank(oldPO.getCheckpointConnectorName()) + && ValidateUtils.isBlank(newPO.getCheckpointConnectorName()) + && fullNameSet.contains(oldPO.getCheckpointConnectorName())) { + // 新的po里面没有checkpoint的信息,但是db中的数据显示有,且集群中有该connector,则保留该checkpoint数据 + newPO.setCheckpointConnectorName(oldPO.getCheckpointConnectorName()); + } - for (KSConnector sourceConnector : sourceConnectorList) { - Result ret = this.getConnectorInfoFromCluster(connectCluster, sourceConnector.getConnectorName()); + if (!ValidateUtils.isBlank(oldPO.getHeartbeatConnectorName()) + && ValidateUtils.isBlank(newPO.getHeartbeatConnectorName()) + && fullNameSet.contains(oldPO.getHeartbeatConnectorName())) { + // 新的po里面没有heartbeat的信息,但是db中的数据显示有,且集群中有该connector,则保留该heartbeat数据 + newPO.setHeartbeatConnectorName(oldPO.getHeartbeatConnectorName()); + } - if (!ret.hasData()) { + connectorDAO.updateById(newPO); + } catch (DuplicateKeyException dke) { + // ignore + } catch (Exception e) { LOGGER.error( - "method=completeMirrorMakerInfo||connectClusterId={}||connectorName={}||get connectorInfo fail!", - connectCluster.getId(), sourceConnector.getConnectorName() + "method=writeToDB||connectClusterId={}||connectorName={}||errMsg=exception", + connector.getConnectClusterId(), connector.getConnectorName(), e ); - continue; } - KSConnectorInfo ksConnectorInfo = ret.getData(); - String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME); - String sourceServers =
ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME); + } - if (ValidateUtils.anyBlank(targetServers, sourceServers)) { - continue; - } + try { + oldPOMap.values().forEach(elem -> { + if (fullNameSet.contains(elem.getConnectorName())) { + // 当前connector还存在 + return; + } - String[] targetBrokerList = getBrokerList(targetServers); - String[] sourceBrokerList = getBrokerList(sourceServers); - sourceConnector.setHeartbeatConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, heartbeatMap)); - sourceConnector.setCheckpointConnectorName(this.findBindConnector(targetBrokerList, sourceBrokerList, checkpointMap)); + // 当前connector不存在了,则进行删除 + connectorDAO.deleteById(elem.getId()); + }); + } catch (Exception e) { + // ignore } - } - /**************************************************** private method ****************************************************/ - private int deleteConnectorInDB(Long connectClusterId, String connectorName) { + @Override + public int deleteInDBByKafkaClusterId(Long clusterPhyId) { LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); - lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId); - lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName); + lambdaQueryWrapper.eq(ConnectorPO::getKafkaClusterPhyId, clusterPhyId); return connectorDAO.delete(lambdaQueryWrapper); } + /**************************************************** private method ****************************************************/ + private Result getConnectorInfoFromCluster(ConnectCluster connectCluster, String connectorName) { try { ConnectorInfo connectorInfo = restTool.getForObject( @@ -594,90 +309,37 @@ private Result getConnectorStateInfoFromCluster(ConnectClu } } - private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) { - try { - // 延迟3秒 - BackoffUtils.backoff(2000); - - Result stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName); - if (stateInfoResult.failed()) { - return; - } - - ConnectorPO po = new ConnectorPO(); - po.setConnectClusterId(connectClusterId); - po.setConnectorName(connectorName); - po.setState(stateInfoResult.getData().getConnector().getState()); - - LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); - lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId); - lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName); - - connectorDAO.update(po, lambdaQueryWrapper); - } catch (Exception e) { + private Result, KSConnectorStateInfo>> getConnectorFullInfoFromKafka(ConnectCluster connectCluster, String connectorName) { + Result connectorResult = this.getConnectorInfoFromCluster(connectCluster, connectorName); + if (connectorResult.failed()) { LOGGER.error( - "method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception", - connectClusterId, connectorName, e + "method=getConnectorFullInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors info from cluster failed", + connectCluster.getId(), connectCluster.getSuitableRequestUrl(), connectorResult ); - } - } - - private Map buildMirrorMakerMap(ConnectCluster connectCluster, List ksConnectorList) { - Map bindMap = new HashMap<>(); - - for (KSConnector ksConnector : ksConnectorList) { - Result ret = this.getConnectorInfoFromCluster(connectCluster, ksConnector.getConnectorName()); - - if (!ret.hasData()) { - LOGGER.error( - "method=buildMirrorMakerMap||connectClusterId={}||connectorName={}||get connectorInfo fail!", - connectCluster.getId(), ksConnector.getConnectorName() - ); - continue; - } - - KSConnectorInfo ksConnectorInfo = ret.getData(); - String targetServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_TARGET_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME); - String sourceServers = ksConnectorInfo.getConfig().get(MIRROR_MAKER_SOURCE_CLUSTER_BOOTSTRAP_SERVERS_FIELD_NAME); - - if (ValidateUtils.anyBlank(targetServers, sourceServers)) { - continue; - } - - String[] targetBrokerList = getBrokerList(targetServers); - String[] sourceBrokerList = getBrokerList(sourceServers); - for (String targetBroker : targetBrokerList) { - for (String sourceBroker : sourceBrokerList) { - bindMap.put(targetBroker + "@" + sourceBroker, ksConnector.getConnectorName()); - } - } + return Result.buildFromIgnoreData(connectorResult); } - return bindMap; - } - private String findBindConnector(String[] targetBrokerList, String[] sourceBrokerList, Map connectorBindMap) { - for (String targetBroker : targetBrokerList) { - for (String sourceBroker : sourceBrokerList) { - String connectorName = connectorBindMap.get(targetBroker + "@" + sourceBroker); - if (connectorName != null) { - return connectorName; - } - } + Result> topicNameListResult = this.getConnectorTopicsFromCluster(connectCluster, connectorName); + if (topicNameListResult.failed()) { + LOGGER.error( + "method=getConnectorFullInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors topics from cluster failed", + connectCluster.getId(), connectCluster.getSuitableRequestUrl(), topicNameListResult + ); } - return ""; - } - private String[] getBrokerList(String str) { - if (ValidateUtils.isBlank(str)) { - return new String[0]; - } - if (str.contains(";")) { - return str.split(";"); - } - if (str.contains(",")) { - return str.split(","); + Result stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName); + if (stateInfoResult.failed()) { + LOGGER.error( + "method=getConnectorFullInfoFromKafka||connectClusterId={}||connectClusterSuitableUrl={}||result={}||errMsg=get connectors state from cluster failed", + connectCluster.getId(), connectCluster.getSuitableRequestUrl(), stateInfoResult + ); } - return new String[]{str}; + + return Result.buildSuc(new Triple<>( + connectorResult.getData(), + topicNameListResult.getData(), + stateInfoResult.getData() + )); } } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/OpConnectorServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/OpConnectorServiceImpl.java new file mode 100644 index 000000000..df0e96330 --- /dev/null +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/connector/impl/OpConnectorServiceImpl.java @@ -0,0 +1,352 @@ +package com.xiaojukeji.know.streaming.km.core.service.connect.connector.impl; + +import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper; +import com.didiglobal.logi.log.ILog; +import com.didiglobal.logi.log.LogFactory; +import com.didiglobal.logi.security.common.dto.oplog.OplogDTO; +import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster; +import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector; +import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorInfo; +import
com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnectorStateInfo; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus; +import com.xiaojukeji.know.streaming.km.common.bean.po.connect.ConnectorPO; +import com.xiaojukeji.know.streaming.km.common.component.RestTool; +import com.xiaojukeji.know.streaming.km.common.constant.MsgConstant; +import com.xiaojukeji.know.streaming.km.common.constant.connect.KafkaConnectConstant; +import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.ModuleEnum; +import com.xiaojukeji.know.streaming.km.common.enums.operaterecord.OperationEnum; +import com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum; +import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils; +import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil; +import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils; +import com.xiaojukeji.know.streaming.km.core.service.connect.cluster.ConnectClusterService; +import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService; +import com.xiaojukeji.know.streaming.km.core.service.oprecord.OpLogWrapService; +import com.xiaojukeji.know.streaming.km.core.service.version.BaseVersionControlService; +import com.xiaojukeji.know.streaming.km.persistence.mysql.connect.ConnectorDAO; +import org.apache.kafka.connect.runtime.rest.entities.ConnectorInfo; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.dao.DuplicateKeyException; +import org.springframework.stereotype.Service; + +import java.util.*; + +import static com.xiaojukeji.know.streaming.km.common.enums.version.VersionItemTypeEnum.SERVICE_OP_CONNECT_CONNECTOR; + +@Service +public class OpConnectorServiceImpl extends BaseVersionControlService implements OpConnectorService { + private static final ILog LOGGER = LogFactory.getLog(OpConnectorServiceImpl.class); + + @Autowired + private RestTool restTool; + + @Autowired + private ConnectorDAO connectorDAO; + + @Autowired + private ConnectClusterService connectClusterService; + + @Autowired + private OpLogWrapService opLogWrapService; + + private static final String GET_CONNECTOR_STATUS_URI = "/connectors/%s/status"; + + private static final String CREATE_CONNECTOR_URI = "/connectors"; + private static final String RESUME_CONNECTOR_URI = "/connectors/%s/resume"; + private static final String RESTART_CONNECTOR_URI = "/connectors/%s/restart"; + private static final String PAUSE_CONNECTOR_URI = "/connectors/%s/pause"; + private static final String DELETE_CONNECTOR_URI = "/connectors/%s"; + private static final String UPDATE_CONNECTOR_CONFIG_URI = "/connectors/%s/config"; + + @Override + protected VersionItemTypeEnum getVersionItemType() { + return SERVICE_OP_CONNECT_CONNECTOR; + } + + @Override + public Result createConnector(Long connectClusterId, String connectorName, Properties configs, String operator) { + try { + ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); + if (ValidateUtils.isNull(connectCluster)) { + return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); + } + + // 构造参数 + Properties props = new Properties(); + props.put(KafkaConnectConstant.MIRROR_MAKER_NAME_FIELD_NAME, connectorName); + props.put("config", configs); + + ConnectorInfo connectorInfo = restTool.postObjectWithJsonContent( + 
connectCluster.getSuitableRequestUrl() + CREATE_CONNECTOR_URI, + props, + ConnectorInfo.class + ); + + opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( + operator, + OperationEnum.ADD.getDesc(), + ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), + MsgConstant.getConnectorBizStr(connectClusterId, connectorName), + ConvertUtil.obj2Json(configs) + )); + + KSConnectorInfo connector = new KSConnectorInfo(); + connector.setConnectClusterId(connectClusterId); + connector.setConfig(connectorInfo.config()); + connector.setName(connectorInfo.name()); + connector.setTasks(connectorInfo.tasks()); + connector.setType(connectorInfo.type()); + + return Result.buildSuc(connector); + } catch (Exception e) { + LOGGER.error( + "method=createConnector||connectClusterId={}||connectorName={}||configs={}||operator={}||errMsg=exception", + connectClusterId, connectorName, configs, operator, e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); + } + } + + @Override + public Result resumeConnector(Long connectClusterId, String connectorName, String operator) { + try { + ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); + if (ValidateUtils.isNull(connectCluster)) { + return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); + } + + restTool.putJsonForObject( + connectCluster.getSuitableRequestUrl() + String.format(RESUME_CONNECTOR_URI, connectorName), + new HashMap<>(), + String.class + ); + + this.updateStatus(connectCluster, connectClusterId, connectorName); + + opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( + operator, + OperationEnum.ENABLE.getDesc(), + ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), + MsgConstant.getConnectorBizStr(connectClusterId, connectorName), + "" + )); + + return Result.buildSuc(); + } catch (Exception e) { + LOGGER.error( + "method=resumeConnector||connectClusterId={}||errMsg=exception", + connectClusterId, e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); + } + } + + @Override + public Result restartConnector(Long connectClusterId, String connectorName, String operator) { + try { + ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); + if (ValidateUtils.isNull(connectCluster)) { + return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); + } + + restTool.postObjectWithJsonContent( + connectCluster.getSuitableRequestUrl() + String.format(RESTART_CONNECTOR_URI, connectorName), + new HashMap<>(), + String.class + ); + + this.updateStatus(connectCluster, connectClusterId, connectorName); + + opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( + operator, + OperationEnum.RESTART.getDesc(), + ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), + MsgConstant.getConnectorBizStr(connectClusterId, connectorName), + "" + )); + + return Result.buildSuc(); + } catch (Exception e) { + LOGGER.error( + "method=restartConnector||connectClusterId={}||errMsg=exception", + connectClusterId, e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); + } + } + + @Override + public Result stopConnector(Long connectClusterId, String connectorName, String operator) { + try { + ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); + if (ValidateUtils.isNull(connectCluster)) { + return
Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); + } + + restTool.putJsonForObject( + connectCluster.getSuitableRequestUrl() + String.format(PAUSE_CONNECTOR_URI, connectorName), + new HashMap<>(), + String.class + ); + + this.updateStatus(connectCluster, connectClusterId, connectorName); + + opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( + operator, + OperationEnum.DISABLE.getDesc(), + ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), + MsgConstant.getConnectorBizStr(connectClusterId, connectorName), + "" + )); + + return Result.buildSuc(); + } catch (Exception e) { + LOGGER.error( + "method=stopConnector||connectClusterId={}||errMsg=exception", + connectClusterId, e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); + } + } + + @Override + public Result deleteConnector(Long connectClusterId, String connectorName, String operator) { + try { + ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); + if (ValidateUtils.isNull(connectCluster)) { + return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); + } + + restTool.deleteWithParamsAndHeader( + connectCluster.getSuitableRequestUrl() + String.format(DELETE_CONNECTOR_URI, connectorName), + new HashMap<>(), + new HashMap<>(), + String.class + ); + + opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( + operator, + OperationEnum.DELETE.getDesc(), + ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), + MsgConstant.getConnectorBizStr(connectClusterId, connectorName), + "" + )); + + this.deleteConnectorInDB(connectClusterId, connectorName); + + return Result.buildSuc(); + } catch (Exception e) { + LOGGER.error( + "method=deleteConnector||connectClusterId={}||errMsg=exception", + connectClusterId, e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); + } + } + + @Override + public Result updateConnectorConfig(Long connectClusterId, String connectorName, Properties configs, String operator) { + try { + ConnectCluster connectCluster = connectClusterService.getById(connectClusterId); + if (ValidateUtils.isNull(connectCluster)) { + return Result.buildFromRSAndMsg(ResultStatus.NOT_EXIST, MsgConstant.getConnectClusterNotExist(connectClusterId)); + } + + ConnectorInfo connectorInfo = restTool.putJsonForObject( + connectCluster.getSuitableRequestUrl() + String.format(UPDATE_CONNECTOR_CONFIG_URI, connectorName), + configs, + ConnectorInfo.class + ); + + this.updateStatus(connectCluster, connectClusterId, connectorName); + + opLogWrapService.saveOplogAndIgnoreException(new OplogDTO( + operator, + OperationEnum.EDIT.getDesc(), + ModuleEnum.KAFKA_CONNECT_CONNECTOR.getDesc(), + MsgConstant.getConnectorBizStr(connectClusterId, connectorName), + ConvertUtil.obj2Json(configs) + )); + + return Result.buildSuc(); + } catch (Exception e) { + LOGGER.error( + "method=updateConnectorConfig||connectClusterId={}||errMsg=exception", + connectClusterId, e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); + } + } + + @Override + public void addNewToDB(KSConnector connector) { + try { + connectorDAO.insert(ConvertUtil.obj2Obj(connector, ConnectorPO.class)); + } catch (DuplicateKeyException dke) { + // ignore + } + } + + /**************************************************** private method ****************************************************/ + private int deleteConnectorInDB(Long 
connectClusterId, String connectorName) { + LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); + lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId); + lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName); + + return connectorDAO.delete(lambdaQueryWrapper); + } + + private Result getConnectorStateInfoFromCluster(ConnectCluster connectCluster, String connectorName) { + try { + KSConnectorStateInfo connectorStateInfo = restTool.getForObject( + connectCluster.getSuitableRequestUrl() + String.format(GET_CONNECTOR_STATUS_URI, connectorName), + new HashMap<>(), + KSConnectorStateInfo.class + ); + + return Result.buildSuc(connectorStateInfo); + } catch (Exception e) { + LOGGER.error( + "method=getConnectorStateInfoFromCluster||connectClusterId={}||connectorName={}||errMsg=exception", + connectCluster.getId(), connectorName, e + ); + + return Result.buildFromRSAndMsg(ResultStatus.KAFKA_CONNECTOR_READ_FAILED, e.getMessage()); + } + } + + private void updateStatus(ConnectCluster connectCluster, Long connectClusterId, String connectorName) { + try { + // 延迟2秒 + BackoffUtils.backoff(2000); + + Result stateInfoResult = this.getConnectorStateInfoFromCluster(connectCluster, connectorName); + if (stateInfoResult.failed()) { + return; + } + + ConnectorPO po = new ConnectorPO(); + po.setConnectClusterId(connectClusterId); + po.setConnectorName(connectorName); + po.setState(stateInfoResult.getData().getConnector().getState()); + + LambdaQueryWrapper lambdaQueryWrapper = new LambdaQueryWrapper<>(); + lambdaQueryWrapper.eq(ConnectorPO::getConnectClusterId, connectClusterId); + lambdaQueryWrapper.eq(ConnectorPO::getConnectorName, connectorName); + + connectorDAO.update(po, lambdaQueryWrapper); + } catch (Exception e) { + LOGGER.error( + "method=updateStatus||connectClusterId={}||connectorName={}||errMsg=exception", + connectClusterId, connectorName, e + ); + } + } +} diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/mm2/impl/MirrorMakerMetricServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/mm2/impl/MirrorMakerMetricServiceImpl.java index 83242841d..2361be963 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/mm2/impl/MirrorMakerMetricServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/connect/mm2/impl/MirrorMakerMetricServiceImpl.java @@ -27,7 +27,7 @@ import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService; import com.xiaojukeji.know.streaming.km.core.service.connect.mm2.MirrorMakerMetricService; import com.xiaojukeji.know.streaming.km.core.service.health.state.HealthStateService; -import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectorMetricService; +import com.xiaojukeji.know.streaming.km.core.service.version.BaseConnectMetricService; import com.xiaojukeji.know.streaming.km.persistence.connect.ConnectJMXClient; import org.springframework.beans.factory.annotation.Autowired; import com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.mm2.MirrorMakerMetricESDAO; @@ -49,7 +49,7 @@ * @author didi * @date 2022/12/15 */ @Service -public class MirrorMakerMetricServiceImpl extends BaseConnectorMetricService implements MirrorMakerMetricService { +public class MirrorMakerMetricServiceImpl extends BaseConnectMetricService implements MirrorMakerMetricService { protected static final ILog LOGGER = LogFactory.getLog(MirrorMakerMetricServiceImpl.class); public static
final String MIRROR_MAKER_METHOD_DO_NOTHING = "doNothing"; @@ -190,7 +190,7 @@ protected List metricMap2VO(Long connectClusterId, multiLinesVO.setMetricLines(metricLines); multiLinesVOS.add(multiLinesVO); - }catch (Exception e){ + } catch (Exception e){ LOGGER.error("method=metricMap2VO||connectClusterId={}||msg=exception!", connectClusterId, e); } } diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java index 21511a96b..5bfb85baa 100644 --- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/group/impl/GroupServiceImpl.java @@ -78,6 +78,7 @@ public List listGroupsFromKafka(ClusterPhy clusterPhy) throws AdminOpera } props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers()); + props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d||timestamp=%d", clusterPhy.getId(), System.currentTimeMillis())); adminClient = KSPartialKafkaAdminClient.create(props); KSListGroupsResult listConsumerGroupsResult = adminClient.listConsumerGroups( @@ -178,6 +179,7 @@ public KSGroupDescription getGroupDescriptionFromKafka(ClusterPhy clusterPhy, St } props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterPhy.getBootstrapServers()); + props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("KSPartialAdminClient||clusterPhyId=%d||timestamp=%d", clusterPhy.getId(), System.currentTimeMillis())); adminClient = KSPartialKafkaAdminClient.create(props); diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/meta/MetaDataService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/meta/MetaDataService.java new file mode 100644 index 000000000..b1c34dbf6 --- /dev/null +++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/meta/MetaDataService.java @@ -0,0 +1,51 @@ +package com.xiaojukeji.know.streaming.km.core.service.meta; + +import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy; +import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster; +import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result; +import com.xiaojukeji.know.streaming.km.common.utils.Tuple; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +/** + * Kafka元信息服务接口 + */ +public interface MetaDataService { + /** + * 从Kafka中获取数据 + * @param connectCluster connect集群 + * @return 全部资源列表, 成功的资源列表 + */ + default Result, List>> getDataFromKafka(ConnectCluster connectCluster) { return Result.buildSuc(new Tuple<>(new HashSet<>(), new ArrayList<>())); } + + /** + * 从Kafka中获取数据 + * @param clusterPhy kafka集群 + * @return 全部资源集合, 成功的资源列表 + */ + default Result> getDataFromKafka(ClusterPhy clusterPhy) { return Result.buildSuc(new ArrayList<>()); } + + /** + * 元信息同步至DB中 + * @param clusterId 集群ID + * @param fullResSet 全部资源列表 + * @param dataList 成功的资源列表 + */ + default void writeToDB(Long clusterId, Set fullResSet, List dataList) {} + + /** + * 元信息同步至DB中 + * @param clusterId 集群ID + * @param dataList 成功的资源列表 + */ + default void writeToDB(Long clusterId, List dataList) {} + + /** + * 依据kafka集群ID删除数据 + * @param clusterPhyId kafka集群ID + */ + int deleteInDBByKafkaClusterId(Long clusterPhyId); +} diff --git 
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java
index 838ac5949..8fafdec53 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/partition/impl/OpPartitionServiceImpl.java
@@ -19,6 +19,7 @@
 import org.apache.kafka.clients.admin.ElectLeadersResult;
 import org.apache.kafka.common.ElectionType;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.errors.ElectionNotNeededException;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;
 import scala.jdk.javaapi.CollectionConverters;
@@ -108,12 +109,17 @@ private Result preferredReplicaElectionByKafkaClient(VersionItemParam item

             return Result.buildSuc();
         } catch (Exception e) {
+            if (e.getCause() instanceof ElectionNotNeededException) {
+                // ignore ElectionNotNeededException
+                return Result.buildSuc();
+            }
+
             LOGGER.error(
                     "method=preferredReplicaElectionByKafkaClient||clusterPhyId={}||errMsg=exception",
                     partitionParam.getClusterPhyId(), e
             );

-            return Result.buildFromRSAndMsg(ResultStatus.ZK_OPERATE_FAILED, e.getMessage());
+            return Result.buildFromRSAndMsg(ResultStatus.KAFKA_OPERATE_FAILED, e.getMessage());
         }
     }
 }
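The `ElectionNotNeededException` handling above reflects how the Kafka AdminClient reports a preferred-leader election for a partition whose preferred replica already leads it: the per-call failure arrives wrapped as the cause of an `ExecutionException`, so mapping it to success is correct. A minimal sketch of the same call and exception mapping (broker address and topic are placeholders):

    import java.util.Collections;
    import java.util.Properties;
    import java.util.concurrent.ExecutionException;

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.common.ElectionType;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.errors.ElectionNotNeededException;

    public class PreferredElectionDemo {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

            try (AdminClient admin = AdminClient.create(props)) {
                try {
                    admin.electLeaders(
                            ElectionType.PREFERRED,
                            Collections.singleton(new TopicPartition("demo-topic", 0))
                    ).all().get();
                } catch (ExecutionException e) {
                    // The preferred replica is already the leader: not an error.
                    if (!(e.getCause() instanceof ElectionNotNeededException)) {
                        throw new RuntimeException(e);
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
        }
    }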
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectorMetricService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectMetricService.java
similarity index 90%
rename from km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectorMetricService.java
rename to km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectMetricService.java
index febfdcf44..da424ebc3 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectorMetricService.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectMetricService.java
@@ -9,7 +9,7 @@
  * @author wyb
  * @date 2022/11/9
  */
-public abstract class BaseConnectorMetricService extends BaseConnectorVersionControlService{
+public abstract class BaseConnectMetricService extends BaseConnectVersionControlService {
     private List<String> metricNames = new ArrayList<>();

     @PostConstruct
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectorVersionControlService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectVersionControlService.java
similarity index 95%
rename from km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectorVersionControlService.java
rename to km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectVersionControlService.java
index ced858ff6..8f4260614 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectorVersionControlService.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/BaseConnectVersionControlService.java
@@ -14,7 +14,7 @@
  * @author wyb
  * @date 2022/11/8
  */
-public abstract class BaseConnectorVersionControlService extends BaseVersionControlService {
+public abstract class BaseConnectVersionControlService extends BaseVersionControlService {
     @Autowired
     ConnectClusterService connectClusterService;
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/connect/ConnectorMetricVersionItems.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/connect/ConnectorMetricVersionItems.java
index 2d4aeac21..bcad6e3d4 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/connect/ConnectorMetricVersionItems.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/connect/ConnectorMetricVersionItems.java
@@ -24,6 +24,8 @@ public class ConnectorMetricVersionItems extends BaseMetricVersionMetric {

     public static final String CONNECTOR_METRIC_HEALTH_STATE = "HealthState";

+    public static final String CONNECTOR_METRIC_RUNNING_STATUS = "RunningStatus";
+
     public static final String CONNECTOR_METRIC_CONNECTOR_TOTAL_TASK_COUNT = "ConnectorTotalTaskCount";

     public static final String CONNECTOR_METRIC_HEALTH_CHECK_PASSED = "HealthCheckPassed";
@@ -128,6 +130,9 @@ public List init() {
         items.add(buildAllVersionsItem()
                 .name(CONNECTOR_METRIC_HEALTH_STATE).unit("0:好 1:中 2:差 3:宕机").desc("健康状态(0:好 1:中 2:差 3:宕机)").category(CATEGORY_HEALTH)
                 .extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));
+        items.add(buildAllVersionsItem()
+                .name(CONNECTOR_METRIC_RUNNING_STATUS).unit("0:UNASSIGNED 1:RUNNING 2:PAUSED 3:FAILED 4:DESTROYED -1:UNKNOWN").desc("运行状态(0:UNASSIGNED 1:RUNNING 2:PAUSED 3:FAILED 4:DESTROYED -1:UNKNOWN)").category(CATEGORY_PERFORMANCE)
+                .extendMethod(CONNECTOR_METHOD_GET_METRIC_RUNNING_STATUS));
         items.add(buildAllVersionsItem()
                 .name(CONNECTOR_METRIC_HEALTH_CHECK_PASSED).unit("个").desc("健康项检查通过数").category(CATEGORY_HEALTH)
                 .extendMethod(CONNECTOR_METHOD_GET_METRIC_HEALTH_SCORE));
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/kafka/ClusterMetricVersionItems.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/kafka/ClusterMetricVersionItems.java
index cefe8930e..dbcdac57f 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/kafka/ClusterMetricVersionItems.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/version/metrics/kafka/ClusterMetricVersionItems.java
@@ -299,7 +299,7 @@ public List init(){
         // MessagesIn metric
         itemList.add( buildAllVersionsItem()
-                .name(CLUSTER_METRIC_MESSAGES_IN).unit("条/s").desc("集群每条消息写入条数").category(CATEGORY_CLUSTER)
+                .name(CLUSTER_METRIC_MESSAGES_IN).unit("条/s").desc("集群每秒消息写入条数").category(CATEGORY_CLUSTER)
                 .extend( buildJMXMethodExtend( CLUSTER_METHOD_GET_METRIC_FROM_KAFKA_BY_TOTAL_BROKERS_JMX )
                         .jmxObjectName( JMX_SERVER_BROKER_MESSAGES_IN ).jmxAttribute(RATE_MIN_1)));
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/ZookeeperService.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/ZookeeperService.java
index 8d3a78b10..1d324928f 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/ZookeeperService.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/ZookeeperService.java
@@ -1,19 +1,11 @@
 package com.xiaojukeji.know.streaming.km.core.service.zookeeper;

-import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
+import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;

 import java.util.List;

-public interface ZookeeperService {
-    /**
-     * Fetch ZK info from the ZK cluster
-     */
-    Result<List<ZookeeperInfo>> listFromZookeeper(Long clusterPhyId, String zookeeperAddress, ZKConfig zkConfig);
-
-    void batchReplaceDataInDB(Long clusterPhyId, List<ZookeeperInfo> infoList);
-
+public interface ZookeeperService extends MetaDataService<ZookeeperInfo> {
     List<ZookeeperInfo> listFromDBByCluster(Long clusterPhyId);

     /**
diff --git a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperServiceImpl.java b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperServiceImpl.java
index 8b0d63d1f..dc2f58d24 100644
--- a/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperServiceImpl.java
+++ b/km-core/src/main/java/com/xiaojukeji/know/streaming/km/core/service/zookeeper/impl/ZookeeperServiceImpl.java
@@ -3,6 +3,7 @@
 import com.baomidou.mybatisplus.core.conditions.query.LambdaQueryWrapper;
 import com.didiglobal.logi.log.ILog;
 import com.didiglobal.logi.log.LogFactory;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.ResultStatus;
@@ -22,10 +23,8 @@
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.stereotype.Service;

-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import java.util.*;
+import java.util.stream.Collectors;

 @Service
 public class ZookeeperServiceImpl implements ZookeeperService {
@@ -35,14 +34,14 @@ public class ZookeeperServiceImpl implements ZookeeperService {
     private ZookeeperDAO zookeeperDAO;

     @Override
-    public Result<List<ZookeeperInfo>> listFromZookeeper(Long clusterPhyId, String zookeeperAddress, ZKConfig zkConfig) {
+    public Result<List<ZookeeperInfo>> getDataFromKafka(ClusterPhy clusterPhy) {
         List<Tuple<String, Integer>> addressList = null;
         try {
-            addressList = ZookeeperUtils.connectStringParser(zookeeperAddress);
+            addressList = ZookeeperUtils.connectStringParser(clusterPhy.getZookeeper());
         } catch (Exception e) {
             LOGGER.error(
-                    "method=listFromZookeeperCluster||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
-                    clusterPhyId, zookeeperAddress, e
+                    "method=getDataFromKafka||clusterPhyId={}||zookeeperAddress={}||errMsg=exception!",
+                    clusterPhy.getId(), clusterPhy.getZookeeper(), e
             );

             return Result.buildFromRSAndMsg(ResultStatus.PARAM_ILLEGAL, e.getMessage());
@@ -51,24 +50,25 @@ public Result> listFromZookeeper(Long clusterPhyId, String z
         List<ZookeeperInfo> aliveZKList = new ArrayList<>();
         for (Tuple<String, Integer> hostPort: addressList) {
             aliveZKList.add(this.getFromZookeeperCluster(
-                    clusterPhyId,
+                    clusterPhy.getId(),
                     hostPort.getV1(),
                     hostPort.getV2(),
-                    zkConfig
+                    ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class)
             ));
         }
+
         return Result.buildSuc(aliveZKList);
     }

     @Override
-    public void batchReplaceDataInDB(Long clusterPhyId, List<ZookeeperInfo> infoList) {
+    public void writeToDB(Long clusterId, List<ZookeeperInfo> dataList) {
         // info currently in the DB
-        List<ZookeeperInfoPO> dbInfoList = this.listRawFromDBByCluster(clusterPhyId);
-        Map<String, ZookeeperInfoPO> dbMap = new HashMap<>();
-        dbInfoList.stream().forEach(elem -> dbMap.put(elem.getHost() + elem.getPort(), elem));
+        Map<String, ZookeeperInfoPO> dbMap = this.listRawFromDBByCluster(clusterId)
+                .stream()
+                .collect(Collectors.toMap(elem -> elem.getHost() + elem.getPort(), elem -> elem, (oldValue, newValue) -> newValue));

         // newly fetched info
-        List<ZookeeperInfoPO> newInfoList = ConvertUtil.list2List(infoList, ZookeeperInfoPO.class);
+        List<ZookeeperInfoPO> newInfoList = ConvertUtil.list2List(dataList, ZookeeperInfoPO.class);
         for (ZookeeperInfoPO newInfo: newInfoList) {
             try {
                 ZookeeperInfoPO oldInfo = dbMap.remove(newInfo.getHost() + newInfo.getPort());
@@ -87,7 +87,7 @@ public void batchReplaceDataInDB(Long clusterPhyId, List infoList
                     zookeeperDAO.updateById(newInfo);
                 }
             } catch (Exception e) {
-                LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterPhyId, newInfo, e);
+                LOGGER.error("method=writeToDB||clusterPhyId={}||newInfo={}||errMsg=exception", clusterId, newInfo, e);
             }
         }

@@ -96,11 +96,19 @@ public void batchReplaceDataInDB(Long clusterPhyId, List infoList
             try {
                 zookeeperDAO.deleteById(entry.getValue().getId());
             } catch (Exception e) {
-                LOGGER.error("method=batchReplaceDataInDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterPhyId, entry.getValue(), e);
+                LOGGER.error("method=writeToDB||clusterPhyId={}||expiredInfo={}||errMsg=exception", clusterId, entry.getValue(), e);
             }
         });
     }

+    @Override
+    public int deleteInDBByKafkaClusterId(Long clusterPhyId) {
+        LambdaQueryWrapper<ZookeeperInfoPO> lambdaQueryWrapper = new LambdaQueryWrapper<>();
+        lambdaQueryWrapper.eq(ZookeeperInfoPO::getClusterPhyId, clusterPhyId);
+
+        return zookeeperDAO.delete(lambdaQueryWrapper);
+    }
+
     @Override
     public List<ZookeeperInfo> listFromDBByCluster(Long clusterPhyId) {
         return ConvertUtil.list2List(this.listRawFromDBByCluster(clusterPhyId), ZookeeperInfo.class);
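The `writeToDB` reconciliation above follows a common sync pattern: index the DB rows by a natural key, upsert every freshly fetched row while removing its entry from the index, then delete whatever is left in the index as stale. A self-contained sketch with plain collections (the `Row` type is a stand-in, not a real PO class):

    import java.util.*;
    import java.util.stream.Collectors;

    public class ReconcileDemo {
        record Row(String host, int port) {}

        public static void main(String[] args) {
            List<Row> inDb = List.of(new Row("zk-1", 2181), new Row("zk-2", 2181));
            List<Row> fetched = List.of(new Row("zk-1", 2181), new Row("zk-3", 2181));

            // Index DB rows by host+port, mirroring the Collectors.toMap call above.
            Map<String, Row> dbMap = inDb.stream()
                    .collect(Collectors.toMap(r -> r.host() + r.port(), r -> r, (a, b) -> b));

            for (Row fresh : fetched) {
                Row old = dbMap.remove(fresh.host() + fresh.port());
                // old == null -> insert fresh; old != null -> update it in place.
                System.out.println((old == null ? "insert " : "update ") + fresh);
            }

            // Whatever survived in dbMap no longer exists remotely and gets deleted.
            dbMap.values().forEach(stale -> System.out.println("delete " + stale));
        }
    }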
parser.accepts("es-index-prefix", "The Index Prefix of elasticsearch").withRequiredArg().ofType(String.class); parser.accepts("goals", "Balanced goals include TopicLeadersDistributionGoal,TopicReplicaDistributionGoal,DiskDistributionGoal,NetworkInboundDistributionGoal,NetworkOutboundDistributionGoal").withRequiredArg().ofType(String.class); parser.accepts("cluster", "Balanced cluster name").withRequiredArg().ofType(String.class); diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceParameter.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceParameter.java index 90dcacf53..e9c5f3fcb 100644 --- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceParameter.java +++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/executor/common/BalanceParameter.java @@ -10,6 +10,10 @@ public class BalanceParameter { private Properties kafkaConfig; //ES访问地址 private String esRestURL; + + //ES访问密码 + private String esPassword; + //ES存储索引前缀 private String esIndexPrefix; //均衡目标 @@ -51,8 +55,14 @@ public String getEsRestURL() { return esRestURL; } - public void setEsRestURL(String esRestURL) { + public void setEsInfo(String esRestURL, String esPassword, String esIndexPrefix) { this.esRestURL = esRestURL; + this.esPassword = esPassword; + this.esIndexPrefix = esIndexPrefix; + } + + public String getEsPassword() { + return esPassword; } public List getGoals() { @@ -147,10 +157,6 @@ public String getEsIndexPrefix() { return esIndexPrefix; } - public void setEsIndexPrefix(String esIndexPrefix) { - this.esIndexPrefix = esIndexPrefix; - } - public String getOfflineBrokers() { return offlineBrokers; } @@ -181,9 +187,11 @@ public String toString() { "cluster='" + cluster + '\'' + ", kafkaConfig=" + kafkaConfig + ", esRestURL='" + esRestURL + '\'' + + ", esPassword='" + esPassword + '\'' + ", esIndexPrefix='" + esIndexPrefix + '\'' + ", goals=" + goals + ", excludedTopics='" + excludedTopics + '\'' + + ", ignoredTopics='" + ignoredTopics + '\'' + ", offlineBrokers='" + offlineBrokers + '\'' + ", balanceBrokers='" + balanceBrokers + '\'' + ", topicReplicaThreshold=" + topicReplicaThreshold + diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/elasticsearch/ElasticsearchMetricStore.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/elasticsearch/ElasticsearchMetricStore.java index c07bcb677..3a8009977 100644 --- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/elasticsearch/ElasticsearchMetricStore.java +++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/metric/elasticsearch/ElasticsearchMetricStore.java @@ -6,7 +6,10 @@ import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.MetricStore; import com.xiaojukeji.know.streaming.km.rebalance.algorithm.metric.Metrics; import org.apache.commons.io.IOUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.http.Header; import org.apache.http.HttpHost; +import org.apache.http.message.BasicHeader; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; @@ -17,9 +20,7 @@ import java.nio.charset.StandardCharsets; 
 import java.text.DateFormat;
 import java.text.SimpleDateFormat;
-import java.util.Date;
-import java.util.Set;
-import java.util.TreeSet;
+import java.util.*;

 /**
  * @author leewei
@@ -30,15 +31,19 @@ public class ElasticsearchMetricStore implements MetricStore {
     private final ObjectMapper objectMapper = new ObjectMapper();

     private final String hosts;
+
+    private final String password;
+
     private final String indexPrefix;
     private final String format;

-    public ElasticsearchMetricStore(String hosts, String indexPrefix) {
-        this(hosts, indexPrefix, "yyyy-MM-dd");
+    public ElasticsearchMetricStore(String hosts, String password, String indexPrefix) {
+        this(hosts, password, indexPrefix, "yyyy-MM-dd");
     }

-    public ElasticsearchMetricStore(String hosts, String indexPrefix, String format) {
+    public ElasticsearchMetricStore(String hosts, String password, String indexPrefix, String format) {
         this.hosts = hosts;
+        this.password = password;
         this.indexPrefix = indexPrefix;
         this.format = format;
     }
@@ -50,7 +55,17 @@ public Metrics getMetrics(String clusterName, int beforeSeconds) {
             String metricsQueryJson = IOUtils.resourceToString("/MetricsQuery.json", StandardCharsets.UTF_8);
             metricsQueryJson = metricsQueryJson.replaceAll("", Integer.toString(beforeSeconds))
                     .replaceAll("", clusterName);
-            try (RestClient restClient = RestClient.builder(toHttpHosts(this.hosts)).build()) {
+
+            List<Header> defaultHeaders = new ArrayList<>();
+            if (StringUtils.isNotBlank(password)) {
+                String encode = Base64.getEncoder().encodeToString(String.format("%s", this.password).getBytes(StandardCharsets.UTF_8));
+                Header header = new BasicHeader("Authorization", "Basic " + encode);
+                defaultHeaders.add(header);
+            }
+
+            Header[] headers = new Header[defaultHeaders.size()];
+            defaultHeaders.toArray(headers);
+            try (RestClient restClient = RestClient.builder(toHttpHosts(this.hosts)).setDefaultHeaders(headers).build()) {
                 Request request = new Request(
                         "GET",
                         "/" + indices(beforeSeconds) + "/_search");
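One note on the Authorization header built above: HTTP Basic authentication (RFC 7617) Base64-encodes a `user:password` pair, so the configured `es.client.pass` value is presumably already in that combined form — the `String.format("%s", ...)` wrapper passes it through unchanged. A standalone sketch of the same header construction (the credentials are placeholders):

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class BasicAuthHeaderDemo {
        public static void main(String[] args) {
            // Placeholder credentials; Basic auth encodes "user:password" as a pair.
            String credentials = "elastic:changeme";

            String headerValue = "Basic " + Base64.getEncoder()
                    .encodeToString(credentials.getBytes(StandardCharsets.UTF_8));

            // Attached to every request, as the diff does via
            // RestClientBuilder.setDefaultHeaders(...).
            System.out.println("Authorization: " + headerValue);
        }
    }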
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Supplier.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Supplier.java
index 54bc0e769..70db965c0 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Supplier.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/model/Supplier.java
@@ -25,14 +25,14 @@ public static Map subConfig(Map config, String p
                 Map.Entry::getValue));
     }

-    public static ClusterModel load(String clusterName, int beforeSeconds, String kafkaBootstrapServer, String esUrls, String esIndexPrefix, Map<Integer, Capacity> capacitiesById, Set<String> ignoredTopics) {
+    public static ClusterModel load(String clusterName, int beforeSeconds, String kafkaBootstrapServer, String esUrls, String esPassword, String esIndexPrefix, Map<Integer, Capacity> capacitiesById, Set<String> ignoredTopics) {
         Properties kafkaProperties = new Properties();
         kafkaProperties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServer);
-        return load(clusterName, beforeSeconds, kafkaProperties, esUrls, esIndexPrefix, capacitiesById, ignoredTopics);
+        return load(clusterName, beforeSeconds, kafkaProperties, esUrls, esPassword, esIndexPrefix, capacitiesById, ignoredTopics);
     }

-    public static ClusterModel load(String clusterName, int beforeSeconds, Properties kafkaProperties, String esUrls, String esIndexPrefix, Map<Integer, Capacity> capacitiesById, Set<String> ignoredTopics) {
-        MetricStore store = new ElasticsearchMetricStore(esUrls, esIndexPrefix);
+    public static ClusterModel load(String clusterName, int beforeSeconds, Properties kafkaProperties, String esUrls, String esPassword, String esIndexPrefix, Map<Integer, Capacity> capacitiesById, Set<String> ignoredTopics) {
+        MetricStore store = new ElasticsearchMetricStore(esUrls, esPassword, esIndexPrefix);
         Metrics metrics = store.getMetrics(clusterName, beforeSeconds);
         return load(kafkaProperties, capacitiesById, metrics, ignoredTopics);
     }
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/GoalUtils.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/GoalUtils.java
index edf9dc003..9d517fd7c 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/GoalUtils.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/algorithm/utils/GoalUtils.java
@@ -44,8 +44,16 @@ public static ClusterModel getInitClusterModel(BalanceParameter parameter) {
             capacity.setCapacity(Resource.NW_OUT, env.getNetwork());
             capacities.put(env.getId(), capacity);
         }
-        return Supplier.load(parameter.getCluster(), parameter.getBeforeSeconds(), parameter.getKafkaConfig(),
-                parameter.getEsRestURL(), parameter.getEsIndexPrefix(), capacities, AnalyzerUtils.getSplitTopics(parameter.getIgnoredTopics()));
+        return Supplier.load(
+                parameter.getCluster(),
+                parameter.getBeforeSeconds(),
+                parameter.getKafkaConfig(),
+                parameter.getEsRestURL(),
+                parameter.getEsPassword(),
+                parameter.getEsIndexPrefix(),
+                capacities,
+                AnalyzerUtils.getSplitTopics(parameter.getIgnoredTopics())
+        );
     }

     public static Map getBalanceThreshold(BalanceParameter parameter, double[] clusterAvgResource) {
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/common/converter/ClusterBalanceConverter.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/common/converter/ClusterBalanceConverter.java
index 907881652..2783954da 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/common/converter/ClusterBalanceConverter.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/common/converter/ClusterBalanceConverter.java
@@ -1,6 +1,7 @@
 package com.xiaojukeji.know.streaming.km.rebalance.common.converter;

 import com.xiaojukeji.know.streaming.km.common.annotations.enterprise.EnterpriseLoadReBalance;
+import com.xiaojukeji.know.streaming.km.persistence.es.template.TemplateConstant;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.dto.ClusterBalanceIntervalDTO;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.dto.ClusterBalancePreviewDTO;
 import com.xiaojukeji.know.streaming.km.rebalance.common.bean.dto.ClusterBalanceStrategyDTO;
@@ -34,13 +35,16 @@
 @EnterpriseLoadReBalance
 public class ClusterBalanceConverter {
-
-    public final static String PARTITION_INDEX = "ks_kafka_partition_metric";
-
     private ClusterBalanceConverter() {
     }

-    public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobConfigPO configPO, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
+    public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobConfigPO configPO,
+                                                            Map<Integer, Broker> brokerMap,
+                                                            Map<Integer, BrokerSpec> brokerSpecMap,
+                                                            ClusterPhy clusterPhy,
+                                                            String esUrl,
+                                                            String esPassword,
+                                                            List<String> topicNames) {
         BalanceParameter balanceParameter = new BalanceParameter();
         List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = ConvertUtil.str2ObjArrayByJson(configPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class);
@@ -63,8 +67,7 @@ public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobConfigP
         balanceParameter.setGoals(goals);
         balanceParameter.setCluster(clusterPhy.getId().toString());
         balanceParameter.setExcludedTopics(configPO.getTopicBlackList());
-        balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
-        balanceParameter.setEsRestURL(esUrl);
+        balanceParameter.setEsInfo(esUrl, esPassword, TemplateConstant.PARTITION_INDEX + "_");
         balanceParameter.setBalanceBrokers(CommonUtils.intSet2String(brokerMap.keySet()));
         balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
         balanceParameter.setBeforeSeconds(configPO.getMetricCalculationPeriod());
@@ -78,7 +81,13 @@ public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobConfigP
     }

-    public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobPO clusterBalanceJobPO, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
+    public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobPO clusterBalanceJobPO,
+                                                            Map<Integer, Broker> brokerMap,
+                                                            Map<Integer, BrokerSpec> brokerSpecMap,
+                                                            ClusterPhy clusterPhy,
+                                                            String esUrl,
+                                                            String esPassword,
+                                                            List<String> topicNames) {
         BalanceParameter balanceParameter = new BalanceParameter();
         List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBalanceIntervalJson(), ClusterBalanceIntervalDTO.class);
@@ -101,8 +110,7 @@ public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobPO clus
         balanceParameter.setGoals(goals);
         balanceParameter.setCluster(clusterPhy.getId().toString());
         balanceParameter.setExcludedTopics(clusterBalanceJobPO.getTopicBlackList());
-        balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
-        balanceParameter.setEsRestURL(esUrl);
+        balanceParameter.setEsInfo(esUrl, esPassword, TemplateConstant.PARTITION_INDEX + "_");
         balanceParameter.setBalanceBrokers(clusterBalanceJobPO.getBrokers());
         balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
         balanceParameter.setBeforeSeconds(clusterBalanceJobPO.getMetricCalculationPeriod());
@@ -116,7 +124,13 @@ public static BalanceParameter convert2BalanceParameter(ClusterBalanceJobPO clus
     }

-    public static BalanceParameter convert2BalanceParameter(JobClusterBalanceContent dto, List<Broker> brokers, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
+    public static BalanceParameter convert2BalanceParameter(JobClusterBalanceContent dto,
+                                                            List<Broker> brokers,
+                                                            Map<Integer, BrokerSpec> brokerSpecMap,
+                                                            ClusterPhy clusterPhy,
+                                                            String esUrl,
+                                                            String esPassword,
+                                                            List<String> topicNames) {
         BalanceParameter balanceParameter = new BalanceParameter();
         List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = dto.getClusterBalanceIntervalList().stream()
                 .sorted(Comparator.comparing(ClusterBalanceIntervalDTO::getPriority)).collect(Collectors.toList());
@@ -141,8 +155,7 @@ public static BalanceParameter convert2BalanceParameter(JobClusterBalanceContent
         balanceParameter.setGoals(goals);
         balanceParameter.setCluster(clusterPhy.getId().toString());
         balanceParameter.setExcludedTopics(CommonUtils.strList2String(dto.getTopicBlackList()));
-        balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
-        balanceParameter.setEsRestURL(esUrl);
+        balanceParameter.setEsInfo(esUrl, esPassword, TemplateConstant.PARTITION_INDEX + "_");
         balanceParameter.setBalanceBrokers(CommonUtils.intSet2String(brokerMap.keySet()));
         balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
         balanceParameter.setBeforeSeconds(dto.getMetricCalculationPeriod());
@@ -156,7 +169,13 @@ public static BalanceParameter convert2BalanceParameter(JobClusterBalanceContent
     }

-    public static BalanceParameter convert2BalanceParameter(ClusterBalancePreviewDTO dto, Map<Integer, Broker> brokerMap, Map<Integer, BrokerSpec> brokerSpecMap, ClusterPhy clusterPhy, String esUrl, List<String> topicNames) {
+    public static BalanceParameter convert2BalanceParameter(ClusterBalancePreviewDTO dto,
+                                                            Map<Integer, Broker> brokerMap,
+                                                            Map<Integer, BrokerSpec> brokerSpecMap,
+                                                            ClusterPhy clusterPhy,
+                                                            String esUrl,
+                                                            String esPassword,
+                                                            List<String> topicNames) {
         BalanceParameter balanceParameter = new BalanceParameter();
         List<ClusterBalanceIntervalDTO> clusterBalanceIntervalDTOS = dto.getClusterBalanceIntervalList().stream()
                 .sorted(Comparator.comparing(ClusterBalanceIntervalDTO::getPriority)).collect(Collectors.toList());
@@ -179,8 +198,7 @@ public static BalanceParameter convert2BalanceParameter(ClusterBalancePreviewDTO
         balanceParameter.setGoals(goals);
         balanceParameter.setCluster(clusterPhy.getId().toString());
         balanceParameter.setExcludedTopics(CommonUtils.strList2String(dto.getTopicBlackList()));
-        balanceParameter.setEsIndexPrefix(PARTITION_INDEX + "_");
-        balanceParameter.setEsRestURL(esUrl);
+        balanceParameter.setEsInfo(esUrl, esPassword, TemplateConstant.PARTITION_INDEX + "_");
         balanceParameter.setBalanceBrokers(CommonUtils.intList2String(dto.getBrokers()));
         balanceParameter.setHardwareEnv(convert2ListHostEnv(brokerMap, brokerSpecMap));
         balanceParameter.setBeforeSeconds(dto.getMetricCalculationPeriod());
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java
index 79d229bc6..6cbecad0a 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/job/ClusterBalanceJobHandler.java
@@ -63,6 +63,9 @@ public class ClusterBalanceJobHandler implements JobHandler {
     @Value("${es.client.address:}")
     private String esAddress;

+    @Value("${es.client.pass:}")
+    private String esPassword;
+
     @Autowired
     private ClusterBalanceJobService clusterBalanceJobService;

@@ -116,7 +119,7 @@ public Result submit(Job job, String operator) {
         // fetch the job plan
         List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(dto.getClusterId(), ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
-        BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, topicNames);
+        BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames);
         try {
             ExecutionRebalance executionRebalance = new ExecutionRebalance();
             OptimizerResult optimizerResult = executionRebalance.optimizations(balanceParameter);
@@ -202,7 +205,7 @@ public Result modify(Job job, String operator) {
         List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(job.getClusterId(), ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
         JobClusterBalanceContent dto = ConvertUtil.str2ObjByJson(job.getJobData(), JobClusterBalanceContent.class);

-        BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, topicNames);
+        BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(dto, brokers, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames);
         ExecutionRebalance executionRebalance = new ExecutionRebalance();
         try {
             OptimizerResult optimizerResult = executionRebalance.optimizations(balanceParameter);
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java
index a2e826aab..addde520a 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceJobServiceImpl.java
@@ -67,6 +67,9 @@ public class ClusterBalanceJobServiceImpl implements ClusterBalanceJobService {
     @Value("${es.client.address}")
     private String esAddress;

+    @Value("${es.client.pass:}")
+    private String esPassword;
+
     @Autowired
     private ClusterBalanceJobDao clusterBalanceJobDao;

@@ -299,7 +302,7 @@ public Result verifyClusterBalanceAndUpdateStatue(Long jobId) {
         // update the balance job state info
         List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhy.getId(), ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
         Map brokerBalanceStateMap = ExecutionRebalance
-                .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(clusterBalanceJobPO, brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
+                .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(clusterBalanceJobPO, brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames));
         List<ClusterBalancePlanDetail> oldDetails = ConvertUtil.str2ObjArrayByJson(clusterBalanceJobPO.getBrokerBalanceDetail(), ClusterBalancePlanDetail.class);
         List<ClusterBalancePlanDetail> newDetails = ClusterBalanceConverter.convert2ClusterBalancePlanDetail(oldDetails, brokerBalanceStateMap);
         clusterBalanceJobPO.setBrokerBalanceDetail(ConvertUtil.obj2Json(newDetails));
diff --git a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java
index 330990e01..22c2f200a 100644
--- a/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java
+++ b/km-enterprise/km-rebalance/src/main/java/com/xiaojukeji/know/streaming/km/rebalance/core/service/impl/ClusterBalanceServiceImpl.java
@@ -68,6 +68,9 @@ public class ClusterBalanceServiceImpl implements ClusterBalanceService {
     @Value("${es.client.address}")
     private String esAddress;

+    @Value("${es.client.pass:}")
+    private String esPassword;
+
     @Autowired
     private JobService jobService;

@@ -137,9 +140,9 @@ public Result state(Long clusterPhyId) {
         Map resourceDoubleMap;
         Map brokerBalanceStateMap;
         try {
-            resourceDoubleMap = ExecutionRebalance.getClusterAvgResourcesState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
+            resourceDoubleMap = ExecutionRebalance.getClusterAvgResourcesState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames));
             brokerBalanceStateMap = ExecutionRebalance
-                    .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
+                    .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames));
         }catch (Exception e){
             logger.error("method=state||clusterPhyId={}||errMsg=exception", clusterPhyId, e);
             return Result.buildFailure(e.getMessage());
@@ -189,7 +192,7 @@ public PaginationResult overview(Long clusterPhyId, Cl
         try {
             List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
             brokerBalanceStateMap = ExecutionRebalance
-                    .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames));
+                    .getBrokerResourcesBalanceState(ClusterBalanceConverter.convert2BalanceParameter(configPOResult.getData(), brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames));
         } catch (Exception e) {
             logger.error("method=overview||clusterBalanceOverviewDTO={}||errMsg=exception", dto, e);
             return PaginationResult.buildFailure(e.getMessage(), dto);
@@ -280,6 +283,7 @@ public Result getItemState(Long clusterPhyId) {
                         brokerSpecMap,
                         clusterPhy,
                         esAddress,
+                        esPassword,
                         recentTopicNameList
                 )
         );
@@ -379,7 +383,7 @@ public Result preview(Long clusterPhyId, ClusterBalancePre
         // fetch the job plan
         Map<Integer, Broker> brokerMap = allBrokers.stream().collect(Collectors.toMap(Broker::getBrokerId, Function.identity()));
         List<String> topicNames = topicService.listRecentUpdateTopicNamesFromDB(clusterPhyId, ksConfigUtils.getClusterBalanceIgnoredTopicsTimeSecond());
-        BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(clusterBalancePreviewDTO, brokerMap, brokerSpecMap, clusterPhy, esAddress, topicNames);
+        BalanceParameter balanceParameter = ClusterBalanceConverter.convert2BalanceParameter(clusterBalancePreviewDTO, brokerMap, brokerSpecMap, clusterPhy, esAddress, esPassword, topicNames);
         ExecutionRebalance executionRebalance = new ExecutionRebalance();
         try {
             OptimizerResult optimizerResult = executionRebalance.optimizations(balanceParameter);
diff --git a/km-extends/km-account/src/main/java/com/xiaojukeji/know/streaming/km/account/login/ldap/LdapLoginServiceImpl.java b/km-extends/km-account/src/main/java/com/xiaojukeji/know/streaming/km/account/login/ldap/LdapLoginServiceImpl.java
index 3c0833e55..aa56d842c 100644
--- a/km-extends/km-account/src/main/java/com/xiaojukeji/know/streaming/km/account/login/ldap/LdapLoginServiceImpl.java
+++ b/km-extends/km-account/src/main/java/com/xiaojukeji/know/streaming/km/account/login/ldap/LdapLoginServiceImpl.java
@@ -16,8 +16,8 @@
 import com.xiaojukeji.know.streaming.km.account.common.bizenum.LoginServiceNameEnum;
 import com.xiaojukeji.know.streaming.km.account.common.ldap.LdapPrincipal;
 import com.xiaojukeji.know.streaming.km.account.login.ldap.remote.LdapAuthentication;
+import com.xiaojukeji.know.streaming.km.common.constant.Constant;
 import com.xiaojukeji.know.streaming.km.common.utils.CommonUtils;
-import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
 import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -79,7 +79,11 @@ public UserBriefVO verifyLogin(AccountLoginDTO loginDTO,
             userService.addUser(userDTO, ldapAttrsInfo.getSAMAccountName());

             // assign the user
-            user = ConvertUtil.obj2Obj(userDTO, User.class);
+            user = userService.getUserByUserName(ldapAttrsInfo.getSAMAccountName());
+        } else if (ValidateUtils.isNull(user)) {
+            // when the user is null and auto-registration is disabled, give the temporary user the default id
+            user = new User();
+            user.setId(Constant.INVALID_CODE);
         }

         // record the login state
diff --git a/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/common/MonitorSinkTagEnum.java b/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/common/MonitorSinkTagEnum.java
index f78c547ad..3d8b5c25c 100644
--- a/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/common/MonitorSinkTagEnum.java
+++ b/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/common/MonitorSinkTagEnum.java
@@ -16,6 +16,11 @@ public enum MonitorSinkTagEnum {
     CONSUMER_GROUP("consumerGroup"),

     REPLICATION("replication"),
+
+    CONNECT_CLUSTER_ID("connectClusterId"),
+
+    CONNECT_CONNECTOR("connectConnector"),
+
     ;

     private final String name;
diff --git a/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java b/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java
index bbb316475..6d6b97747 100644
--- a/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java
+++ b/km-extends/km-monitor/src/main/java/com/xiaojukeji/know/streaming/km/monitor/component/AbstractMonitorSinkService.java
@@ -3,7 +3,9 @@
 import com.didiglobal.logi.log.ILog;
 import com.didiglobal.logi.log.LogFactory;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.*;
+import com.xiaojukeji.know.streaming.km.common.bean.entity.metrics.connect.ConnectorMetrics;
 import com.xiaojukeji.know.streaming.km.common.bean.event.metric.*;
+import com.xiaojukeji.know.streaming.km.common.bean.event.metric.connect.ConnectorMetricEvent;
 import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
 import com.xiaojukeji.know.streaming.km.monitor.common.MetricSinkPoint;
 import org.springframework.context.ApplicationListener;
@@ -59,6 +61,10 @@ public void onApplicationEvent(BaseMetricEvent event) {
                 } else if(event instanceof ZookeeperMetricEvent) {
                     ZookeeperMetricEvent zookeeperMetricEvent = (ZookeeperMetricEvent)event;
                     sinkMetrics(zookeeperMetric2SinkPoint(zookeeperMetricEvent.getZookeeperMetrics()));
+
+                } else if (event instanceof ConnectorMetricEvent) {
+                    ConnectorMetricEvent connectorMetricEvent = (ConnectorMetricEvent)event;
+                    sinkMetrics(connectConnectorMetric2SinkPoint(connectorMetricEvent.getConnectorMetricsList()));
                 }
             } );
     }
@@ -170,6 +176,21 @@ private List zookeeperMetric2SinkPoint(List z
         return pointList;
     }

+    private List<MetricSinkPoint> connectConnectorMetric2SinkPoint(List<ConnectorMetrics> connectorMetricsList){
+        List<MetricSinkPoint> pointList = new ArrayList<>();
+
+        for(ConnectorMetrics metrics : connectorMetricsList){
+            Map<String, Object> tagsMap = new HashMap<>();
+            tagsMap.put(CLUSTER_ID.getName(), metrics.getClusterPhyId());
+            tagsMap.put(CONNECT_CLUSTER_ID.getName(), metrics.getConnectClusterId());
+            tagsMap.put(CONNECT_CONNECTOR.getName(), metrics.getConnectorName());
+
+            pointList.addAll(genSinkPoint("ConnectConnector", metrics.getMetrics(), metrics.getTimestamp(), tagsMap));
+        }
+
+        return pointList;
+    }
+
     private List genSinkPoint(String metricPre,
                               Map metrics,
                               long timeStamp,
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/connect/ConnectClusterMetricESDAO.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/connect/cluster/ConnectClusterMetricESDAO.java
similarity index 99%
rename from km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/connect/ConnectClusterMetricESDAO.java
rename to km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/connect/cluster/ConnectClusterMetricESDAO.java
index 31256efea..8d9626a53 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/connect/ConnectClusterMetricESDAO.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/es/dao/connect/cluster/ConnectClusterMetricESDAO.java
@@ -1,4 +1,4 @@
-package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect;
+package com.xiaojukeji.know.streaming.km.persistence.es.dao.connect.cluster;

 import com.didiglobal.logi.elasticsearch.client.response.query.query.ESQueryResponse;
 import com.didiglobal.logi.elasticsearch.client.response.query.query.aggs.ESAggr;
diff --git a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminClient.java b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminClient.java
index 20447798c..fccf35dd4 100644
--- a/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminClient.java
+++ b/km-persistence/src/main/java/com/xiaojukeji/know/streaming/km/persistence/kafka/KafkaAdminClient.java
@@ -12,6 +12,7 @@
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Component;

+import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
@@ -76,10 +77,12 @@ private void closeKafkaAdminClient(Long clusterPhyId) {

             LOGGER.info("close kafka AdminClient starting, clusterPhyId:{}", clusterPhyId);

-            boolean allSuccess = this.closeAdminClientList(adminClientList);
+            boolean allSuccess = this.closeAdminClientList(clusterPhyId, adminClientList);
             if (allSuccess) {
                 LOGGER.info("close kafka AdminClient success, clusterPhyId:{}", clusterPhyId);
+            } else {
+                LOGGER.error("close kafka AdminClient exist failed and can ignore this error, clusterPhyId:{}", clusterPhyId);
             }
         } catch (Exception e) {
             LOGGER.error("close kafka AdminClient failed, clusterPhyId:{}", clusterPhyId, e);
@@ -116,6 +119,7 @@ private AdminClient createKafkaAdminClient(Long clusterPhyId, String bootstrapSe

             adminClientList = new ArrayList<>();
             for (int i = 0; i < clientCnt; ++i) {
+                props.put(AdminClientConfig.CLIENT_ID_CONFIG, String.format("ApacheAdminClient||clusterPhyId=%d||Cnt=%d", clusterPhyId, i));
                 adminClientList.add(AdminClient.create(props));
             }

@@ -125,7 +129,7 @@ private AdminClient createKafkaAdminClient(Long clusterPhyId, String bootstrapSe
         } catch (Exception e) {
             LOGGER.error("create kafka AdminClient failed, clusterPhyId:{} props:{}", clusterPhyId, props, e);

-            this.closeAdminClientList(adminClientList);
+            this.closeAdminClientList(clusterPhyId, adminClientList);
         } finally {
             modifyClientMapLock.unlock();
         }
@@ -133,7 +137,7 @@ private AdminClient createKafkaAdminClient(Long clusterPhyId, String bootstrapSe
         return KAFKA_ADMIN_CLIENT_MAP.get(clusterPhyId).get((int)(System.currentTimeMillis() % clientCnt));
     }

-    private boolean closeAdminClientList(List<AdminClient> adminClientList) {
+    private boolean closeAdminClientList(Long clusterPhyId, List<AdminClient> adminClientList) {
         if (adminClientList == null) {
             return true;
         }
@@ -141,9 +145,11 @@ private boolean closeAdminClientList(List adminClientList) {
         boolean allSuccess = true;
         for (AdminClient adminClient: adminClientList) {
             try {
-                adminClient.close();
+                // close the client with a 30-second timeout
+                adminClient.close(Duration.ofSeconds(30));
             } catch (Exception e) {
                 // ignore
+                LOGGER.error("close kafka AdminClient exist failed, clusterPhyId:{}", clusterPhyId, e);
                 allSuccess = false;
             }
         }
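Context for the `close(Duration)` change above: the no-argument `AdminClient.close()` waits effectively indefinitely for in-flight requests to drain, so a hung broker connection could block cluster cleanup; the 30-second bound caps that wait and then force-closes. A minimal sketch (the broker address is a placeholder):

    import java.time.Duration;
    import java.util.Properties;

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;

    public class BoundedCloseDemo {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
            // A descriptive client.id, as the diff sets, makes each client
            // identifiable in broker-side request logs and quota metrics.
            props.put(AdminClientConfig.CLIENT_ID_CONFIG, "demo-admin-client");

            AdminClient adminClient = AdminClient.create(props);
            try {
                // ... issue admin calls here ...
            } finally {
                // Give pending requests up to 30 seconds, then force-close.
                adminClient.close(Duration.ofSeconds(30));
            }
        }
    }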
diff --git a/km-persistence/src/main/resources/sql/dml-logi.sql b/km-persistence/src/main/resources/sql/dml-logi.sql
index 2beff22e1..be171d5f3 100644
--- a/km-persistence/src/main/resources/sql/dml-logi.sql
+++ b/km-persistence/src/main/resources/sql/dml-logi.sql
@@ -157,3 +157,7 @@ INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `l
 INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2046', '0', 'know-streaming');
 INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2048', '0', 'know-streaming');
 INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2050', '0', 'know-streaming');
+
+-- Multi-cluster management permission, added 2023-07-18
+INSERT INTO `logi_security_permission` (`id`, `permission_name`, `parent_id`, `leaf`, `level`, `description`, `is_delete`, `app_name`) VALUES ('2052', 'Security-User查看密码', '1593', '1', '2', 'Security-User查看密码', '0', 'know-streaming');
+INSERT INTO `logi_security_role_permission` (`role_id`, `permission_id`, `is_delete`, `app_name`) VALUES ('1677', '2052', '0', 'know-streaming');
diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/ClusterGroupsController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/ClusterGroupsController.java
index 7159dca9d..d43515b67 100644
--- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/ClusterGroupsController.java
+++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/cluster/ClusterGroupsController.java
@@ -77,7 +77,7 @@ public Result> getClusterPhyGroupPartitions(@PathVariable
     @GetMapping(value = "clusters/{clusterPhyId}/groups/{groupName}/topics-overview")
     public PaginationResult getGroupTopicsOverview(@PathVariable Long clusterPhyId,
                                                    @PathVariable String groupName,
-                                                   PaginationBaseDTO dto) {
+                                                   PaginationBaseDTO dto) throws Exception {
         return groupManager.pagingGroupTopicMembers(clusterPhyId, groupName, dto);
     }
diff --git a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java
index b03ca7cce..32d76be3b 100644
--- a/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java
+++ b/km-rest/src/main/java/com/xiaojukeji/know/streaming/km/rest/api/v3/connect/KafkaConnectorController.java
@@ -15,7 +15,7 @@
 import com.xiaojukeji.know.streaming.km.common.enums.connect.ConnectActionEnum;
 import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
 import com.xiaojukeji.know.streaming.km.common.utils.ValidateUtils;
-import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
+import com.xiaojukeji.know.streaming.km.core.service.connect.connector.OpConnectorService;
 import com.xiaojukeji.know.streaming.km.core.service.connect.plugin.PluginService;
 import io.swagger.annotations.Api;
 import io.swagger.annotations.ApiOperation;
@@ -31,9 +31,8 @@
 @RestController
 @RequestMapping(ApiPrefix.API_V3_CONNECT_PREFIX)
 public class KafkaConnectorController {
-
     @Autowired
-    private ConnectorService connectorService;
+    private OpConnectorService opConnectorService;

     @Autowired
     private ConnectorManager connectorManager;

@@ -56,7 +55,7 @@ public Result createConnector(@Validated @RequestBody ConnectorCreateDTO d
     @DeleteMapping(value ="connectors")
     @ResponseBody
     public Result deleteConnectors(@Validated @RequestBody ConnectorDeleteDTO dto) {
-        return connectorService.deleteConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
+        return opConnectorService.deleteConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
     }

     @ApiOperation(value = "操作Connector", notes = "")
@@ -64,11 +63,11 @@ public Result deleteConnectors(@Validated @RequestBody ConnectorDeleteDTO
     @ResponseBody
     public Result operateConnectors(@Validated @RequestBody ConnectorActionDTO dto) {
         if (ConnectActionEnum.RESTART.getValue().equals(dto.getAction())) {
-            return connectorService.restartConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
+            return opConnectorService.restartConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
         } else if (ConnectActionEnum.STOP.getValue().equals(dto.getAction())) {
-            return connectorService.stopConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
+            return opConnectorService.stopConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
         } else if (ConnectActionEnum.RESUME.getValue().equals(dto.getAction())) {
-            return connectorService.resumeConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
+            return opConnectorService.resumeConnector(dto.getConnectClusterId(), dto.getConnectorName(), HttpRequestUtil.getOperator());
         }

         return Result.buildFailure(ResultStatus.PARAM_ILLEGAL);
diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/connect/metadata/SyncConnectorTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/connect/metadata/SyncConnectorTask.java
index 00e584251..799d7223d 100644
--- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/connect/metadata/SyncConnectorTask.java
+++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/connect/metadata/SyncConnectorTask.java
@@ -3,17 +3,15 @@
 import com.didiglobal.logi.job.annotation.Task;
 import com.didiglobal.logi.job.common.TaskResult;
 import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
-import com.didiglobal.logi.log.ILog;
-import com.didiglobal.logi.log.LogFactory;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.ConnectCluster;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.connect.connector.KSConnector;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
+import com.xiaojukeji.know.streaming.km.common.utils.Tuple;
 import com.xiaojukeji.know.streaming.km.core.service.connect.connector.ConnectorService;
 import org.springframework.beans.factory.annotation.Autowired;

-import java.util.ArrayList;
-import java.util.HashSet;
 import java.util.List;
+import java.util.Set;


 @Task(name = "SyncConnectorTask",
@@ -23,40 +21,21 @@
       consensual = ConsensualEnum.BROADCAST,
       timeout = 2 * 60)
 public class SyncConnectorTask extends AbstractAsyncMetadataDispatchTask {
-    private static final ILog LOGGER = LogFactory.getLog(SyncConnectorTask.class);
-
     @Autowired
     private ConnectorService connectorService;

+
     @Override
     public TaskResult processClusterTask(ConnectCluster connectCluster, long triggerTimeUnitMs) {
-        Result<List<String>> nameListResult = connectorService.listConnectorsFromCluster(connectCluster.getId());
-        if (nameListResult.failed()) {
-            return TaskResult.FAIL;
-        }
-
-        boolean allSuccess = true;
-
-        List<KSConnector> connectorList = new ArrayList<>();
-        for (String connectorName: nameListResult.getData()) {
-            Result<KSConnector> ksConnectorResult = connectorService.getAllConnectorInfoFromCluster(connectCluster.getId(), connectorName);
-            if (ksConnectorResult.failed()) {
-                LOGGER.error(
-                        "method=processClusterTask||connectClusterId={}||connectorName={}||result={}",
-                        connectCluster.getId(), connectorName, ksConnectorResult
-                );
-
-                allSuccess = false;
-                continue;
-            }
-
-            connectorList.add(ksConnectorResult.getData());
+        // fetch connector info
+        Result<Tuple<Set<String>, List<KSConnector>>> dataResult = connectorService.getDataFromKafka(connectCluster);
+        if (dataResult.failed()) {
+            return new TaskResult(TaskResult.FAIL_CODE, dataResult.getMessage());
         }

-        // add MM2-related info
-        connectorService.completeMirrorMakerInfo(connectCluster, connectorList);
-
-        connectorService.batchReplace(connectCluster.getKafkaClusterPhyId(), connectCluster.getId(), connectorList, new HashSet<>(nameListResult.getData()));
+        // write to the DB
+        connectorService.writeToDB(connectCluster.getId(), dataResult.getData().v1(), dataResult.getData().v2());

-        return allSuccess? TaskResult.SUCCESS: TaskResult.FAIL;
+        // succeed only if every listed connector was fetched successfully
+        return dataResult.getData().v1().size() == dataResult.getData().v2().size()? TaskResult.SUCCESS: TaskResult.FAIL;
     }
 }
diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaAclTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaAclTask.java
index 0dd5b8f7f..d0b4f3bac 100644
--- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaAclTask.java
+++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncKafkaAclTask.java
@@ -3,19 +3,13 @@
 import com.didiglobal.logi.job.annotation.Task;
 import com.didiglobal.logi.job.common.TaskResult;
 import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
-import com.didiglobal.logi.log.ILog;
-import com.didiglobal.logi.log.LogFactory;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
-import com.xiaojukeji.know.streaming.km.common.bean.po.KafkaAclPO;
-import com.xiaojukeji.know.streaming.km.common.converter.KafkaAclConverter;
 import com.xiaojukeji.know.streaming.km.core.service.acl.KafkaAclService;
-import com.xiaojukeji.know.streaming.km.core.service.acl.OpKafkaAclService;
 import org.apache.kafka.common.acl.AclBinding;
 import org.springframework.beans.factory.annotation.Autowired;

 import java.util.List;
-import java.util.stream.Collectors;

 @Task(name = "SyncKafkaAclTask",
       description = "KafkaAcl信息同步到DB",
@@ -24,32 +18,18 @@
       consensual = ConsensualEnum.BROADCAST,
       timeout = 2 * 60)
 public class SyncKafkaAclTask extends AbstractAsyncMetadataDispatchTask {
-    private static final ILog log = LogFactory.getLog(SyncKafkaAclTask.class);
-
     @Autowired
     private KafkaAclService kafkaAclService;

-    @Autowired
-    private OpKafkaAclService opKafkaAclService;
-
     @Override
     public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
-        Result<List<AclBinding>> aclBindingListResult = kafkaAclService.getAclFromKafka(clusterPhy.getId());
+        Result<List<AclBinding>> aclBindingListResult = kafkaAclService.getDataFromKafka(clusterPhy);
         if (aclBindingListResult.failed()) {
             return TaskResult.FAIL;
         }

-        if (!aclBindingListResult.hasData()) {
-            return TaskResult.SUCCESS;
-        }
-
-        // update the DB data
-        List<KafkaAclPO> poList = aclBindingListResult.getData()
-                .stream()
-                .map(elem -> KafkaAclConverter.convert2KafkaAclPO(clusterPhy.getId(), elem, triggerTimeUnitMs))
-                .collect(Collectors.toList());
+        kafkaAclService.writeToDB(clusterPhy.getId(), aclBindingListResult.getData());

-        opKafkaAclService.batchUpdateAcls(clusterPhy.getId(), poList);
         return TaskResult.SUCCESS;
     }
 }
diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java
index e87f2bf18..4ce988d38 100644
--- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java
+++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java
@@ -3,12 +3,8 @@
 import com.didiglobal.logi.job.annotation.Task;
 import com.didiglobal.logi.job.common.TaskResult;
 import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
-import com.didiglobal.logi.log.ILog;
-import com.didiglobal.logi.log.LogFactory;
diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java
index e87f2bf18..4ce988d38 100644
--- a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java
+++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/kafka/metadata/SyncZookeeperTask.java
@@ -3,12 +3,8 @@
 import com.didiglobal.logi.job.annotation.Task;
 import com.didiglobal.logi.job.common.TaskResult;
 import com.didiglobal.logi.job.core.consensual.ConsensualEnum;
-import com.didiglobal.logi.log.ILog;
-import com.didiglobal.logi.log.LogFactory;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
-import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
-import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;
 import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
 import com.xiaojukeji.know.streaming.km.core.service.zookeeper.ZookeeperService;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -23,24 +19,17 @@
         consensual = ConsensualEnum.BROADCAST,
         timeout = 2 * 60)
 public class SyncZookeeperTask extends AbstractAsyncMetadataDispatchTask {
-    private static final ILog log = LogFactory.getLog(SyncZookeeperTask.class);
-
     @Autowired
     private ZookeeperService zookeeperService;
 
     @Override
     public TaskResult processClusterTask(ClusterPhy clusterPhy, long triggerTimeUnitMs) {
-        Result<List<ZookeeperInfo>> infoResult = zookeeperService.listFromZookeeper(
-                clusterPhy.getId(),
-                clusterPhy.getZookeeper(),
-                ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class)
-        );
-
+        Result<List<ZookeeperInfo>> infoResult = zookeeperService.getDataFromKafka(clusterPhy);
         if (infoResult.failed()) {
             return new TaskResult(TaskResult.FAIL_CODE, infoResult.getMessage());
         }
 
-        zookeeperService.batchReplaceDataInDB(clusterPhy.getId(), infoResult.getData());
+        zookeeperService.writeToDB(clusterPhy.getId(), infoResult.getData());
 
         return TaskResult.SUCCESS;
     }
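Same pattern here: the ZK address and `ZKConfig` plumbing removed from the task presumably lives inside `ZookeeperService.getDataFromKafka` now. A sketch of that delegation, assuming it simply wraps the old `listFromZookeeper` signature with the exact parameters the task passed before this diff (the delegation itself is an assumption):

```java
import java.util.List;

import com.xiaojukeji.know.streaming.km.common.bean.entity.cluster.ClusterPhy;
import com.xiaojukeji.know.streaming.km.common.bean.entity.config.ZKConfig;
import com.xiaojukeji.know.streaming.km.common.bean.entity.result.Result;
import com.xiaojukeji.know.streaming.km.common.bean.entity.zookeeper.ZookeeperInfo;
import com.xiaojukeji.know.streaming.km.common.utils.ConvertUtil;

// Hypothetical delegation inside ZookeeperService: the three arguments are the
// ones SyncZookeeperTask used to assemble itself before this change.
public Result<List<ZookeeperInfo>> getDataFromKafka(ClusterPhy clusterPhy) {
    return this.listFromZookeeper(
            clusterPhy.getId(),
            clusterPhy.getZookeeper(),                                              // ZK connect string
            ConvertUtil.str2ObjByJson(clusterPhy.getZkProperties(), ZKConfig.class) // per-cluster ZK options
    );
}
```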
diff --git a/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/listener/TaskClusterDeletedListener.java b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/listener/TaskClusterDeletedListener.java
new file mode 100644
index 000000000..b10d61be5
--- /dev/null
+++ b/km-task/src/main/java/com/xiaojukeji/know/streaming/km/task/service/listener/TaskClusterDeletedListener.java
@@ -0,0 +1,53 @@
+package com.xiaojukeji.know.streaming.km.task.service.listener;
+
+import com.didiglobal.logi.log.ILog;
+import com.didiglobal.logi.log.LogFactory;
+import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;
+import com.xiaojukeji.know.streaming.km.common.component.SpringTool;
+import com.xiaojukeji.know.streaming.km.common.utils.BackoffUtils;
+import com.xiaojukeji.know.streaming.km.common.utils.FutureUtil;
+import com.xiaojukeji.know.streaming.km.core.service.meta.MetaDataService;
+import org.springframework.context.ApplicationListener;
+import org.springframework.stereotype.Service;
+
+@Service
+public class TaskClusterDeletedListener implements ApplicationListener<ClusterPhyDeletedEvent> {
+    private static final ILog LOGGER = LogFactory.getLog(TaskClusterDeletedListener.class);
+
+    @Override
+    public void onApplicationEvent(ClusterPhyDeletedEvent event) {
+        LOGGER.info("method=onApplicationEvent||clusterPhyId={}||msg=listened delete cluster", event.getClusterPhyId());
+
+        // Hand the job to KS's own thread pool for asynchronous execution
+        FutureUtil.quickStartupFutureUtil.submitTask(
+                () -> {
+                    // Wait 60 seconds so that still-running tasks do not write data back into the DB
+                    BackoffUtils.backoff(60000);
+
+                    for (MetaDataService metaDataService: SpringTool.getBeansOfType(MetaDataService.class).values()) {
+                        LOGGER.info(
+                                "method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db starting",
+                                event.getClusterPhyId(), metaDataService.getClass().getSimpleName()
+                        );
+
+                        try {
+                            // Delete the data
+                            metaDataService.deleteInDBByKafkaClusterId(event.getClusterPhyId());
+
+                            LOGGER.info(
+                                    "method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db finished",
+                                    event.getClusterPhyId(), metaDataService.getClass().getSimpleName()
+                            );
+                        } catch (Exception e) {
+                            LOGGER.error(
+                                    "method=onApplicationEvent||clusterPhyId={}||className={}||msg=delete cluster data in db failed||errMsg=exception",
+                                    event.getClusterPhyId(), metaDataService.getClass().getSimpleName(), e
+                            );
+                        }
+                    }
+                }
+        );
+    }
+}
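For context, the new listener above is only the consuming side. A minimal sketch of how `ClusterPhyDeletedEvent` would be fired, assuming a standard Spring `ApplicationEventPublisher` and a `(source, clusterPhyId)` event constructor — both assumptions, since the publishing code is outside this diff:

```java
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationEventPublisher;
import org.springframework.stereotype.Service;

import com.xiaojukeji.know.streaming.km.common.bean.event.cluster.connect.ClusterPhyDeletedEvent;

// Hypothetical publishing side: after the cluster row itself is deleted,
// broadcast the event so TaskClusterDeletedListener (and any other listener)
// can purge its own tables asynchronously.
@Service
public class ClusterPhyDeletePublisherSketch {
    @Autowired
    private ApplicationEventPublisher publisher;

    public void onClusterDeleted(Long clusterPhyId) {
        // The listener backs off 60s before purging, so publishing immediately
        // after the delete is safe even if sync tasks are still in flight.
        publisher.publishEvent(new ClusterPhyDeletedEvent(this, clusterPhyId));
    }
}
```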