@@ -1,7 +1,7 @@
 package cn.reghao.devops.mgr.ops.srv.mon;
 
-import cn.reghao.devops.mgr.config.AppProperties;
 import cn.reghao.devops.mgr.ops.srv.mon.model.*;
+import cn.reghao.jutil.jdk.converter.DateTimeConverter;
 import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -35,18 +35,14 @@ import java.util.stream.Collectors;
 @Service
 public class PrometheusService {
     private ObjectMapper objectMapper = new ObjectMapper();
-    private PrometheusAsyncClient promClient;
-    private Cache<String, Object> cache;
+    private final PrometheusClientManager prometheusClientManager;
+    private final Cache<String, Object> cache;
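+    // Note: callers obtain the client via prometheusClientManager.getClient() on every request,
+    // so (presumably) a reconfigured Prometheus endpoint is picked up without a service restart.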
 
-    public PrometheusService(AppProperties appProperties, Cache<String, Object> cache) {
-        this.promClient = new PrometheusAsyncClient(appProperties.getPrometheusBaseUrl());
+    public PrometheusService(PrometheusClientManager prometheusClientManager, Cache<String, Object> cache) {
+        this.prometheusClientManager = prometheusClientManager;
         this.cache = cache;
     }
 
-    public PrometheusService() {
-        this.promClient = new PrometheusAsyncClient("http://prometheus.iquizoo.cn");
-    }
-
     public static Configuration getTemplateConfiguration() throws TemplateException, IOException {
         FreeMarkerConfigurer configurer = new FreeMarkerConfigurer();
         // 1. Set the template directory (usually under resources/templates)
@@ -124,7 +120,7 @@ public class PrometheusService {
         );
 
         // Execute asynchronously
-        promClient.fetchAllMetrics(tasks).thenAccept(results -> {
+        prometheusClientManager.getClient().fetchAllMetrics(tasks).thenAccept(results -> {
             processResults(results);
         }).join(); // join() is fine here when running on a scheduled task's own thread
     }
@@ -359,7 +355,7 @@ public class PrometheusService {
                 step);
     }
 
-    public void generatePillarReport() throws Exception {
+    public PillarReportDTO getPillarReportData() {
         // 1. Time range: yesterday 00:00:00 through 23:59:59
         // (could also be the trailing 24 hours from the current time, as needed)
         long end = Instant.now().getEpochSecond();
@@ -375,6 +371,7 @@ public class PrometheusService {
         }
         // 3. Compute the timestamps
         end = today3AM.toEpochSecond(); // today at 03:00:00
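+        // The next line overrides the 3AM boundary computed above, so the report actually
+        // covers the trailing 24 hours ending at the current moment.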
+        end = Instant.now().getEpochSecond();
         start = end - (24 * 3600); // 24 hours before 'end'
 
         String step = "30m"; // one sample every 30 minutes, a good fit for a 24h trend chart
@@ -396,7 +393,69 @@ public class PrometheusService {
         log.info("Fetching the four pillar metrics from Prometheus in parallel...");
 
         // 4. Run the queries in parallel and block for the results (join)
-        Map<String, String> rawResults = promClient.fetchAllMetrics0(tasks).join();
+        Map<String, String> rawResults = prometheusClientManager.getClient().fetchAllMetrics0(tasks).join();
+        try {
+            PillarReportDTO dto = new PillarReportDTO();
+            // Basic report metadata
+            dto.setReportDate(DateTimeConverter.format(LocalDateTime.now()));
+            // Parse the four pillar metrics
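+            // parseMatrix(json, scaleToPercent): when the flag is true the 0..1 ratios from the
+            // queries above are multiplied by 100; the net series stays in raw MB/s (its query
+            // already divides bytes by 1024 / 1024).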
+            dto.setCpuSeries(parseMatrix(rawResults.get("cpu"), true));
+            dto.setMemSeries(parseMatrix(rawResults.get("mem"), true));
+            dto.setDiskSeries(parseMatrix(rawResults.get("disk"), true));
+            dto.setNetSeries(parseMatrix(rawResults.get("net"), false)); // network values are not scaled by 100
+
+            // Extract the X-axis labels (the values of any one series will do)
+            String timeLabelsStr = extractTimeLabels(rawResults.get("cpu"));
+            List<String> timeLabelList = Arrays.stream(timeLabelsStr.split(","))
+                    .map(str -> str.replace("'", ""))
+                    .collect(Collectors.toList());
+            dto.setTimeLabels(timeLabelList);
+            return dto;
+        } catch (Exception e) {
+            log.error("Failed to assemble the pillar report data", e);
+        }
+        return null;
+    }
+
+    public void generatePillarReport() throws Exception {
+        /*// 1. Time range: yesterday 00:00:00 through 23:59:59
+        // (could also be the trailing 24 hours from the current time, as needed)
+        long end = Instant.now().getEpochSecond();
+        long start = end - (24 * 3600);
+        // 1. Today at 03:00:00, in the system default time zone
+        ZonedDateTime today3AM = LocalDate.now()
+                .atTime(3, 0, 0)
+                .atZone(ZoneId.systemDefault());
+        // 2. Before 3 AM, the "today at 3 AM" above still lies in the future; to keep the
+        //    logic robust (always a fully elapsed 24h window), shift back one day:
+        if (ZonedDateTime.now().isBefore(today3AM)) {
+            today3AM = today3AM.minusDays(1);
+        }
+        // 3. Compute the timestamps
+        end = today3AM.toEpochSecond();   // today at 03:00:00
+        start = end - (24 * 3600);        // yesterday at 03:00:00
+
+        String step = "30m"; // one sample every 30 minutes, a good fit for a 24h trend chart
+
+        // 2. PromQL queries
+        String cpuQuery = "1 - avg(irate(node_cpu_seconds_total{mode='idle'}[5m])) by (instance)";
+        String memQuery = "1 - (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes)";
+        String diskQuery = "max(rate(node_disk_io_time_seconds_total[5m])) by (instance)";
+        String netQuery = "sum(irate(node_network_receive_bytes_total[5m])) by (instance) / 1024 / 1024";
+
+        // 3. Build the async task map
+        //    Note: these go against the query_range endpoint
+        Map<String, String> tasks = Map.of(
+                "cpu", buildRangeUrl(cpuQuery, start, end, step),
+                "mem", buildRangeUrl(memQuery, start, end, step),
+                "disk", buildRangeUrl(diskQuery, start, end, step),
+                "net", buildRangeUrl(netQuery, start, end, step)
+        );
+        log.info("Fetching the four pillar metrics from Prometheus in parallel...");
+
+        // 4. Run in parallel and block for the results (join)
+        Map<String, String> rawResults = prometheusClientManager.getClient().fetchAllMetrics0(tasks).join();
 
         PillarReportDTO dto = new PillarReportDTO();
         // Basic report metadata
@@ -408,7 +467,9 @@ public class PrometheusService {
         dto.setNetSeries(parseMatrix(rawResults.get("net"), false)); // network values are not scaled by 100
 
         // Extract the X-axis labels (the values of any one series will do)
-        dto.setTimeLabels(extractTimeLabels(rawResults.get("cpu")));
+        dto.setTimeLabels(extractTimeLabels(rawResults.get("cpu")));*/
+
+        PillarReportDTO dto = getPillarReportData();
 
         // 1. Prepare the data model (root map)
         // templates can read it as ${report.reportDate} or simply ${reportDate}
@@ -466,7 +527,8 @@ public class PrometheusService {
                         String timeLabel = Instant.ofEpochSecond(v.get(0).asLong())
                                 .atZone(ZoneId.systemDefault())
                                 .format(DateTimeFormatter.ofPattern("HH:mm"));
-                        timeLabels.add("'" + timeLabel + "'");
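+                        // plain labels: timeLabels is handed to the template as a List<String>
+                        // now, so the values no longer need to carry their own quoting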
+                        timeLabels.add(timeLabel);
                     }
                 }
                 labelsExtracted = true;
@@ -530,7 +592,7 @@ public class PrometheusService {
         // 1. Queries
         String cpuQuery = """
                 sum(
-                  irate(container_cpu_usage_seconds_total{name!=""}[5m])
+                  irate(container_cpu_usage_seconds_total{name!=''}[5m])
                 ) by (name, instance) * 100
                 """;
         String memQuery = """
@@ -556,8 +618,8 @@ public class PrometheusService {
         String step = "30m";
 
         // 3. Fetch the data in parallel (async + join is the efficient way here)
-        CompletableFuture<String> cpuFuture = promClient.queryRange(cpuQuery, start, end, step);
-        CompletableFuture<String> memFuture = promClient.queryRange(memQuery, start, end, step);
+        CompletableFuture<String> cpuFuture = prometheusClientManager.getClient().queryRange(cpuQuery, start, end, step);
+        CompletableFuture<String> memFuture = prometheusClientManager.getClient().queryRange(memQuery, start, end, step);
         String cpuJson = cpuFuture.join();
         String memJson = memFuture.join();
         // 4. Parse the data
@@ -600,10 +662,10 @@ public class PrometheusService {
         String step = "30m";
 
         // 3. Fetch in parallel
-        CompletableFuture<String> cpuFuture = promClient.queryRange(cpuQuery, start, end, step);
-        CompletableFuture<String> cpuOldFuture = promClient.queryRange(cpuQueryOld, start, end, step);
-        CompletableFuture<String> memFuture = promClient.queryRange(memQuery, start, end, step);
-        CompletableFuture<String> memOldFuture = promClient.queryRange(memQueryOld, start, end, step);
+        CompletableFuture<String> cpuFuture = prometheusClientManager.getClient().queryRange(cpuQuery, start, end, step);
+        CompletableFuture<String> cpuOldFuture = prometheusClientManager.getClient().queryRange(cpuQueryOld, start, end, step);
+        CompletableFuture<String> memFuture = prometheusClientManager.getClient().queryRange(memQuery, start, end, step);
+        CompletableFuture<String> memOldFuture = prometheusClientManager.getClient().queryRange(memQueryOld, start, end, step);
         CompletableFuture.allOf(cpuFuture, cpuOldFuture, memFuture, memOldFuture).join();
@@ -634,7 +696,8 @@ public class PrometheusService {
         // Caffeine's get() takes a per-key lock, which naturally guards against cache stampedes
         Object result = cache.get(cacheKey, key -> {
             try {
-                return getContainerReportData();
+                String containerName = "file-prod";
+                return getContainerReportData(containerName);
             } catch (Exception e) {
                 log.error("Failed to generate report", e);
                 return null;
@@ -643,33 +706,70 @@ public class PrometheusService {
         return (ContainerReportVO) result;
     }
 
-    public ContainerReportVO getContainerReportData() throws Exception {
-        // 1. Queries (corrected PromQL)
-        String cpuQuery = "sum(irate(container_cpu_usage_seconds_total{name=~'.*-prod'}[5m])) by (name, instance) * 100";
-        String cpuQueryOld = "sum(irate(container_cpu_usage_seconds_total{name=~'.*-prod'}[5m] offset 1d)) by (name, instance) * 100";
-        String memQuery = "sum(container_memory_working_set_bytes{name=~'.*-prod'}) by (name, instance) / 1024 / 1024";
-        String memQueryOld = "sum(container_memory_working_set_bytes{name=~'.*-prod'} offset 1d) by (name, instance) / 1024 / 1024";
+    public ContainerReportVO getNodeReportData(String ipv4Address) {
+        String instance = ipv4Address;
+        // Query tasks
+        Map<String, String> queryMap = Map.of(
+                "cpuQuery", "sum(irate(container_cpu_usage_seconds_total{instance=~'^" + instance + ":.*'}[5m])) by (name, instance) * 100",
+                "cpuQueryOld", "sum(irate(container_cpu_usage_seconds_total{instance=~'^" + instance + ":.*'}[5m] offset 1d)) by (name, instance) * 100",
+                "memQuery", "sum(container_memory_working_set_bytes{instance=~'^" + instance + ":.*'}) by (name, instance) / 1024 / 1024",
+                "memQueryOld", "sum(container_memory_working_set_bytes{instance=~'^" + instance + ":.*'} offset 1d) by (name, instance) / 1024 / 1024"
+        );
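+        // the regex "^<ip>:.*" matches that node's exporters on any port, so series from
+        // e.g. a hypothetical 10.0.0.5:8080 and 10.0.0.5:9100 are both selected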
 
-        // 2. Time range (today at 03:00)
-        ZonedDateTime today3AM = LocalDate.now().atTime(3, 0, 0).atZone(ZoneId.systemDefault());
-        if (ZonedDateTime.now().isBefore(today3AM)) today3AM = today3AM.minusDays(1);
+        try {
+            return getReportData(queryMap);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
 
-        long end = today3AM.toEpochSecond();
+    public ContainerReportVO getContainerReportData(String containerName) {
+        // Query tasks
+        Map<String, String> queryMap = Map.of(
+                "cpuQuery", "sum(irate(container_cpu_usage_seconds_total{name='" + containerName + "'}[5m])) by (name, instance) * 100",
+                "cpuQueryOld", "sum(irate(container_cpu_usage_seconds_total{name='" + containerName + "'}[5m] offset 1d)) by (name, instance) * 100",
+                "memQuery", "sum(container_memory_working_set_bytes{name='" + containerName + "'}) by (name, instance) / 1024 / 1024",
+                "memQueryOld", "sum(container_memory_working_set_bytes{name='" + containerName + "'} offset 1d) by (name, instance) / 1024 / 1024",
+                "throttleQuery", "sum(increase(container_cpu_cfs_throttled_periods_total{name='" + containerName + "'}[5m])) by (name, instance)"
+        );
+
+        try {
+            return getReportData(queryMap);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    private ContainerReportVO getReportData(Map<String, String> queryMap) throws Exception {
+        // 2. Time range (trailing 24h)
+        long end = Instant.now().getEpochSecond();
         long start = end - (24 * 3600);
         String step = "30m";
 
         // 3. Fetch in parallel
-        CompletableFuture<String> cpuFuture = promClient.queryRange(cpuQuery, start, end, step);
-        CompletableFuture<String> cpuOldFuture = promClient.queryRange(cpuQueryOld, start, end, step);
-        CompletableFuture<String> memFuture = promClient.queryRange(memQuery, start, end, step);
-        CompletableFuture<String> memOldFuture = promClient.queryRange(memQueryOld, start, end, step);
-        CompletableFuture.allOf(cpuFuture, cpuOldFuture, memFuture, memOldFuture).join();
+        CompletableFuture<String> cpuFuture = prometheusClientManager.getClient().queryRange(queryMap.get("cpuQuery"), start, end, step);
+        CompletableFuture<String> cpuOldFuture = prometheusClientManager.getClient().queryRange(queryMap.get("cpuQueryOld"), start, end, step);
+        CompletableFuture<String> memFuture = prometheusClientManager.getClient().queryRange(queryMap.get("memQuery"), start, end, step);
+        CompletableFuture<String> memOldFuture = prometheusClientManager.getClient().queryRange(queryMap.get("memQueryOld"), start, end, step);
+        CompletableFuture<String> throttleFuture = null;
+        if (queryMap.get("throttleQuery") != null) {
+            throttleFuture = prometheusClientManager.getClient().queryRange(queryMap.get("throttleQuery"), start, end, step);
+            CompletableFuture.allOf(cpuFuture, cpuOldFuture, memFuture, memOldFuture, throttleFuture).join();
+        } else {
+            CompletableFuture.allOf(cpuFuture, cpuOldFuture, memFuture, memOldFuture).join();
+        }
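+        // throttleFuture is optional: only the per-container report supplies a "throttleQuery"
+        // entry in queryMap; the per-node variant does not (see the two builders above)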
 
         // 4. Parse the raw data (the parsed structure is assumed to still be Map<Instance, Map<Container, List<Double>>>)
         Map<String, Map<String, List<Double>>> cpuT = parseCpuJson(cpuFuture.get());
         Map<String, Map<String, List<Double>>> cpuY = parseCpuJson(cpuOldFuture.get());
         Map<String, Map<String, List<Double>>> memT = parseMemJson(memFuture.get());
         Map<String, Map<String, List<Double>>> memY = parseMemJson(memOldFuture.get());
+        Map<String, Map<String, List<Double>>> cpuThrottleMap = new HashMap<>();
+        if (throttleFuture != null) {
+            cpuThrottleMap = parseCpuJson(throttleFuture.get());
+        }
 
         // 5. Core step: aggregate the data by instance
         // collect every instance name that appears, de-duplicated
@@ -678,9 +778,9 @@ public class PrometheusService {
         allInstanceNames.addAll(cpuY.keySet());
         allInstanceNames.addAll(memT.keySet());
         allInstanceNames.addAll(memY.keySet());
+        allInstanceNames.addAll(cpuThrottleMap.keySet());
 
         List<HostData> instanceList = new ArrayList<>();
-
         for (String instName : allInstanceNames) {
             HostData instData = new HostData();
             instData.setName(instName);
@@ -697,6 +797,11 @@ public class PrometheusService {
             memGroup.setYesterday(memY.getOrDefault(instName, new HashMap<>()));
             instData.setMem(memGroup);
 
+            // Assemble the CPU-throttle group (today only; there is no yesterday series for it)
+            MetricGroup cpuThrottleGroup = new MetricGroup();
+            cpuThrottleGroup.setToday(cpuThrottleMap.getOrDefault(instName, new HashMap<>()));
+            instData.setCpuThrottle(cpuThrottleGroup);
+
             instanceList.add(instData);
         }
@@ -704,7 +809,6 @@ public class PrometheusService {
         ContainerReportVO report = new ContainerReportVO();
         report.setTimeLabels(getCpuTimeLabels(cpuFuture.get()));
         report.setInstances(instanceList);
-
         return report;
     }
@@ -726,7 +830,7 @@ public class PrometheusService {
         );
 
         // Execute asynchronously
-        promClient.fetchAllMetrics(tasks).thenAccept(results -> {
+        prometheusClientManager.getClient().fetchAllMetrics(tasks).thenAccept(results -> {
             processResults0(results);
         }).join(); // join() is fine here when running on a scheduled task's own thread
     }
@@ -748,11 +852,317 @@ public class PrometheusService {
         }
     }
 
+    public DailyReportDTO getDailyReportData() {
+        // Define the query tasks
+        Map<String, String> tasks = new HashMap<>();
+        // Tasks are added one by one via put() (Map.of tops out at 10 pairs, and there are more here)
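+        // In the queries below, label_replace(..., "instance", "$1", "instance", "([^:]+):.*")
+        // rewrites "host:port" instances down to just the host, so series scraped from different
+        // exporter ports on the same machine aggregate together.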
+        tasks.put("container_cpu", """
+                topk(10,\s
+                  sum(
+                    label_replace(
+                      increase(container_cpu_cfs_throttled_seconds_total[24h]),
+                      "instance", "$1", "instance", "([^:]+):.*"
+                    )
+                  ) by (name, instance)
+                )
+                """);
+
+        tasks.put("container_mem", """
+                topk(10,\s
+                  avg by (name, instance) (
+                    label_replace(
+                      (container_memory_working_set_bytes{name!=""} / container_spec_memory_limit_bytes > 0) * 100,
+                      "instance", "$1", "instance", "([^:]+):.*"
+                    )
+                  )
+                )
+                """);
+
+        tasks.put("node_disk", """
+                avg_over_time(
+                  label_replace(
+                    irate(node_disk_io_time_seconds_total[10m]),
+                    "instance", "$1", "instance", "([^:]+):.*"
+                  )[24h:1m]
+                )
+                """);
+
+        tasks.put("node_inode", """
+                topk(10, (1 - node_filesystem_files_free / node_filesystem_files) * 100)
+                """);
+
+        tasks.put("node_fd", """
+                topk(10, (node_filefd_allocated / node_filefd_maximum) * 100)
+                """);
+
+        tasks.put("node_disk_usage", """
+                topk(10, (1 - node_filesystem_avail_bytes{mountpoint="/"} / node_filesystem_size_bytes{mountpoint="/"}) * 100)
+                """);
+
+        // Peak number of concurrently ESTABLISHED TCP connections over 24h
+        tasks.put("net_tcp_est_max", """
+                max_over_time(label_replace(node_netstat_Tcp_CurrEstab, "state", "ESTABLISHED", "", "")[24h:])
+                """);
+
+        // Peak backlog of TCP connections waiting to close (TIME_WAIT) over 24h
+        tasks.put("net_tcp_tw_max", """
+                max_over_time(label_replace(node_sockstat_TCP_tw, "state", "TIME_WAIT", "", "")[24h:])
+                """);
+
+        // TCP accept-queue overflows (ListenOverflows), 24h increase;
+        // anything > 0 means the application backlog filled up and new connections were dropped
+        tasks.put("net_tcp_overflow", """
+                topk(10, increase(node_netstat_TcpExt_ListenOverflows[24h]))
+                """);
+
+        // TCP listen drops (TcpExt_ListenDrops), 24h increase
+        tasks.put("net_tcp_drops", """
+                topk(10, increase(node_netstat_TcpExt_ListenDrops[24h]))
+                """);
+
+        // Three newly added tasks
+        tasks.put("node_oom", "increase(node_vmstat_oom_kill[24h])");
+        tasks.put("node_clock", "abs(node_timex_offset_seconds)");
+        tasks.put("node_ro_fs", "node_filesystem_readonly{mountpoint='/'}");
+        // Node-level context switches per second
+        tasks.put("node_context_switch", """
+                topk(10, avg_over_time(rate(node_context_switches_total[5m])[24h:1m]))
+                """);
+
+        return prometheusClientManager.getClient()
+                .fetchAllMetrics(tasks)
+                .thenApply(this::processResults1) // processResults1 must return a DailyReportDTO for this pipeline
+                .join();
+    }
+
+    public DailyReportDTO processResults00(Map<String, String> results) {
+        DailyReportDTO report = new DailyReportDTO();
+
+        try {
+            report.setCpuThrottled(parsePrometheusJson(results.get("container_cpu"), "name"));
+            report.setMemRisk(parsePrometheusJson(results.get("container_mem"), "name"));
+            report.setDiskIo(parsePrometheusJson(results.get("node_disk"), "instance"));
+
+            // Simple status heuristic
+            if (report.getMemRisk().stream().anyMatch(i -> i.getValue() > 90)) {
+                report.setStatusSummary("Memory risk detected");
+            }
+        } catch (Exception e) {
+            log.error("Failed to parse monitoring data", e);
+        }
+
+        return report;
+    }
+
+    // Global risk thresholds
+    private static final double MEM_RISK_THRESHOLD = 30.0;        // flag containers above 30% of their memory limit
+    private static final double CPU_THROTTLE_THRESHOLD = 1000.0;  // flag containers throttled for more than 1000s in 24h
+    private static final double DISK_IO_THRESHOLD = 1.0;          // flag disks that are busy ~100% of the time (ratio >= 1.0)
+    private static final double INODE_RISK_THRESHOLD = 10.0;      // flag filesystems with more than 10% of inodes used
+    private static final double FD_RISK_THRESHOLD = 10.0;         // flag nodes with more than 10% of file descriptors in use
+    private static final double DISK_USAGE_THRESHOLD = 80.0;      // flag filesystems that are more than 80% full
+    private static final double TCP_EST_THRESHOLD = 5000.0;       // flag hosts above 5000 concurrent connections (tune per workload)
+    private static final double TCP_TW_THRESHOLD = 50.0;          // flag hosts with more than 50 TIME_WAIT sockets
+    private static final double NET_DROP_THRESHOLD = 1.0;         // a single overflow or drop is already worth flagging
+    private static final double OOM_KILL_THRESHOLD = 1.0;         // any OOM kill within 24h
+    private static final double CLOCK_SKEW_THRESHOLD = 0.5;       // clock offset beyond 500ms
+    private static final double READONLY_FS_THRESHOLD = 1.0;      // a read-only root filesystem exists
+    private static final double ZOMBIE_PROCS_THRESHOLD = 5.0;     // more than 5 zombie processes
+    // assuming roughly 8-core machines; flag more than 50,000 context switches per second
+    private static final double CONTEXT_SWITCH_THRESHOLD = 50000.0;
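+    // All of these cutoffs are environment-specific heuristics rather than universal limits;
+    // revisit them together with the PromQL in getDailyReportData() when the fleet changes.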
+
+    public DailyReportDTO processResults1(Map<String, String> results) {
+        DailyReportDTO report = new DailyReportDTO();
+        try {
+            // 1. Parse and filter CPU throttling (keep only heavily throttled containers)
+            report.setCpuThrottled(
+                    parseAndFilter(results.get("container_cpu"), "name", CPU_THROTTLE_THRESHOLD)
+            );
+
+            // 2. Parse and filter memory risk (keep only containers close to their limit)
+            report.setMemRisk(
+                    parseAndFilter(results.get("container_mem"), "name", MEM_RISK_THRESHOLD)
+            );
+
+            // 3. Parse and filter disk IO (keep only nodes under heavy load)
+            report.setDiskIo(
+                    parseAndFilter(results.get("node_disk"), "instance", DISK_IO_THRESHOLD)
+            );
+
+            // 4. Inode exhaustion risk
+            report.setInodeRisk(
+                    parseAndFilter(results.get("node_inode"), "instance", INODE_RISK_THRESHOLD)
+            );
+
+            // 5. File-descriptor risk
+            report.setFdRisk(
+                    parseAndFilter(results.get("node_fd"), "instance", FD_RISK_THRESHOLD)
+            );
+
+            // 6. Disk-space usage risk
+            report.setDiskUsageRisk(
+                    parseAndFilter(results.get("node_disk_usage"), "instance", DISK_USAGE_THRESHOLD)
+            );
+
+            // 7. 24h peak of ESTABLISHED TCP connections
+            report.setNetEstMax(
+                    parseAndFilter(results.get("net_tcp_est_max"), "state", TCP_EST_THRESHOLD)
+            );
+
+            // 8. 24h TCP TIME_WAIT risk
+            report.setNetTwMax(
+                    parseAndFilter(results.get("net_tcp_tw_max"), "state", TCP_TW_THRESHOLD)
+            );
+
+            // 9. Accept-queue overflows and listen drops
+            report.setNetOverflows(
+                    parseAndFilter(results.get("net_tcp_overflow"), "instance", NET_DROP_THRESHOLD)
+            );
+            report.setNetDrops(
+                    parseAndFilter(results.get("net_tcp_drops"), "instance", NET_DROP_THRESHOLD)
+            );
+
+            if (!report.getMemRisk().isEmpty()) {
+                report.getAdvices().put("memRisk", "Memory risk triage checklist");
+            }
+
+            // 10. Context-switch risk
+            report.setContextSwitchRisk(
+                    parseAndFilter(results.get("node_context_switch"), "instance", CONTEXT_SWITCH_THRESHOLD)
+            );
+
+            // Aggregate status
+            long totalIssues = report.getCpuThrottled().size() +
+                    report.getMemRisk().size() +
+                    report.getDiskIo().size() +
+                    report.getInodeRisk().size() +
+                    report.getFdRisk().size() +
+                    report.getDiskUsageRisk().size() +
+                    report.getNetEstMax().size() +
+                    report.getNetTwMax().size() +
+                    report.getNetOverflows().size() +
+                    report.getNetDrops().size();
+
+            report.setStatusSummary(totalIssues > 0 ? "Found " + totalIssues + " issue(s) needing attention" : "All metrics nominal");
+        } catch (Exception e) {
+            log.error("Failed to build the daily report", e);
+        }
+
+        return report;
+    }
+
+    private List<DailyReportDTO.MetricItem> parseAndFilter(String json, String nameLabel, double threshold) throws Exception {
+        List<DailyReportDTO.MetricItem> filteredItems = new ArrayList<>();
+        JsonNode resultNode = objectMapper.readTree(json).path("data").path("result");
+
+        if (resultNode.isArray()) {
+            for (JsonNode node : resultNode) {
+                double val = node.path("value").get(1).asDouble();
+
+                // Core rule: only values beyond the threshold make it into the report
+                if (val >= threshold) {
+                    String name = node.path("metric").path(nameLabel).asText("unknown");
+                    String instance = node.path("metric").path("instance").asText("unknown");
+                    String name1 = String.format("%s_%s", instance, name);
+                    String job = node.path("metric").path("job").asText("unknown");
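+                    // node-exporter series carry no container "name" label, so fall back to the
+                    // host part of "instance" (everything before the ":port") as the display name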
+                    if ("node-exporter".equals(job)) {
+                        name1 = instance.split(":")[0];
+                        instance = name1;
+                    }
+
+                    filteredItems.add(new DailyReportDTO.MetricItem(name1, instance, Math.round(val * 100.0) / 100.0, 0.0));
+                }
+            }
+        }
+        // Sort by value, descending, so the worst offenders come first
+        filteredItems.sort((a, b) -> b.getValue().compareTo(a.getValue()));
+        return filteredItems;
+    }
+
+    private List<DailyReportDTO.MetricItem> parsePrometheusJson(String json, String nameLabel) throws Exception {
+        List<DailyReportDTO.MetricItem> items = new ArrayList<>();
+        JsonNode root = objectMapper.readTree(json);
+        JsonNode resultNode = root.path("data").path("result");
+
+        if (resultNode.isArray()) {
+            for (JsonNode node : resultNode) {
+                JsonNode metric = node.path("metric");
+                // Prometheus instant queries return value as [timestamp, "value"]
+                JsonNode valueArray = node.path("value");
+                if (valueArray.isMissingNode()) {
+                    // for a range query, take the last entry of the "values" array instead
+                    valueArray = node.path("values").get(node.path("values").size() - 1);
+                }
+
+                String name = metric.path(nameLabel).asText("unknown");
+                String instance = metric.path("instance").asText("unknown");
+                Double val = valueArray.get(1).asDouble();
+
+                items.add(new DailyReportDTO.MetricItem(name, instance, Math.round(val * 100.0) / 100.0, 0.0));
+            }
+        }
+        return items;
+    }
+
     public static void main(String[] args) throws Exception {
-        PrometheusService prometheusService = new PrometheusService();
-        prometheusService.generateContainerReport1();
-        prometheusService.generateContainerReport();
-        prometheusService.generatePillarReport();
-        prometheusService.generateDailyReport();
+        // The no-arg constructor is gone; construct with a PrometheusClientManager (and a Cache)
+        // or let Spring wire the service before invoking the generators below.
+        //PrometheusService prometheusService = new PrometheusService(prometheusClientManager, null);
+        //prometheusService.generateContainerReport1();
+        //prometheusService.generateContainerReport();
+        //prometheusService.generatePillarReport();
+        //prometheusService.generateDailyReport();
     }
 }