- Sort measurement tags before writing to InfluxDB.

- Update 3rd party dependencies.
This commit is contained in:
Mark Nellemann 2022-03-01 20:09:35 +01:00
parent 6c2993ed97
commit 28b4cdbea8
5 changed files with 36 additions and 17 deletions

17
CHANGELOG.md Normal file
View file

@@ -0,0 +1,17 @@
# Changelog
All notable changes to this project will be documented in this file.
## [1.2.8] - 2022-02-28
### Changed
- Sort measurement tags before writing to InfluxDB.
- Update 3rd party dependencies.
## [1.2.7] - 2022-02-24
### Added
- Options to include/exclude Managed Systems and/or Logical Partitions.
[1.2.8]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.2.8%0Dv1.2.7
[1.2.7]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.2.7%0Dv1.2.6
[1.2.6]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.2.6%0Dv1.2.5

View file

@@ -23,24 +23,24 @@ targetCompatibility = projectJavaVersion
dependencies { dependencies {
annotationProcessor 'info.picocli:picocli-codegen:4.6.2' annotationProcessor 'info.picocli:picocli-codegen:4.6.2'
implementation 'info.picocli:picocli:4.6.1' implementation 'info.picocli:picocli:4.6.3'
implementation 'org.jsoup:jsoup:1.14.3' implementation 'org.jsoup:jsoup:1.14.3'
implementation 'com.squareup.okhttp3:okhttp:4.9.3' implementation 'com.squareup.okhttp3:okhttp:4.9.3'
implementation 'com.squareup.moshi:moshi:1.13.0' implementation 'com.squareup.moshi:moshi:1.13.0'
implementation 'com.serjltt.moshi:moshi-lazy-adapters:2.2' implementation 'com.serjltt.moshi:moshi-lazy-adapters:2.2'
implementation 'org.tomlj:tomlj:1.0.0' implementation 'org.tomlj:tomlj:1.0.0'
implementation 'org.influxdb:influxdb-java:2.22' implementation 'org.influxdb:influxdb-java:2.22'
implementation 'org.slf4j:slf4j-api:1.7.35' implementation 'org.slf4j:slf4j-api:1.7.36'
implementation 'org.slf4j:slf4j-simple:1.7.35' implementation 'org.slf4j:slf4j-simple:1.7.36'
testImplementation 'org.spockframework:spock-core:2.0-groovy-3.0' testImplementation 'org.spockframework:spock-core:2.0-groovy-3.0'
testImplementation 'com.squareup.okhttp3:mockwebserver:4.9.3' testImplementation 'com.squareup.okhttp3:mockwebserver:4.9.3'
testImplementation 'org.slf4j:slf4j-simple:1.7.35' testImplementation 'org.slf4j:slf4j-simple:1.7.36'
} }
application { application {
mainClass.set('biz.nellemann.hmci.Application') mainClass.set('biz.nellemann.hmci.Application')
applicationDefaultJvmArgs = [ "-server", "-Xms256m", "-Xmx1024m", "-XX:+UseG1GC" ] applicationDefaultJvmArgs = [ "-server", "-Xms64m", "-Xmx512m", "-XX:+UseG1GC" ]
} }
test { test {

View file

@@ -1,4 +1,4 @@
projectId = hmci projectId = hmci
projectGroup = biz.nellemann.hmci projectGroup = biz.nellemann.hmci
projectVersion = 1.2.7 projectVersion = 1.2.8
projectJavaVersion = 1.8 projectJavaVersion = 1.8

View file

@@ -118,6 +118,7 @@ class HmcInstance implements Runnable {
} }
} catch (Exception e) { } catch (Exception e) {
log.error("run() - fatal error: {}", e.getMessage()); log.error("run() - fatal error: {}", e.getMessage());
keepRunning.set(false);
throw new RuntimeException(e); throw new RuntimeException(e);
} }
@@ -153,7 +154,7 @@ class HmcInstance implements Runnable {
void discover() { void discover() {
log.trace("discover()"); log.info("discover() - Querying HMC for Managed Systems and Logical Partitions");
Map<String, LogicalPartition> tmpPartitions = new HashMap<>(); Map<String, LogicalPartition> tmpPartitions = new HashMap<>();
@@ -167,13 +168,13 @@ class HmcInstance implements Runnable {
// Check excludeSystems and includeSystems // Check excludeSystems and includeSystems
if(!excludeSystems.contains(system.name) && includeSystems.isEmpty()) { if(!excludeSystems.contains(system.name) && includeSystems.isEmpty()) {
systems.put(systemId, system); systems.put(systemId, system);
log.info("discover() - Adding ManagedSystem: {}", system); log.info("discover() - ManagedSystem: {}", system);
if (doEnergy) { if (doEnergy) {
hmcRestClient.enableEnergyMonitoring(system); hmcRestClient.enableEnergyMonitoring(system);
} }
} else if(!includeSystems.isEmpty() && includeSystems.contains(system.name)) { } else if(!includeSystems.isEmpty() && includeSystems.contains(system.name)) {
systems.put(systemId, system); systems.put(systemId, system);
log.info("discover() - Adding ManagedSystem (include): {}", system); log.info("discover() - ManagedSystem (include): {}", system);
if (doEnergy) { if (doEnergy) {
hmcRestClient.enableEnergyMonitoring(system); hmcRestClient.enableEnergyMonitoring(system);
} }
@@ -193,10 +194,10 @@ class HmcInstance implements Runnable {
tmpPartitions.forEach((lparKey, lpar) -> { tmpPartitions.forEach((lparKey, lpar) -> {
if(!excludePartitions.contains(lpar.name) && includePartitions.isEmpty()) { if(!excludePartitions.contains(lpar.name) && includePartitions.isEmpty()) {
partitions.put(lparKey, lpar); partitions.put(lparKey, lpar);
log.info("discover() - Adding LogicalPartition: {}", lpar); log.info("discover() - LogicalPartition: {}", lpar);
} else if(!includePartitions.isEmpty() && includePartitions.contains(lpar.name)) { } else if(!includePartitions.isEmpty() && includePartitions.contains(lpar.name)) {
partitions.put(lparKey, lpar); partitions.put(lparKey, lpar);
log.info("discover() - Adding LogicalPartition (include): {}", lpar); log.info("discover() - LogicalPartition (include): {}", lpar);
} else { } else {
log.debug("discover() - Skipping LogicalPartition: {}", lpar); log.debug("discover() - Skipping LogicalPartition: {}", lpar);
} }

View file

@@ -21,13 +21,14 @@ import org.influxdb.InfluxDBException;
import org.influxdb.InfluxDBFactory; import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints; import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point; import org.influxdb.dto.Point;
import org.influxdb.dto.Query;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import java.time.Instant; import java.time.Instant;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeUnit;
import static java.lang.Thread.sleep; import static java.lang.Thread.sleep;
@@ -95,7 +96,7 @@ class InfluxClient {
synchronized void writeBatchPoints() throws Exception { synchronized void writeBatchPoints() throws Exception {
log.trace("writeBatchPoints()"); log.trace("writeBatchPoints()");
try { try {
influxDB.writeWithRetry(batchPoints); influxDB.write(batchPoints);
errorCounter = 0; errorCounter = 0;
} catch (InfluxDBException.DatabaseNotFoundException e) { } catch (InfluxDBException.DatabaseNotFoundException e) {
log.error("writeBatchPoints() - database \"{}\" not found/created: can't write data", database); log.error("writeBatchPoints() - database \"{}\" not found/created: can't write data", database);
@@ -350,7 +351,7 @@ class InfluxClient {
measurements.forEach( m -> { measurements.forEach( m -> {
Point.Builder builder = Point.measurement(measurement) Point.Builder builder = Point.measurement(measurement)
.time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS); .time(timestamp.getEpochSecond(), TimeUnit.SECONDS);
// Iterate fields // Iterate fields
m.fields.forEach((fieldName, fieldValue) -> { m.fields.forEach((fieldName, fieldValue) -> {
@@ -368,8 +369,9 @@ class InfluxClient {
} }
}); });
// Iterate tags // Iterate sorted tags
m.tags.forEach((tagName, tagValue) -> { Map<String, String> sortedTags = new TreeMap<String, String>(m.tags);
sortedTags.forEach((tagName, tagValue) -> {
log.trace("processMeasurementMap() {} - tagName: {}, tagValue: {}", measurement, tagName, tagValue); log.trace("processMeasurementMap() {} - tagName: {}, tagValue: {}", measurement, tagName, tagValue);
builder.tag(tagName, tagValue); builder.tag(tagName, tagValue);
}); });
@@ -381,5 +383,4 @@ class InfluxClient {
return listOfPoints; return listOfPoints;
} }
} }