Changes to influx batch writes.

parent 80138b8d57
commit bd4cb89892

README.md | 14

@@ -2,24 +2,16 @@
 Small utility to fetch metrics from one or more HMC's and push those to an InfluxDB time-series database.
 
-## Known Problems
-
-- When running on Windows, the data is collected and written to InfluxDB, but in Grafana there is no data.
-
 
 ## Usage Instructions
 
-### Create Configuration
-
-- Ensure you have correct date/time and NTP running to keep it accurate.
-
-Modify the **/opt/hmci/conf/hmci.groovy** configuration file to suit your environment.
-
 
 ### Run HMCi Tool
 
+Requires Java 8+ runtime
+Modify the */opt/hmci/conf/hmci.groovy* configuration file to suit your environment and run the program:
 
     /opt/hmci/bin/hmci
 
 Configure Grafana to communicate with your InfluxDB and import dashboards from *doc/* into Grafana. The dashboards are slightly modified versions of the dashboard provided by the nmon2influxdb tool.
 
 
 ## Development Information
@@ -2,8 +2,8 @@
 Configuration for HMCi
 */
 
-hmci.refresh = 60
-hmci.rescan = 15
+hmci.refresh = 30
+hmci.rescan = 60
 
 // InfluxDB to save metrics
 influx {
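Note: refresh drops from 60 to 30 seconds, while the HMC rescan interval rises from 15 to 60, presumably counted in refresh cycles given the executions counter in the run loop below. The loading code is not part of this diff, but here is a minimal sketch of how such a Groovy config file could be read with the standard ConfigSlurper, assuming the path from the README:

    // Sketch only; path and property names taken from this diff.
    def config = new ConfigSlurper().parse(new File("/opt/hmci/conf/hmci.groovy").toURI().toURL())
    Integer refresh = config.hmci.refresh as Integer  // seconds between metric collections
    Integer rescan  = config.hmci.rescan as Integer   // refresh cycles between HMC re-discovery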
@@ -60,9 +60,11 @@ class App implements Runnable {
 
         hmcClients.each { hmcId, hmcClient ->
 
-            log.info("Loggin in to HMC " + hmcId)
-            try {
+            hmcClient.logoff()
+            hmcClient.login()
+            log.info("Logging in to HMC " + hmcId)
+            try {
                 hmcClient.getManagedSystems().each { systemId, system ->
 
                     // Add to list of known systems
 
@@ -185,13 +187,15 @@ class App implements Runnable {
         while(keepRunning) {
 
             getMetricsForSystems()
-            writeMetricsForManagedSystems()
-
             getMetricsForPartitions()
 
+            writeMetricsForManagedSystems()
             writeMetricsForLogicalPartitions()
+            influxClient.writeBatchPoints()
 
             // Refresh HMC's
-            if(executions % rescanHmcEvery) {
+            if(executions > rescanHmcEvery) {
                 executions = 0
                 discover()
             }
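Note: two fixes land in this loop. All buffered points are now flushed once per cycle through influxClient.writeBatchPoints(), and the rescan test changes from executions % rescanHmcEvery, which is truthy on every iteration except exact multiples and so triggered discover() almost every cycle, to executions > rescanHmcEvery. A minimal sketch of the intended cadence, assuming executions is incremented once per iteration elsewhere in run():

    int executions = 0
    int rescanHmcEvery = 60              // hmci.rescan from the config
    while(keepRunning) {
        // ... collect metrics, buffer points, flush once ...
        executions++
        if(executions > rescanHmcEvery) {
            executions = 0
            discover()                   // look for new or removed managed systems
        }
        Thread.sleep(30 * 1000L)         // hmci.refresh, assumed to pace the loop
    }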
@@ -55,10 +55,6 @@ class HmcClient {
      */
     void login() throws IOException {
 
-        if(authToken) {
-            return
-        }
-
         String payload = """\
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <LogonRequest xmlns="http://www.ibm.com/xmlns/systems/power/firmware/web/mc/2012_10/" schemaVersion="V1_0">
@@ -100,6 +96,11 @@ class HmcClient {
      *
      */
     void logoff() {
+
+        if(!authToken) {
+            return
+        }
+
         URL absUrl = new URL(String.format("%s/rest/api/web/Logon", baseUrl))
         Request request = new Request.Builder()
             .url(absUrl)
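Note: together with the App change above, the session guard moves from login() to logoff(). login() now always requests a fresh token, since its early return on an existing authToken is removed, while logoff() becomes a no-op when no session exists. The resulting call pattern per discovery pass, sketched from the diff:

    hmcClients.each { hmcId, hmcClient ->
        hmcClient.logoff()   // safe even without a session, thanks to the new guard
        hmcClient.login()    // always obtains a fresh authToken
        // ... fetch managed systems with the fresh token ...
    }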
@@ -272,7 +273,13 @@ class HmcClient {
             .build();
 
         Response response = client.newCall(request).execute();
-        if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
+        if (!response.isSuccessful()) {
+            if(response.code == 401) {
+                login()
+            } else {
+                throw new IOException("Unexpected code " + response)
+            }
+        };
 
         return response
     }
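Note: on a 401 the client now re-authenticates instead of throwing, but the failed response is still what gets returned; the request is not retried with the new token, so the caller sees one failed fetch before the next attempt succeeds. A hypothetical retry-once variant, not part of this commit, where newRequest() stands in for rebuilding the request with the refreshed session token header:

    Response response = client.newCall(request).execute()
    if(response.code == 401) {
        response.close()
        login()                                            // refresh authToken
        response = client.newCall(newRequest()).execute()  // hypothetical rebuild with new token header
    }
    if(!response.isSuccessful()) {
        throw new IOException("Unexpected code " + response)
    }
    return response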
@@ -20,6 +20,7 @@ class InfluxClient {
     final String database
 
     InfluxDB influxDB
+    BatchPoints batchPoints
 
     InfluxClient(String url, String username, String password, String database) {
         this.url = url
@@ -33,6 +34,15 @@ class InfluxClient {
         try {
             influxDB = InfluxDBFactory.connect(url, username, password);
             createDatabase()
+
+            // Enable batch writes to get better performance.
+            BatchOptions options = BatchOptions.DEFAULTS.actions(300).flushDuration(500);
+            influxDB.enableBatch(options);
+
+            influxDB.setLogLevel(InfluxDB.LogLevel.BASIC);
+
+            batchPoints = BatchPoints.database(database).precision(TimeUnit.SECONDS).build();
+
         } catch(Exception e) {
             log.error(e.message)
             throw new Exception(e)
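Note: with influxdb-java, enableBatch() buffers individual write(Point) calls and flushes when either threshold is reached: actions(300) caps the buffer at 300 points, flushDuration(500) caps the wait at 500 ms. An explicit write(BatchPoints), as writeBatchPoints() below uses, is sent immediately and bypasses this buffer. For completeness, recent influxdb-java versions also accept an exception handler on BatchOptions, an addition this commit does not use:

    // Sketch: same thresholds plus a handler for dropped batches (assumption:
    // a library version that provides BatchOptions.exceptionHandler).
    BatchOptions options = BatchOptions.DEFAULTS
            .actions(300)        // flush after 300 buffered points ...
            .flushDuration(500)  // ... or after 500 ms, whichever comes first
            .exceptionHandler({ points, throwable ->
                log.error("Batch write failed: " + throwable.message)
            })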
@@ -50,34 +60,21 @@ class InfluxClient {
         influxDB.query(new Query("CREATE DATABASE " + database));
         influxDB.setDatabase(database);
 
         /*
         // ... and a retention policy, if necessary.
         String retentionPolicyName = "HMCI_ONE_YEAR";
         influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName
             + " ON " + database + " DURATION 365d REPLICATION 1 DEFAULT"));
         influxDB.setRetentionPolicy(retentionPolicyName);
         */
-
-        // Enable batch writes to get better performance.
-        influxDB.enableBatch(BatchOptions.DEFAULTS);
-
     }
 
 
-    void write() {
-        // Write points to InfluxDB.
-        influxDB.write(Point.measurement("h2o_feet")
-            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
-            .tag("location", "santa_monica")
-            .addField("level description", "below 3 feet")
-            .addField("water_level", 2.064d)
-            .build());
-
-        influxDB.write(Point.measurement("h2o_feet")
-            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
-            .tag("location", "coyote_creek")
-            .addField("level description", "between 6 and 9 feet")
-            .addField("water_level", 8.12d)
-            .build());
-
+    void writeBatchPoints() {
+        log.debug("writeBatchPoints()")
+        influxDB.write(batchPoints);
+        //influxDB.flush()
     }
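Note: the removed write() method was the h2o_feet sample from the influxdb-java documentation. One caveat with its replacement: influxDB.write(batchPoints) does not empty the BatchPoints container, so each flush appears to re-send every point buffered since startup. A sketch of one way around that, assuming rebuilding the batch after each flush is acceptable (not done in this commit):

    void writeBatchPoints() {
        log.debug("writeBatchPoints()")
        influxDB.write(batchPoints)
        // Start a fresh batch so already-written points are not sent twice
        batchPoints = BatchPoints.database(database).precision(TimeUnit.SECONDS).build()
    }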
@@ -100,7 +97,7 @@ class InfluxClient {
             return
         }
 
-        BatchPoints batchPoints = BatchPoints.database(database).build();
+        //BatchPoints batchPoints = BatchPoints.database(database).build();
 
         getSystemMemory(system, timestamp).each {
             batchPoints.point(it)
@@ -122,7 +119,6 @@ class InfluxClient {
             batchPoints.point(it)
         }
 
-        influxDB.write(batchPoints);
     }
 
     private static List<Point> getSystemMemory(ManagedSystem system, Instant timestamp) {
@@ -168,7 +164,7 @@ class InfluxClient {
             return
         }
 
-        BatchPoints batchPoints = BatchPoints.database(database).build();
+        //BatchPoints batchPoints = BatchPoints.database(database).build();
 
         getPartitionMemory(partition, timestamp).each {
             batchPoints.point(it)
@@ -186,7 +182,7 @@ class InfluxClient {
             batchPoints.point(it)
         }
 
-        influxDB.write(batchPoints);
+        //influxDB.write(batchPoints);
     }
 
     private static List<Point> getPartitionMemory(LogicalPartition partition, Instant timestamp) {
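Note: in both write paths the local BatchPoints and its immediate influxDB.write are commented out, so batchPoints now resolves to the class field added at the top of InfluxClient: these methods only buffer, and the single network write per cycle happens in writeBatchPoints(), called from the App run loop:

    // Per collection cycle, as wired up after this commit:
    writeMetricsForManagedSystems()     // batchPoints.point(...) buffers only
    writeMetricsForLogicalPartitions()  // same
    influxClient.writeBatchPoints()     // one HTTP write for the whole cycle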
@@ -21,15 +21,18 @@ abstract class MetaSystem {
 
     Instant getTimestamp() {
 
-        String timeStamp = metrics.systemUtil.utilSamples.first().sampleInfo.timeStamp
+        String timestamp = metrics.systemUtil.utilSamples.first().sampleInfo.timeStamp
         Instant instant
         try {
+            log.debug("getTimeStamp() - PMC Timestamp: " + timestamp)
             DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss[XXX][X]");
-            instant = Instant.from(dateTimeFormatter.parse(timeStamp))
+            instant = Instant.from(dateTimeFormatter.parse(timestamp))
+            log.debug("getTimestamp() - Instant: " + instant.toString())
         } catch(DateTimeParseException e) {
-            log.warn("getTimestamp() - parse error: " + timeStamp)
+            log.warn("getTimestamp() - parse error: " + timestamp)
         }
-        return instant
+
+        return instant ?: Instant.now()
     }
 
 }
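Note: the key line is the last one: on a parse failure getTimestamp() now falls back to Instant.now() via the Elvis operator instead of returning null, so every point keeps a usable timestamp, possibly related to the Known Problems entry removed from the README above. The pattern's two optional sections accept zone offsets with and without a colon; a small self-contained check:

    import java.time.Instant
    import java.time.format.DateTimeFormatter

    def fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss[XXX][X]")
    assert Instant.from(fmt.parse("2020-08-14T10:15:30+02:00"))  // [XXX] form, with colon
    assert Instant.from(fmt.parse("2020-08-14T10:15:30Z"))       // Z is handled as well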