Update 3rd party dependencies and implement an option to disable the query for energy metrics

Mark Nellemann 2022-02-08 21:39:42 +01:00
parent 33f96efae9
commit e0dc65a1e6
15 changed files with 184 additions and 178 deletions

View File: README.md

@ -41,12 +41,21 @@ There are a few steps in the installation.
- Enable *Performance Monitoring Data Collection for Managed Servers*: **All On**
- Set *Performance Data Storage* to **1** day or preferably more
If you do not enable *Performance Monitoring Data Collection for Managed Servers*, you will see errors such as *Unexpected response: 403*. Use the *hmci* debug flag to get more details about what is going on.
### 2 - InfluxDB and Grafana Installation
Install InfluxDB on an LPAR or VM that is network accessible by the *HMCi* utility (the default InfluxDB port is 8086). You can install Grafana on the same server or on any server that is able to connect to the InfluxDB database. The Grafana installation needs to be accessible from your browser. The default settings for both InfluxDB and Grafana will work fine as a start.
Install InfluxDB (v. **1.8** for best compatibility with Grafana) on an LPAR or VM that is network accessible by the *HMCi* utility (the default InfluxDB port is 8086). You can install Grafana on the same server or on any server that is able to connect to the InfluxDB database. The Grafana installation needs to be accessible from your browser. The default settings for both InfluxDB and Grafana will work fine as a start.
- You can download [Grafana ppc64le](https://www.power-devops.com/grafana) and [InfluxDB ppc64le](https://www.power-devops.com/influxdb) packages for most Linux distributions and AIX on the [Power DevOps](https://www.power-devops.com/) site.
- Binaries for amd64/x86 are available from the [Grafana website](https://grafana.com/grafana/download) and [InfluxDB website](https://portal.influxdata.com/downloads/), and most likely directly from your Linux distribution's repositories.
- Create the empty *hmci* database through the **influx** CLI command:
```text
CREATE DATABASE "hmci" WITH DURATION 365d REPLICATION 1;
```
See the [Influx documentation](https://docs.influxdata.com/influxdb/v1.8/query_language/manage-database/#create-database) for more information on duration and replication.
### 3 - HMCi Installation & Configuration
@ -59,7 +68,7 @@ Install *HMCi* on a host, which can connect to the Power HMC through HTTPS, and
- On DEB based systems: **sudo dpkg -i hmci_x.y.z-n_all.deb**
- Copy the **/opt/hmci/doc/hmci.toml** configuration example into **/etc/hmci.toml** and edit the configuration to suit your environment. The location of the configuration file can be changed with the *--conf* option.
- Run the **/opt/hmci/bin/hmci** program in a shell, as a @reboot cron task, or configure it as a proper service - there are instructions in the *doc/readme-service.md* file.
- When started, *hmci* will try to create the InfluxDB database named hmci, if not found.
- When started, *hmci* expects the InfluxDB database to be created by you.
### 4 - Grafana Configuration

View File: build.gradle

@ -6,9 +6,9 @@ plugins {
// Code coverage of tests
id 'jacoco'
id "com.github.johnrengelman.shadow" version "7.0.0"
id "net.nemerosa.versioning" version "2.14.0"
id "nebula.ospackage" version "8.5.6"
id "com.github.johnrengelman.shadow" version "7.1.2"
id "net.nemerosa.versioning" version "2.15.1"
id "nebula.ospackage" version "9.1.1"
}
repositories {
@ -16,21 +16,26 @@ repositories {
mavenLocal()
}
group = projectGroup
version = projectVersion
sourceCompatibility = projectJavaVersion
targetCompatibility = projectJavaVersion
dependencies {
annotationProcessor 'info.picocli:picocli-codegen:4.6.1'
annotationProcessor 'info.picocli:picocli-codegen:4.6.2'
implementation 'info.picocli:picocli:4.6.1'
implementation 'org.jsoup:jsoup:1.14.3'
implementation 'com.squareup.okhttp3:okhttp:4.9.2'
implementation 'com.squareup.moshi:moshi:1.12.0'
implementation 'com.squareup.okhttp3:okhttp:4.9.3'
implementation 'com.squareup.moshi:moshi:1.13.0'
implementation 'com.serjltt.moshi:moshi-lazy-adapters:2.2'
implementation 'org.tomlj:tomlj:1.0.0'
implementation 'org.influxdb:influxdb-java:2.21'
implementation 'org.slf4j:slf4j-api:1.7.32'
implementation 'org.slf4j:slf4j-simple:1.7.32'
implementation 'org.slf4j:slf4j-api:1.7.35'
implementation 'org.slf4j:slf4j-simple:1.7.35'
testImplementation 'org.spockframework:spock-core:2.0-groovy-3.0'
testImplementation 'com.squareup.okhttp3:mockwebserver:4.9.2'
testImplementation 'org.slf4j:slf4j-simple:1.7.32'
testImplementation 'com.squareup.okhttp3:mockwebserver:4.9.3'
testImplementation 'org.slf4j:slf4j-simple:1.7.35'
}
application {
@ -120,12 +125,7 @@ jar {
tasks.create("packages") {
group "build"
dependsOn ":build"
dependsOn ":buildDeb"
dependsOn ":buildRpm"
}
sourceCompatibility = 1.8
targetCompatibility = 1.8

View File: doc/hmci.toml

@ -14,20 +14,18 @@ username = "root"
password = ""
database = "hmci"
# One or more HMCs to query for data and metrics
[hmc]
# HMC on our primary site
# HMC to query for data and metrics
[hmc.site1]
url = "https://10.10.10.10:12443"
username = "hmci"
password = "hmcihmci"
unsafe = true # Ignore SSL cert. errors
# Example
# Another HMC example
#[hmc.site2]
#url = "https://10.10.20.20:12443"
#username = "viewer"
#password = "someSecret"
#unsafe = false
#unsafe = false # When false, validate the SSL/TLS certificate; the default is false
#energy = false # When false, do not collect energy metrics, default is true
#trace = "/tmp/hmci-trace" # When present, store JSON metrics files from HMC into this folder

View File: gradle.properties

@ -1,3 +1,4 @@
id = hmci
group = biz.nellemann.hmci
version = 1.2.5
projectId = hmci
projectGroup = biz.nellemann.hmci
projectVersion = 1.2.6
projectJavaVersion = 1.8

View File: settings.gradle

@ -1,10 +1 @@
/*
* This file was generated by the Gradle 'init' task.
*
* The settings file is used to specify which projects to include in your build.
*
* Detailed information about configuring a multi-project build in Gradle can be found
* in the user manual at https://docs.gradle.org/6.5.1/userguide/multi_project_builds.html
*/
rootProject.name = 'hmci'

View File: Configuration.java

@ -91,6 +91,12 @@ public final class Configuration {
c.unsafe = false;
}
if(hmcTable.contains(key+".energy")) {
c.energy = hmcTable.getBoolean(key+".energy");
} else {
c.energy = true;
}
if(hmcTable.contains(key+".trace")) {
c.trace = hmcTable.getString(key+".trace");
} else {
@ -185,6 +191,7 @@ public final class Configuration {
String username;
String password;
Boolean unsafe = false;
Boolean energy = true;
String trace;
Long update = 30L;
Long rescan = 60L;
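
For context, the new flag is read with the same tomlj API that Configuration already uses. A minimal, self-contained sketch of the default-to-true lookup follows; the inline TOML string and class name are illustrative, not part of the commit:

```java
import org.tomlj.Toml;
import org.tomlj.TomlParseResult;
import org.tomlj.TomlTable;

public class EnergyFlagSketch {
    public static void main(String[] args) {
        // Hypothetical inline config mirroring doc/hmci.toml
        TomlParseResult result = Toml.parse(
                "[hmc.site1]\n" +
                "url = \"https://10.10.10.10:12443\"\n" +
                "energy = false\n");

        TomlTable hmcTable = result.getTable("hmc");
        String key = "site1";

        // Same default-to-true lookup as the Configuration change above
        boolean energy = hmcTable.contains(key + ".energy")
                ? hmcTable.getBoolean(key + ".energy")
                : true;

        System.out.println("energy = " + energy);   // prints: energy = false
    }
}
```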

View File: HmcInstance.java

@ -47,19 +47,22 @@ class HmcInstance implements Runnable {
private File traceDir;
private Boolean doTrace = false;
private Boolean doEnergy = true;
HmcInstance(HmcObject configHmc, InfluxClient influxClient) {
this.hmcId = configHmc.name;
this.updateValue = configHmc.update;
this.rescanValue = configHmc.rescan;
this.doEnergy = configHmc.energy;
this.influxClient = influxClient;
hmcRestClient = new HmcRestClient(configHmc.url, configHmc.username, configHmc.password, configHmc.unsafe);
log.debug(String.format("HmcInstance() - id: %s, update: %s, refresh %s", hmcId, updateValue, rescanValue));
log.debug("HmcInstance() - id: {}, update: {}, refresh {}", hmcId, updateValue, rescanValue);
if(configHmc.trace != null) {
try {
traceDir = new File(configHmc.trace);
if(traceDir.mkdirs() && traceDir.canWrite()) {
traceDir.mkdirs();
if(traceDir.canWrite()) {
doTrace = true;
} else {
log.warn("HmcInstance() - can't write to trace dir: " + traceDir.toString());
@ -80,7 +83,7 @@ class HmcInstance implements Runnable {
@Override
public void run() {
log.debug("run() - " + hmcId);
log.trace("run() - " + hmcId);
int executions = 0;
discover();
@ -88,7 +91,9 @@ class HmcInstance implements Runnable {
do {
Instant instantStart = Instant.now();
try {
getMetricsForEnergy();
if(doEnergy) {
getMetricsForEnergy();
}
getMetricsForSystems();
getMetricsForPartitions();
@ -109,11 +114,11 @@ class HmcInstance implements Runnable {
Instant instantEnd = Instant.now();
long timeSpend = Duration.between(instantStart, instantEnd).toMillis();
log.debug("run() - duration millis: " + timeSpend);
log.trace("run() - duration millis: " + timeSpend);
if(timeSpend < (updateValue * 1000)) {
try {
long sleepTime = (updateValue * 1000) - timeSpend;
log.debug("run() - sleeping millis: " + sleepTime);
log.trace("run() - sleeping millis: " + sleepTime);
if(sleepTime > 0) {
//noinspection BusyWait
sleep(sleepTime);
@ -122,7 +127,7 @@ class HmcInstance implements Runnable {
log.error("run() - sleep interrupted", e);
}
} else {
log.warn("run() - slow response from HMC");
log.warn("run() - possible slow response from this HMC");
}
} while (keepRunning.get());
@ -131,7 +136,7 @@ class HmcInstance implements Runnable {
try {
hmcRestClient.logoff();
} catch (IOException e) {
log.warn("run() - error logging out: " + e.getMessage());
log.warn("run() - error logging out of HMC: " + e.getMessage());
}
}
@ -139,7 +144,7 @@ class HmcInstance implements Runnable {
void discover() {
log.debug("discover()");
log.trace("discover()");
Map<String, LogicalPartition> tmpPartitions = new HashMap<>();
@ -151,7 +156,9 @@ class HmcInstance implements Runnable {
if(!systems.containsKey(systemId)) {
systems.put(systemId, system);
log.info("discover() - Found ManagedSystem: " + system);
hmcRestClient.enableEnergyMonitoring(system);
if(doEnergy) {
hmcRestClient.enableEnergyMonitoring(system);
}
}
// Get partitions for this system
@ -162,13 +169,13 @@ class HmcInstance implements Runnable {
partitions.putAll(tmpPartitions);
}
} catch (Exception e) {
log.warn("discover() - getLogicalPartitions", e);
log.warn("discover() - getLogicalPartitions error: {}", e.getMessage());
}
});
} catch(Exception e) {
log.warn("discover() - getManagedSystems: " + e.getMessage());
log.warn("discover() - getManagedSystems error: {}", e.getMessage());
}
@ -184,7 +191,7 @@ class HmcInstance implements Runnable {
try {
tmpJsonString = hmcRestClient.getPcmDataForManagedSystem(system);
} catch (Exception e) {
log.warn("getMetricsForSystems() " + e.getMessage());
log.warn("getMetricsForSystems() - error: {}", e.getMessage());
}
if(tmpJsonString != null && !tmpJsonString.isEmpty()) {
@ -212,7 +219,7 @@ class HmcInstance implements Runnable {
try {
tmpJsonString2 = hmcRestClient.getPcmDataForLogicalPartition(partition);
} catch (Exception e) {
log.warn("getMetricsForPartitions() - getPcmDataForLogicalPartition " + e.getMessage());
log.warn("getMetricsForPartitions() - getPcmDataForLogicalPartition error: {}", e.getMessage());
}
if(tmpJsonString2 != null && !tmpJsonString2.isEmpty()) {
partition.processMetrics(tmpJsonString2);
@ -224,7 +231,7 @@ class HmcInstance implements Runnable {
});
} catch(Exception e) {
log.warn("getMetricsForPartitions() " + e.getMessage());
log.warn("getMetricsForPartitions() - error: {}", e.getMessage());
}
}
@ -238,7 +245,7 @@ class HmcInstance implements Runnable {
try {
tmpJsonString = hmcRestClient.getPcmDataForEnergy(system.energy);
} catch (Exception e) {
log.warn("getMetricsForEnergy() " + e.getMessage());
log.warn("getMetricsForEnergy() - error: {}", e.getMessage());
}
if(tmpJsonString != null && !tmpJsonString.isEmpty()) {
@ -254,7 +261,7 @@ class HmcInstance implements Runnable {
try {
systems.forEach((systemId, system) -> influxClient.writeManagedSystem(system));
} catch (NullPointerException npe) {
log.warn("writeMetricsForManagedSystems() - NPE: " + npe.getMessage(), npe);
log.warn("writeMetricsForManagedSystems() - NPE: {}", npe.getMessage(), npe);
}
}
@ -263,7 +270,7 @@ class HmcInstance implements Runnable {
try {
partitions.forEach((partitionId, partition) -> influxClient.writeLogicalPartition(partition));
} catch (NullPointerException npe) {
log.warn("writeMetricsForLogicalPartitions() - NPE: " + npe.getMessage(), npe);
log.warn("writeMetricsForLogicalPartitions() - NPE: {}", npe.getMessage(), npe);
}
}
@ -272,7 +279,7 @@ class HmcInstance implements Runnable {
try {
systems.forEach((systemId, system) -> influxClient.writeSystemEnergy(system.energy));
} catch (NullPointerException npe) {
log.warn("writeMetricsForSystemEnergy() - NPE: " + npe.getMessage(), npe);
log.warn("writeMetricsForSystemEnergy() - NPE: {}", npe.getMessage(), npe);
}
}

View File: HmcRestClient.java

@ -107,7 +107,7 @@ public class HmcRestClient {
Response response = client.newCall(request).execute();
String responseBody = Objects.requireNonNull(response.body()).string();
if (!response.isSuccessful()) {
log.warn("login() - Unexpected response: " + response.code());
log.warn("login() - Unexpected response: {}", response.code());
throw new IOException("Unexpected code: " + response);
}
@ -115,10 +115,10 @@ public class HmcRestClient {
authToken = doc.select("X-API-Session").text();
log.debug("login() - Auth Token: " + authToken);
} catch (MalformedURLException e) {
log.error("login() - URL Error: " + e.getMessage());
log.error("login() - URL Error: {}", e.getMessage());
throw e;
} catch (Exception e) {
log.error("login() - Error: " + e.getMessage());
log.error("login() - Error: {}", e.getMessage());
throw e;
}
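
For reference, a minimal sketch of the okhttp request/response pattern this client uses, assuming a previously obtained session token; the URL, token value, and class name are illustrative, and TLS validation is left at okhttp defaults:

```java
import java.io.IOException;
import java.util.Objects;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;

public class HmcGetSketch {
    public static void main(String[] args) throws Exception {
        OkHttpClient client = new OkHttpClient();
        String authToken = "abc123";   // hypothetical token from a prior logon() call

        Request request = new Request.Builder()
                .url("https://10.10.10.10:12443/rest/api/uom/ManagedSystem")  // illustrative URL
                .addHeader("X-API-Session", authToken)
                .build();

        // Response is Closeable in okhttp 4.x, so try-with-resources releases it
        try (Response response = client.newCall(request).execute()) {
            if (!response.isSuccessful()) {
                throw new IOException("Unexpected response: " + response.code());
            }
            System.out.println(Objects.requireNonNull(response.body()).string());
        }
    }
}
```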
@ -146,7 +146,7 @@ public class HmcRestClient {
try {
client.newCall(request).execute();
} catch (IOException e) {
log.warn("logoff() error: " + e.getMessage());
log.warn("logoff() error: {}", e.getMessage());
} finally {
authToken = null;
}
@ -184,11 +184,11 @@ public class HmcRestClient {
el.select("MachineTypeModelAndSerialNumber > SerialNumber").text()
);
managedSystemsMap.put(system.id, system);
log.debug("getManagedSystems() - Found system: " + system);
log.debug("getManagedSystems() - Found system: {}", system);
}
} catch(Exception e) {
log.warn("getManagedSystems() - xml parse error", e);
log.warn("getManagedSystems() - XML parse error", e);
}
return managedSystemsMap;
@ -223,11 +223,11 @@ public class HmcRestClient {
system
);
partitionMap.put(logicalPartition.id, logicalPartition);
log.debug("getLogicalPartitionsForManagedSystem() - Found partition: " + logicalPartition);
log.debug("getLogicalPartitionsForManagedSystem() - Found partition: {}", logicalPartition);
}
} catch(Exception e) {
log.warn("getLogicalPartitionsForManagedSystem() - xml parse error", e);
log.warn("getLogicalPartitionsForManagedSystem() - XML parse error: {}", system, e);
}
return partitionMap;
@ -241,7 +241,7 @@ public class HmcRestClient {
*/
String getPcmDataForManagedSystem(ManagedSystem system) throws Exception {
log.trace("getPcmDataForManagedSystem() - " + system.id);
log.trace("getPcmDataForManagedSystem() - {}", system.id);
URL url = new URL(String.format("%s/rest/api/pcm/ManagedSystem/%s/ProcessedMetrics?NoOfSamples=1", baseUrl, system.id));
String responseBody = sendGetRequest(url);
String jsonBody = null;
@ -249,7 +249,7 @@ public class HmcRestClient {
// Do not try to parse empty response
if(responseBody == null || responseBody.length() <= 1) {
responseErrors++;
log.warn("getPcmDataForManagedSystem() - empty response, skipping: " + system.name);
log.warn("getPcmDataForManagedSystem() - empty response, skipping: {}", system.name);
return null;
}
@ -260,12 +260,12 @@ public class HmcRestClient {
if(Objects.requireNonNull(link).attr("type").equals("application/json")) {
String href = link.attr("href");
log.trace("getPcmDataForManagedSystem() - json url: " + href);
log.trace("getPcmDataForManagedSystem() - URL: {}", href);
jsonBody = sendGetRequest(new URL(href));
}
} catch(Exception e) {
log.warn("getPcmDataForManagedSystem() - xml parse error", e);
log.warn("getPcmDataForManagedSystem() - XML parse error: {}", system, e);
}
return jsonBody;
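
The JSON link extraction above relies on jsoup's XML parser; a minimal sketch under the assumption of a simplified feed fragment (the XML string and class name are illustrative):

```java
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.parser.Parser;

public class AtomLinkSketch {
    public static void main(String[] args) {
        // Simplified, hypothetical ProcessedMetrics feed fragment
        String xml = "<feed><entry>"
                + "<link type=\"application/json\" href=\"https://hmc:12443/pcm/data.json\"/>"
                + "</entry></feed>";

        Document doc = Jsoup.parse(xml, "", Parser.xmlParser());
        for (Element link : doc.select("link")) {
            if (link.attr("type").equals("application/json")) {
                // This href is the JSON metrics URL fetched next with sendGetRequest()
                System.out.println(link.attr("href"));
            }
        }
    }
}
```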
@ -279,7 +279,7 @@ public class HmcRestClient {
*/
String getPcmDataForLogicalPartition(LogicalPartition partition) throws Exception {
log.trace(String.format("getPcmDataForLogicalPartition() - %s @ %s", partition.id, partition.system.id));
log.trace("getPcmDataForLogicalPartition() - {} @ {}", partition.id, partition.system.id);
URL url = new URL(String.format("%s/rest/api/pcm/ManagedSystem/%s/LogicalPartition/%s/ProcessedMetrics?NoOfSamples=1", baseUrl, partition.system.id, partition.id));
String responseBody = sendGetRequest(url);
String jsonBody = null;
@ -287,7 +287,7 @@ public class HmcRestClient {
// Do not try to parse empty response
if(responseBody == null || responseBody.length() <= 1) {
responseErrors++;
log.warn("getPcmDataForLogicalPartition() - empty response, skipping: " + partition.name);
log.warn("getPcmDataForLogicalPartition() - empty response, skipping: {}", partition.name);
return null;
}
@ -298,12 +298,12 @@ public class HmcRestClient {
if(Objects.requireNonNull(link).attr("type").equals("application/json")) {
String href = link.attr("href");
log.trace("getPcmDataForLogicalPartition() - json url: " + href);
log.trace("getPcmDataForLogicalPartition() - URL: {}", href);
jsonBody = sendGetRequest(new URL(href));
}
} catch(Exception e) {
log.warn("getPcmDataForLogicalPartition() - xml parse error", e);
log.warn("getPcmDataForLogicalPartition() - XML parse error: {}", partition.id, e);
}
return jsonBody;
@ -327,7 +327,7 @@ public class HmcRestClient {
// Do not try to parse empty response
if(responseBody == null || responseBody.length() <= 1) {
responseErrors++;
log.trace("getPcmDataForEnergy() - empty response");
log.trace("getPcmDataForEnergy() - empty response, skipping: {}", systemEnergy);
return null;
}
@ -338,12 +338,12 @@ public class HmcRestClient {
if(Objects.requireNonNull(link).attr("type").equals("application/json")) {
String href = link.attr("href");
log.trace("getPcmDataForEnergy() - json url: " + href);
log.trace("getPcmDataForEnergy() - URL: {}", href);
jsonBody = sendGetRequest(new URL(href));
}
} catch(Exception e) {
log.warn("getPcmDataForEnergy() - xml parse error", e);
log.warn("getPcmDataForEnergy() - XML parse error: {}", systemEnergy, e);
}
return jsonBody;
@ -356,16 +356,15 @@ public class HmcRestClient {
*/
void enableEnergyMonitoring(ManagedSystem system) {
log.trace("enableEnergyMonitoring() - " + system.id);
log.trace("enableEnergyMonitoring() - {}", system);
try {
URL url = new URL(String.format("%s/rest/api/pcm/ManagedSystem/%s/preferences", baseUrl, system.id));
String responseBody = sendGetRequest(url);
String jsonBody = null;
// Do not try to parse empty response
if(responseBody == null || responseBody.length() <= 1) {
responseErrors++;
log.warn("enableEnergyMonitoring() - empty response");
log.warn("enableEnergyMonitoring() - empty response, skipping: {}", system);
return;
}
@ -396,7 +395,7 @@ public class HmcRestClient {
}
} catch (Exception e) {
log.warn("enableEnergyMonitoring() - Exception: " + e.getMessage());
log.debug("enableEnergyMonitoring() - Error: {}", e.getMessage());
}
}
@ -409,7 +408,7 @@ public class HmcRestClient {
*/
private String sendGetRequest(URL url) throws Exception {
log.trace("getResponse() - URL: " + url.toString());
log.trace("getResponse() - URL: {}", url.toString());
if(authToken == null) {
return null;
}
@ -434,7 +433,7 @@ public class HmcRestClient {
return null;
}
log.error("getResponse() - Unexpected response: " + response.code());
log.error("getResponse() - Unexpected response: {}", response.code());
throw new IOException("getResponse() - Unexpected response: " + response.code());
}
@ -451,7 +450,7 @@ public class HmcRestClient {
*/
public String sendPostRequest(URL url, String payload) throws Exception {
log.trace("sendPostRequest() - URL: " + url.toString());
log.trace("sendPostRequest() - URL: {}", url.toString());
if(authToken == null) {
return null;
}
@ -478,7 +477,7 @@ public class HmcRestClient {
if (!response.isSuccessful()) {
response.close();
log.warn(body);
log.error("sendPostRequest() - Unexpected response: " + response.code());
log.error("sendPostRequest() - Unexpected response: {}", response.code());
throw new IOException("sendPostRequest() - Unexpected response: " + response.code());
}

View File: InfluxClient.java

@ -17,6 +17,7 @@ package biz.nellemann.hmci;
import biz.nellemann.hmci.Configuration.InfluxObject;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBException;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;
@ -64,18 +65,17 @@ class InfluxClient {
do {
try {
log.debug("Connecting to InfluxDB - " + url);
influxDB = InfluxDBFactory.connect(url, username, password);
createDatabase();
log.debug("Connecting to InfluxDB - {}", url);
influxDB = InfluxDBFactory.connect(url, username, password).setDatabase(database);
batchPoints = BatchPoints.database(database).precision(TimeUnit.SECONDS).build();
connected = true;
} catch(Exception e) {
sleep(15 * 1000);
if(loginErrors++ > 3) {
log.error("login() error, giving up - " + e.getMessage());
log.error("login() - error, giving up: {}", e.getMessage());
throw new RuntimeException(e);
} else {
log.warn("login() error, retrying - " + e.getMessage());
log.warn("login() - error, retrying: {}", e.getMessage());
}
}
} while(!connected);
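
Since the database is no longer created automatically, writes fail with `InfluxDBException.DatabaseNotFoundException` until the `hmci` database exists. A minimal sketch of the connect-and-write pattern, assuming a local InfluxDB 1.8 and the influxdb-java 2.21 API; the measurement name, tag, and field values are illustrative:

```java
import java.util.concurrent.TimeUnit;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;

public class InfluxWriteSketch {
    public static void main(String[] args) {
        // Assumes a local InfluxDB 1.8 with the "hmci" database already created
        InfluxDB influxDB = InfluxDBFactory
                .connect("http://localhost:8086", "root", "")
                .setDatabase("hmci");

        BatchPoints batch = BatchPoints.database("hmci")
                .precision(TimeUnit.SECONDS)
                .build();
        batch.point(Point.measurement("server_energy_power")   // illustrative measurement
                .tag("servername", "host1")
                .addField("powerReading", 542)
                .build());

        // Throws InfluxDBException.DatabaseNotFoundException if "hmci" is missing
        influxDB.write(batch);
        influxDB.close();
    }
}
```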
@ -91,19 +91,15 @@ class InfluxClient {
}
void createDatabase() {
// Create our database... with a default retention of 156w == 3 years
influxDB.query(new Query("CREATE DATABASE " + database + " WITH DURATION 156w"));
influxDB.setDatabase(database);
}
synchronized void writeBatchPoints() throws Exception {
log.debug("writeBatchPoints()");
log.trace("writeBatchPoints()");
try {
influxDB.writeWithRetry(batchPoints);
} catch (InfluxDBException.DatabaseNotFoundException e) {
log.error("writeBatchPoints() - database \"{}\" not found/created: can't write data", database);
} catch(Exception e) {
log.warn("writeBatchPoints() " + e.getMessage());
e.printStackTrace();
log.warn("writeBatchPoints() {}", e.getMessage());
if(++errorCounter > 5) {
errorCounter = 0;
logoff();
@ -122,13 +118,13 @@ class InfluxClient {
void writeManagedSystem(ManagedSystem system) {
if(system.metrics == null) {
log.trace("writeManagedSystem() - null metrics, skipping: " + system.name);
log.trace("writeManagedSystem() - null metrics, skipping: {}", system.name);
return;
}
Instant timestamp = system.getTimestamp();
if(timestamp == null) {
log.warn("writeManagedSystem() - no timestamp, skipping: " + system.name);
log.warn("writeManagedSystem() - no timestamp, skipping: {}", system.name);
return;
}
@ -253,13 +249,13 @@ class InfluxClient {
void writeLogicalPartition(LogicalPartition partition) {
if(partition.metrics == null) {
log.warn("writeLogicalPartition() - null metrics, skipping: " + partition.name);
log.warn("writeLogicalPartition() - null metrics, skipping: {}", partition.name);
return;
}
Instant timestamp = partition.getTimestamp();
if(timestamp == null) {
log.warn("writeLogicalPartition() - no timestamp, skipping: " + partition.name);
log.warn("writeLogicalPartition() - no timestamp, skipping: {}", partition.name);
return;
}
@ -314,13 +310,13 @@ class InfluxClient {
void writeSystemEnergy(SystemEnergy systemEnergy) {
if(systemEnergy.metrics == null) {
log.trace("writeSystemEnergy() - null metrics, skipping: " + systemEnergy.system.name);
log.trace("writeSystemEnergy() - null metrics, skipping: {}", systemEnergy.system.name);
return;
}
Instant timestamp = systemEnergy.getTimestamp();
if(timestamp == null) {
log.warn("writeSystemEnergy() - no timestamp, skipping: " + systemEnergy.system.name);
log.warn("writeSystemEnergy() - no timestamp, skipping: {}", systemEnergy.system.name);
return;
}
@ -354,7 +350,7 @@ class InfluxClient {
// Iterate fields
m.fields.forEach((fieldName, fieldValue) -> {
log.trace("processMeasurementMap() " + measurement + " - fieldName: " + fieldName + ", fieldValue: " + fieldValue);
log.trace("processMeasurementMap() {} - fieldName: {}, fieldValue: {}", measurement, fieldName, fieldValue);
if(fieldValue instanceof Number) {
Number num = (Number) fieldValue;
builder.addField(fieldName, num);
@ -369,7 +365,7 @@ class InfluxClient {
// Iterate tags
m.tags.forEach((tagName, tagValue) -> {
log.trace("processMeasurementMap() " + measurement + " - tagName: " + tagName + ", tagValue: " + tagValue);
log.trace("processMeasurementMap() {} - tagName: {}, tagValue: {}", measurement, tagName, tagValue);
builder.tag(tagName, tagValue);
});

View File: LogicalPartition.java

@ -55,7 +55,7 @@ class LogicalPartition extends MetaSystem {
Map<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", system.name);
tagsMap.put("lparname", name);
log.trace("getDetails() - tags: " + tagsMap);
log.trace("getDetails() - tags: {}", tagsMap);
Map<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("id", metrics.systemUtil.sample.lparsUtil.id);
@ -63,7 +63,7 @@ class LogicalPartition extends MetaSystem {
fieldsMap.put("state", metrics.systemUtil.sample.lparsUtil.state);
fieldsMap.put("osType", metrics.systemUtil.sample.lparsUtil.osType);
fieldsMap.put("affinityScore", metrics.systemUtil.sample.lparsUtil.affinityScore);
log.trace("getDetails() - fields: " + fieldsMap);
log.trace("getDetails() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -78,12 +78,12 @@ class LogicalPartition extends MetaSystem {
Map<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", system.name);
tagsMap.put("lparname", name);
log.trace("getMemoryMetrics() - tags: " + tagsMap);
log.trace("getMemoryMetrics() - tags: {}", tagsMap);
Map<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("logicalMem", metrics.systemUtil.sample.lparsUtil.memory.logicalMem);
fieldsMap.put("backedPhysicalMem", metrics.systemUtil.sample.lparsUtil.memory.backedPhysicalMem);
log.trace("getMemoryMetrics() - fields: " + fieldsMap);
log.trace("getMemoryMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -98,7 +98,7 @@ class LogicalPartition extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", system.name);
tagsMap.put("lparname", name);
log.trace("getProcessorMetrics() - tags: " + tagsMap);
log.trace("getProcessorMetrics() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("utilizedProcUnits", metrics.systemUtil.sample.lparsUtil.processor.utilizedProcUnits);
@ -115,7 +115,7 @@ class LogicalPartition extends MetaSystem {
fieldsMap.put("mode", metrics.systemUtil.sample.lparsUtil.processor.mode);
fieldsMap.put("weight", metrics.systemUtil.sample.lparsUtil.processor.weight);
fieldsMap.put("poolId", metrics.systemUtil.sample.lparsUtil.processor.poolId);
log.trace("getProcessorMetrics() - fields: " + fieldsMap);
log.trace("getProcessorMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -136,7 +136,7 @@ class LogicalPartition extends MetaSystem {
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("vlanId", adapter.vlanId.toString());
tagsMap.put("vswitchId", adapter.vswitchId.toString());
log.trace("getVirtualEthernetAdapterMetrics() - tags: " + tagsMap);
log.trace("getVirtualEthernetAdapterMetrics() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("droppedPackets", adapter.droppedPackets);
@ -153,7 +153,7 @@ class LogicalPartition extends MetaSystem {
fieldsMap.put("transferredBytes", adapter.transferredBytes);
fieldsMap.put("transferredPhysicalBytes", adapter.transferredPhysicalBytes);
fieldsMap.put("sharedEthernetAdapterId", adapter.sharedEthernetAdapterId);
log.trace("getVirtualEthernetAdapterMetrics() - fields: " + fieldsMap);
log.trace("getVirtualEthernetAdapterMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -174,7 +174,7 @@ class LogicalPartition extends MetaSystem {
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("location", adapter.physicalLocation);
tagsMap.put("id", adapter.id);
log.trace("getVirtualGenericAdapterMetrics() - tags: " + tagsMap);
log.trace("getVirtualGenericAdapterMetrics() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("numOfReads", adapter.numOfReads);
@ -182,7 +182,7 @@ class LogicalPartition extends MetaSystem {
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("readBytes", adapter.readBytes);
fieldsMap.put("type", adapter.type);
log.trace("getVirtualGenericAdapterMetrics() - fields: " + fieldsMap);
log.trace("getVirtualGenericAdapterMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -201,7 +201,7 @@ class LogicalPartition extends MetaSystem {
tagsMap.put("lparname", name);
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVirtualFibreChannelAdapterMetrics() - tags: " + tagsMap);
log.trace("getVirtualFibreChannelAdapterMetrics() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("numOfReads", adapter.numOfReads);
@ -213,7 +213,7 @@ class LogicalPartition extends MetaSystem {
fieldsMap.put("transferredByte", adapter.transmittedBytes); // TODO: Must be error in dashboard, remove when checked.
//fieldsMap.put("wwpn", adapter.wwpn);
//fieldsMap.put("wwpn2", adapter.wwpn2);
log.trace("getVirtualFibreChannelAdapterMetrics() - fields: " + fieldsMap);
log.trace("getVirtualFibreChannelAdapterMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});

View File: ManagedSystem.java

@ -58,7 +58,7 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
log.trace("getDetails() - tags: " + tagsMap);
log.trace("getDetails() - tags: {}", tagsMap);
Map<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("mtm", String.format("%s-%s %s", type, model, serialNumber));
@ -69,7 +69,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("name", name);
fieldsMap.put("utilizedProcUnits", metrics.systemUtil.sample.systemFirmwareUtil.utilizedProcUnits);
fieldsMap.put("assignedMem", metrics.systemUtil.sample.systemFirmwareUtil.assignedMem);
log.trace("getDetails() - fields: " + fieldsMap);
log.trace("getDetails() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -83,7 +83,7 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
log.trace("getMemoryMetrics() - tags: " + tagsMap);
log.trace("getMemoryMetrics() - tags: {}", tagsMap);
Map<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("totalMem", metrics.systemUtil.sample.serverUtil.memory.totalMem);
@ -91,7 +91,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("configurableMem", metrics.systemUtil.sample.serverUtil.memory.configurableMem);
fieldsMap.put("assignedMemToLpars", metrics.systemUtil.sample.serverUtil.memory.assignedMemToLpars);
fieldsMap.put("virtualPersistentMem", metrics.systemUtil.sample.serverUtil.memory.virtualPersistentMem);
log.trace("getMemoryMetrics() - fields: " + fieldsMap);
log.trace("getMemoryMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -105,14 +105,14 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
log.trace("getProcessorMetrics() - tags: " + tagsMap);
log.trace("getProcessorMetrics() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("totalProcUnits", metrics.systemUtil.sample.serverUtil.processor.totalProcUnits);
fieldsMap.put("utilizedProcUnits", metrics.systemUtil.sample.serverUtil.processor.utilizedProcUnits);
fieldsMap.put("availableProcUnits", metrics.systemUtil.sample.serverUtil.processor.availableProcUnits);
fieldsMap.put("configurableProcUnits", metrics.systemUtil.sample.serverUtil.processor.configurableProcUnits);
log.trace("getProcessorMetrics() - fields: " + fieldsMap);
log.trace("getProcessorMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -128,7 +128,7 @@ class ManagedSystem extends MetaSystem {
tagsMap.put("servername", name);
tagsMap.put("pool", sharedProcessorPool.id);
tagsMap.put("poolname", sharedProcessorPool.name);
log.trace("getSharedProcessorPools() - tags: " + tagsMap);
log.trace("getSharedProcessorPools() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("assignedProcUnits", sharedProcessorPool.assignedProcUnits);
@ -136,7 +136,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("utilizedProcUnits", sharedProcessorPool.utilizedProcUnits);
fieldsMap.put("borrowedProcUnits", sharedProcessorPool.borrowedProcUnits);
fieldsMap.put("configuredProcUnits", sharedProcessorPool.configuredProcUnits);
log.trace("getSharedProcessorPools() - fields: " + fieldsMap);
log.trace("getSharedProcessorPools() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -152,7 +152,7 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
log.trace("getPhysicalProcessorPool() - tags: " + tagsMap);
log.trace("getPhysicalProcessorPool() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("assignedProcUnits", metrics.systemUtil.sample.serverUtil.physicalProcessorPool.assignedProcUnits);
@ -160,7 +160,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("utilizedProcUnits", metrics.systemUtil.sample.serverUtil.physicalProcessorPool.utilizedProcUnits);
fieldsMap.put("configuredProcUnits", metrics.systemUtil.sample.serverUtil.physicalProcessorPool.configuredProcUnits);
fieldsMap.put("borrowedProcUnits", metrics.systemUtil.sample.serverUtil.physicalProcessorPool.borrowedProcUnits);
log.trace("getPhysicalProcessorPool() - fields: " + fieldsMap);
log.trace("getPhysicalProcessorPool() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -176,14 +176,14 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
tagsMap.put("viosname", vios.name);
log.trace("getViosDetails() - tags: " + tagsMap);
log.trace("getViosDetails() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("viosid", vios.id);
fieldsMap.put("viosstate", vios.state);
fieldsMap.put("viosname", vios.name);
fieldsMap.put("affinityScore", vios.affinityScore);
log.trace("getViosDetails() - fields: " + fieldsMap);
log.trace("getViosDetails() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -201,7 +201,7 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
tagsMap.put("viosname", vios.name);
log.trace("getViosMemoryMetrics() - tags: " + tagsMap);
log.trace("getViosMemoryMetrics() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
Number assignedMem = getNumberMetricObject(vios.memory.assignedMem);
@ -216,7 +216,7 @@ class ManagedSystem extends MetaSystem {
Number usedMemPct = (utilizedMem.intValue() * 100 ) / assignedMem.intValue();
fieldsMap.put("utilizedPct", usedMemPct.floatValue());
}
log.trace("getViosMemoryMetrics() - fields: " + fieldsMap);
log.trace("getViosMemoryMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -234,7 +234,7 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
tagsMap.put("viosname", vios.name);
log.trace("getViosProcessorMetrics() - tags: " + tagsMap);
log.trace("getViosProcessorMetrics() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("utilizedProcUnits", vios.processor.utilizedProcUnits);
@ -250,7 +250,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("timePerInstructionExecution", vios.processor.timeSpentWaitingForDispatch);
fieldsMap.put("weight", vios.processor.weight);
fieldsMap.put("mode", vios.processor.mode);
log.trace("getViosProcessorMetrics() - fields: " + fieldsMap);
log.trace("getViosProcessorMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -268,11 +268,11 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
tagsMap.put("viosname", vios.name);
log.trace("getViosNetworkLpars() - tags: " + tagsMap);
log.trace("getViosNetworkLpars() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("clientlpars", vios.network.clientLpars.size());
log.trace("getViosNetworkLpars() - fields: " + fieldsMap);
log.trace("getViosNetworkLpars() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -294,7 +294,7 @@ class ManagedSystem extends MetaSystem {
tagsMap.put("viosname", vios.name);
//tagsMap.put("id", adapter.id);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getViosNetworkSharedAdapters() - tags: " + tagsMap);
log.trace("getViosNetworkSharedAdapters() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("id", adapter.id);
@ -305,7 +305,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
log.trace("getViosNetworkSharedAdapters() - fields: " + fieldsMap);
log.trace("getViosNetworkSharedAdapters() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -331,7 +331,7 @@ class ManagedSystem extends MetaSystem {
tagsMap.put("systemname", name);
tagsMap.put("viosname", vios.name);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getViosNetworkVirtualAdapters() - tags: " + tagsMap);
log.trace("getViosNetworkVirtualAdapters() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("droppedPackets", adapter.droppedPackets);
@ -347,7 +347,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("sentPhysicalPackets", adapter.sentPhysicalPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
fieldsMap.put("transferredPhysicalBytes", adapter.transferredPhysicalBytes);
log.trace("getViosNetworkVirtualAdapters() - fields: " + fieldsMap);
log.trace("getViosNetworkVirtualAdapters() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -372,7 +372,7 @@ class ManagedSystem extends MetaSystem {
tagsMap.put("servername", name);
tagsMap.put("viosname", vios.name);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getViosNetworkGenericAdapters() - tags: " + tagsMap);
log.trace("getViosNetworkGenericAdapters() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("sentBytes", adapter.sentBytes);
@ -381,7 +381,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
log.trace("getViosNetworkGenericAdapters() - fields: " + fieldsMap);
log.trace("getViosNetworkGenericAdapters() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -400,11 +400,11 @@ class ManagedSystem extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", name);
tagsMap.put("viosname", vios.name);
log.trace("getViosStorageLpars() - tags: " + tagsMap);
log.trace("getViosStorageLpars() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("clientlpars", vios.storage.clientLpars.size());
log.trace("getViosStorageLpars() - fields: " + fieldsMap);
log.trace("getViosStorageLpars() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -417,7 +417,7 @@ class ManagedSystem extends MetaSystem {
List<Measurement> list = new ArrayList<>();
metrics.systemUtil.sample.viosUtil.forEach( vios -> {
log.trace("getViosStorageFiberChannelAdapters() - VIOS: " + vios.name);
log.trace("getViosStorageFiberChannelAdapters() - VIOS: {}", vios.name);
vios.storage.fiberChannelAdapters.forEach( adapter -> {
@ -426,7 +426,7 @@ class ManagedSystem extends MetaSystem {
tagsMap.put("servername", name);
tagsMap.put("viosname", vios.name);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getViosStorageFiberChannelAdapters() - tags: " + tagsMap);
log.trace("getViosStorageFiberChannelAdapters() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("numOfReads", adapter.numOfReads);
@ -434,7 +434,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("readBytes", adapter.readBytes);
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
log.trace("getViosStorageFiberChannelAdapters() - fields: " + fieldsMap);
log.trace("getViosStorageFiberChannelAdapters() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -483,7 +483,7 @@ class ManagedSystem extends MetaSystem {
List<Measurement> list = new ArrayList<>();
metrics.systemUtil.sample.viosUtil.forEach( vios -> {
log.trace("getViosStoragePhysicalAdapters() - VIOS: " + vios.name);
log.trace("getViosStoragePhysicalAdapters() - VIOS: {}", vios.name);
vios.storage.genericPhysicalAdapters.forEach( adapter -> {
@ -492,7 +492,7 @@ class ManagedSystem extends MetaSystem {
tagsMap.put("viosname", vios.name);
tagsMap.put("id", adapter.id);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getViosStoragePhysicalAdapters() - tags: " + tagsMap);
log.trace("getViosStoragePhysicalAdapters() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("numOfReads", adapter.numOfReads);
@ -501,7 +501,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
fieldsMap.put("type", adapter.type);
log.trace("getViosStoragePhysicalAdapters() - fields: " + fieldsMap);
log.trace("getViosStoragePhysicalAdapters() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
@ -517,7 +517,7 @@ class ManagedSystem extends MetaSystem {
List<Measurement> list = new ArrayList<>();
metrics.systemUtil.sample.viosUtil.forEach( vios -> {
log.trace("getViosStorageVirtualAdapters() - VIOS: " + vios.name);
log.trace("getViosStorageVirtualAdapters() - VIOS: {}", vios.name);
vios.storage.genericVirtualAdapters.forEach( adapter -> {
@ -526,7 +526,7 @@ class ManagedSystem extends MetaSystem {
tagsMap.put("viosname", vios.name);
tagsMap.put("location", adapter.physicalLocation);
tagsMap.put("id", adapter.id);
log.trace("getViosStorageVirtualAdapters() - tags: " + tagsMap);
log.trace("getViosStorageVirtualAdapters() - tags: {}", tagsMap);
HashMap<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("numOfReads", adapter.numOfReads);
@ -535,7 +535,7 @@ class ManagedSystem extends MetaSystem {
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
fieldsMap.put("type", adapter.type);
log.trace("getViosStorageVirtualAdapters() - fields: " + fieldsMap);
log.trace("getViosStorageVirtualAdapters() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});

View File: MetaSystem.java

@ -66,12 +66,12 @@ abstract class MetaSystem {
String timestamp = getStringMetricObject(metrics.systemUtil.sample.sampleInfo.timeStamp);
Instant instant = Instant.now();
try {
log.trace("getTimeStamp() - PMC Timestamp: " + timestamp);
log.trace("getTimeStamp() - PMC Timestamp: {}", timestamp);
DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss[XXX][X]");
instant = Instant.from(dateTimeFormatter.parse(timestamp));
log.trace("getTimestamp() - Instant: " + instant.toString());
log.trace("getTimestamp() - Instant: {}", instant.toString());
} catch(DateTimeParseException e) {
log.warn("getTimestamp() - parse error: " + timestamp);
log.warn("getTimestamp() - parse error: {}", timestamp);
}
return instant;
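
A small sketch of the timestamp parsing shown above, using the same pattern; the sample value and class name are hypothetical:

```java
import java.time.Instant;
import java.time.format.DateTimeFormatter;

public class TimestampSketch {
    public static void main(String[] args) {
        // Same pattern as MetaSystem.getTimestamp(); the optional [XXX][X]
        // sections accept offsets such as "+01:00", "+01" or "Z"
        DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss[XXX][X]");
        String sampleTimestamp = "2022-02-08T21:39:42+01:00";   // hypothetical HMC sample
        Instant instant = Instant.from(fmt.parse(sampleTimestamp));
        System.out.println(instant);   // 2022-02-08T20:39:42Z
    }
}
```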

View File: SystemEnergy.java

@ -48,11 +48,11 @@ class SystemEnergy extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", system.name);
log.trace("getPowerMetrics() - tags: " + tagsMap);
log.trace("getPowerMetrics() - tags: {}", tagsMap);
Map<String, Object> fieldsMap = new HashMap<>();
fieldsMap.put("powerReading", metrics.systemUtil.sample.energyUtil.powerUtil.powerReading);
log.trace("getPowerMetrics() - fields: " + fieldsMap);
log.trace("getPowerMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
@ -65,7 +65,7 @@ class SystemEnergy extends MetaSystem {
HashMap<String, String> tagsMap = new HashMap<>();
tagsMap.put("servername", system.name);
log.trace("getThermalMetrics() - tags: " + tagsMap);
log.trace("getThermalMetrics() - tags: {}", tagsMap);
Map<String, Object> fieldsMap = new HashMap<>();
@ -82,7 +82,7 @@ class SystemEnergy extends MetaSystem {
fieldsMap.put("baseboardTemperature_" + t.entityInstance, t.temperatureReading);
}*/
log.trace("getThermalMetrics() - fields: " + fieldsMap);
log.trace("getThermalMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
return list;
}

View File: ConfigurationTest.groovy

@ -10,7 +10,7 @@ class ConfigurationTest extends Specification {
Path testConfigurationFile = Paths.get(getClass().getResource('/hmci.toml').toURI())
void "test parsing"() {
void "test parsing of configuration file"() {
when:
Configuration conf = new Configuration(testConfigurationFile)
@ -20,13 +20,13 @@ class ConfigurationTest extends Specification {
}
void "test lookup influx"() {
void "test energy flag, default setting"() {
when:
Configuration conf = new Configuration(testConfigurationFile)
then:
conf != null
!conf.getHmc().get(0).energy
}

View File: src/test/resources/hmci.toml

@ -13,21 +13,19 @@ username = "root"
password = ""
database = "hmci"
# HMC on our primary site
[hmc.site1]
url = "https://10.10.10.10:12443"
username = "hmci"
password = "hmcihmci"
unsafe = true # Ignore SSL cert. errors
energy = false # Do not try to collect energy metrics
# One or more HMCs to query for data and metrics
[hmc]
# HMC on our primary site
[hmc.site1]
url = "https://10.10.10.10:12443"
username = "hmci"
password = "hmcihmci"
unsafe = true # Ignore SSL cert. errors
# Example
#[hmc.site2]
#url = "https://10.10.20.20:12443"
#username = "viewer"
#password = "someSecret"
#unsafe = false
#trace = "/tmp/pcm-files"
# Example
#[hmc.site2]
#url = "https://10.10.20.20:12443"
#username = "viewer"
#password = "someSecret"
#unsafe = false
#energy = true
#trace = "/tmp/pcm-files"