Compare commits

...

31 Commits
v1.4.0 ... main

Author SHA1 Message Date
Mark Nellemann 706f0c7038 Update README.md 2024-05-17 06:19:44 +00:00
Mark Nellemann 2c1921564b Merge pull request 'Avoid HMC sessions timeouts.' (#2) from sessiontimeouts into main
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/tag Build is passing Details
Reviewed-on: #2
2023-11-13 13:16:45 +00:00
Mark Nellemann a24b03f4ad Update 3rd party dependencies.
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/pr Build is passing Details
2023-11-13 14:14:27 +01:00
Mark Nellemann d59079e6da Update gradle plugin ospackage plugin.
continuous-integration/drone/push Build is passing Details
2023-08-10 18:31:04 +02:00
Mark Nellemann bdfa535b75 Work on avoiding lingering sessions on the HMC.
continuous-integration/drone/push Build is passing Details
2023-08-10 11:02:53 +02:00
Mark Nellemann 41decccc82 Merge remote-tracking branch 'origin/main' into influxdb2 2023-08-10 10:22:01 +02:00
Mark Nellemann 24d1701ab3 Update hmci version
continuous-integration/drone/push Build is passing Details
2023-06-28 12:00:19 +00:00
Mark Nellemann 5b2a3ff9ea Merge pull request 'influxdb2 support' (#1) from influxdb2 into main
continuous-integration/drone/tag Build is passing Details
Reviewed-on: #1
2023-05-19 18:39:20 +00:00
Mark Nellemann ec9586f870 Updated dashboards, docs and 3rd party deps. 2023-05-19 20:37:24 +02:00
Mark Nellemann 46fd9d7671 Modifications to support to InfluxDB v2.x 2023-05-17 20:36:40 +02:00
Mark Nellemann 8f4fbc6a93 Update dashboards. 2023-05-17 18:26:13 +02:00
Mark Nellemann 39af1e3c00 Increase influx writer bufferlimit. 2023-05-17 16:29:37 +02:00
Mark Nellemann 6b9b78f32c Switch to updated influxdb client.
continuous-integration/drone/push Build is passing Details
2023-04-04 22:22:10 +02:00
Mark Nellemann 2967f6ef75 Cleanup and dashboard fixes and improvements.
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/tag Build is passing Details
2023-03-21 14:57:00 +01:00
Mark Nellemann 6699566fba Update documentation.
continuous-integration/drone/push Build is passing Details
2023-03-08 10:24:21 +01:00
Mark Nellemann 55e7fe2b90 Update documentation.
continuous-integration/drone/push Build is passing Details
2023-03-08 10:02:43 +01:00
Mark Nellemann e30d290f07 Update documentation.
continuous-integration/drone/push Build is passing Details
2023-03-08 09:49:55 +01:00
Mark Nellemann f461b40321 Update dashboard links.
continuous-integration/drone/push Build is passing Details
2023-02-06 19:46:10 +01:00
Mark Nellemann c64bf66d9d Update dependencies and provide screenshot in README.
continuous-integration/drone/push Build is passing Details
2023-02-06 19:14:44 +01:00
Mark Nellemann 2e363f0a39 Update links.
continuous-integration/drone/push Build is passing Details
2023-01-18 15:40:58 +01:00
Mark Nellemann aa36e51367 Update gradle and fix typo
continuous-integration/drone/push Build is passing Details
2023-01-05 14:32:57 +01:00
Mark Nellemann 5952a21714 Fix error in sriov type being null.
continuous-integration/drone/push Build is passing Details
continuous-integration/drone/tag Build is passing Details
2023-01-05 14:28:12 +01:00
Mark Nellemann 985b9100c3 Update links
continuous-integration/drone/tag Build is passing Details
2023-01-04 15:46:37 +01:00
Mark Nellemann 2d3f304fb0 Preperations for migration. 2023-01-04 15:41:59 +01:00
Mark Nellemann e941fe81f5 Add screenshots. 2022-12-17 11:26:39 +01:00
Mark Nellemann 78ff6783aa Update README with links to other related projects. 2022-12-17 10:34:09 +01:00
Mark Nellemann bce1d08c0b Merged in samples (pull request #23)
Multiple samples
2022-12-16 07:09:18 +00:00
Mark Nellemann 5806277266 Go from millisec. to sec. precision in influx timestamps.
Do not go to minNumberOfSamples at first processing.
2022-12-16 08:06:40 +01:00
Mark Nellemann 1b5a91c776 Go from millisec. to sec. precision in influx timestamps.
Do not go to minNumberOfSamples at first processing.
2022-12-07 16:33:56 +01:00
Mark Nellemann fb5bfd532b More work on multiple samples. 2022-12-07 16:12:29 +01:00
Mark Nellemann a0cfff18ef Fetch multiple samples. 2022-12-05 15:18:42 +01:00
81 changed files with 9599 additions and 3357 deletions

23
.drone.yml Normal file
View File

@ -0,0 +1,23 @@
---
kind: pipeline
name: default
type: docker
steps:
- name: test
image: eclipse-temurin:8-jdk
commands:
- ./gradlew test
- name: build
image: eclipse-temurin:8-jdk
environment:
AUTH_TOKEN: # Gitea access token ENV variable
from_secret: auth # Name of DroneCI secret exposed above
commands:
- ./gradlew build packages
- for file in build/libs/*-all.jar ; do curl --user "${DRONE_REPO_OWNER}:$${AUTH_TOKEN}" --upload-file "$${file}" "https://git.data.coop/api/packages/${DRONE_REPO_OWNER}/generic/${DRONE_REPO_NAME}/${DRONE_TAG}/$(basename $file)" ; done
- for file in build/distributions/*.deb ; do curl --user "${DRONE_REPO_OWNER}:$${AUTH_TOKEN}" --upload-file "$${file}" "https://git.data.coop/api/packages/${DRONE_REPO_OWNER}/generic/${DRONE_REPO_NAME}/${DRONE_TAG}/$(basename $file)" ; done
- for file in build/distributions/*.rpm ; do curl --user "${DRONE_REPO_OWNER}:$${AUTH_TOKEN}" --upload-file "$${file}" "https://git.data.coop/api/packages/${DRONE_REPO_OWNER}/generic/${DRONE_REPO_NAME}/${DRONE_TAG}/$(basename $file)" ; done
when:
event:
- tag

View File

@ -2,31 +2,44 @@
All notable changes to this project will be documented in this file.
## [1.4.0] - 2011-12-01
## 1.4.5 - 2023-11-13
- Adjust timeout to not have lingering sessions on HMC
- Update 3rd party dependencies
## 1.4.4 - 2023-05-20
- Support for InfluxDB v2, now requires InfluxDB 1.8 or later
- Increase influx writer buffer limit
- Various dashboard improvements
## 1.4.3 - 2023-03-21
- Fix and improve processor utilization dashboards.
- Minor code cleanup.
## 1.4.2 - 2023-01-05
- Fix error in SR-IOV port type being null.
## 1.4.1 - 2022-12-15
- Retrieve multiple PCM samples and keep track of processing.
- Rename VIOS metric 'vFC' (storage adapter) to 'virtual'.
## 1.4.0 - 2022-12-01
- Rewrite of toml+xml+json de-serialization code (uses jackson now).
- Changes to configuration file format - please look at [doc/hmci.toml](doc/hmci.toml) as example.
- Logging (write to file) JSON output from HMC is currently not possible.
## [1.3.3] - 2022-09-20
## 1.3.3 - 2022-09-20
- Default configuration location on Windows platform.
- Process LPAR SR-IOV logical network ports data
- Update default dashboards
- Update documentation
## [1.3.0] - 2022-02-04
## 1.3.0 - 2022-02-04
- Correct use of InfluxDB batch writing.
## [1.2.8] - 2022-02-28
## 1.2.8 - 2022-02-28
- Sort measurement tags before writing to InfluxDB.
- Update 3rd party dependencies.
## [1.2.7] - 2022-02-24
## 1.2.7 - 2022-02-24
- Options to include/exclude Managed Systems and/or Logical Partitions.
[1.4.0]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.4.0%0Dv1.3.3
[1.3.3]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.3.3%0Dv1.3.0
[1.3.0]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.3.0%0Dv1.2.8
[1.2.8]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.2.8%0Dv1.2.7
[1.2.7]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.2.7%0Dv1.2.6
[1.2.6]: https://bitbucket.org/mnellemann/hmci/branches/compare/v1.2.6%0Dv1.2.5

221
README.md
View File

@ -1,220 +1,3 @@
# HMC Insights
# Repository moved
**HMCi** is a utility that collects metrics from one or more *IBM Power Hardware Management Consoles (HMC)*, without the need to install agents on logical partitions / virtual machines running on the IBM Power systems. The metric data is processed and saved into an InfluxDB time-series database. Grafana is used to visualize the metrics data from InfluxDB through provided dashboards, or your own customized dashboards.
This software is free to use and is licensed under the [Apache 2.0 License](https://bitbucket.org/mnellemann/hmci/src/master/LICENSE), but is not supported or endorsed by International Business Machines (IBM). There is an optional [companion agent](https://bitbucket.org/mnellemann/sysmon/), which provides more metrics from within AIX and Linux.
Metrics include:
- *Managed Systems* - the physical Power servers
- *Logical Partitions* - the virtualized servers running AIX, Linux or IBM-i (AS/400)
- *Virtual I/O Servers* - the I/O partition(s) virtualizing network and storage
- *Energy* - watts and temperatures (needs to be enabled and is not available on P7 and multi-chassis systems)
![architecture](doc/HMCi.png)
## Installation and Setup
There are a few steps in the installation.
1. Preparations on the Hardware Management Console (HMC)
2. Installation of InfluxDB and Grafana software
3. Installation and configuration of *HMC Insights* (HMCi)
4. Configure Grafana and import example dashboards
### 1 - IBM Power HMC Setup Instructions
- Login to your HMC
- Navigate to *Console Settings*
- Go to *Change Date and Time*
- Set correct timezone, if not done already
- Configure one or more NTP servers, if not done already
- Enable the NTP client, if not done already
- Navigate to *Users and Security*
- Create a new read-only/viewer **hmci** user, which will be used to connect to the HMC.
- Click *Manage User Profiles and Access*, edit the newly created *hmci* user and click *User Properties*:
- Set *Session timeout minutes* to **60**
- Set *Verify timeout minutes* to **15**
- Set *Idle timeout minutes* to **90**
- Set *Minimum time in days between password changes* to **0**
- **Enable** *Allow remote access via the web*
- Navigate to *HMC Management* and *Console Settings*
- Click *Change Performance Monitoring Settings*:
- Enable *Performance Monitoring Data Collection for Managed Servers*: **All On**
- Set *Performance Data Storage* to **1** day or preferably more
If you do not enable *Performance Monitoring Data Collection for Managed Servers*, you will see errors such as *Unexpected response: 403*. Use the HMCi debug option to get more details about what is going on.
### 2 - InfluxDB and Grafana Installation
Install InfluxDB (v. **1.8.x** or **1.9.x** for best compatibility with Grafana) on a host that is network accessible to the HMCi utility (the default InfluxDB port is 8086). You can install Grafana on the same server or on any server that is able to connect to the InfluxDB database. The Grafana installation needs to be accessible from your browser (by default on port 3000). The default settings for both InfluxDB and Grafana will work fine as a start.
- You can download [Grafana ppc64le](https://www.power-devops.com/grafana) and [InfluxDB ppc64le](https://www.power-devops.com/influxdb) packages for most Linux distributions and AIX on the [Power DevOps](https://www.power-devops.com/) site.
- Binaries for amd64/x86 are available from the [Grafana website](https://grafana.com/grafana/download) (select the **OSS variant**) and [InfluxDB website](https://portal.influxdata.com/downloads/) and most likely directly from your Linux distributions repositories.
- Create the empty *hmci* database by running the **influx** CLI command and type:
```text
CREATE DATABASE "hmci" WITH DURATION 365d REPLICATION 1;
```
See the [Influx documentation](https://docs.influxdata.com/influxdb/v1.8/query_language/manage-database/#create-database) for more information on duration and replication.
### 3 - HMCi Installation & Configuration
Install *HMCi* on a host that can connect to your Power HMC (on port 12443) and is also allowed to connect to the InfluxDB service. This *can be* the same LPAR/VM as used for the InfluxDB installation.
- Ensure you have **correct date/time** and NTPd running to keep it accurate!
- The only requirement for **hmci** is the Java runtime, version 8 (or later)
- Install **HMCi** from [downloads](https://bitbucket.org/mnellemann/hmci/downloads/) (rpm, deb or jar) or build from source
- On RPM based systems: ```sudo rpm -ivh hmci-x.y.z-n.noarch.rpm```
- On DEB based systems: ```sudo dpkg -i hmci_x.y.z-n_all.deb```
- Copy the **/opt/hmci/doc/hmci.toml** configuration example into **/etc/hmci.toml** and edit the configuration to suit your environment. The location of the configuration file can optionally be changed with the *--conf* option.
- Run the **/opt/hmci/bin/hmci** program in a shell, as a @reboot cron task or configure as a proper service - there are instructions in the [doc/readme-service.md](doc/readme-service.md) file.
- When started, *hmci* expects the InfluxDB database to exist already.
### 4 - Grafana Configuration
- Configure Grafana to use InfluxDB as a new datasource
- **NOTE:** set *Min time interval* to *30s* or *1m* depending on your HMCi *refresh* setting.
- Import example dashboards from [doc/dashboards/*.json](doc/dashboards/) into Grafana as a starting point and get creative making your own cool dashboards - please share anything useful :)
## Notes
### No data (or past/future data) shown in Grafana
This is most likely due to timezone, date and/or NTP not being configured correctly on the HMC and/or host running HMCi.
Example showing how you configure related settings through the HMC CLI:
```shell
chhmc -c date -s modify --datetime MMDDhhmm # Set current date/time: MMDDhhmm[[CC]YY][.ss]
chhmc -c date -s modify --timezone Europe/Copenhagen # Configure your timezone
chhmc -c xntp -s enable # Enable the NTP service
chhmc -c xntp -s add -a IP_Addr # Add a remote NTP server
```
Remember to reboot your HMC after changing the timezone.
### Compatibility with nextract Plus
From version 1.2 *HMCi* is made compatible with the similar [nextract Plus](https://www.ibm.com/support/pages/nextract-plus-hmc-rest-api-performance-statistics) tool from Nigel Griffiths. This means that the Grafana [dashboards](https://grafana.com/grafana/dashboards/13819) made by Nigel are compatible with *HMCi* and the other way around.
### Start InfluxDB and Grafana at boot (systemd compatible Linux)
```shell
systemctl enable influxdb
systemctl start influxdb
systemctl enable grafana-server
systemctl start grafana-server
```
### InfluxDB Retention Policy
Examples for changing the default InfluxDB retention policy for the hmci database:
```text
ALTER RETENTION POLICY "autogen" ON "hmci" DURATION 156w
ALTER RETENTION POLICY "autogen" ON "hmci" DURATION 90d
```
### Upgrading HMCi
On RPM based systems (RedHat, Suse, CentOS), download the latest *hmci-x.y.z-n.noarch.rpm* file and upgrade:
```shell
sudo rpm -Uvh hmci-x.y.z-n.noarch.rpm
```
On DEB based systems (Debian, Ubuntu and derivatives), download the latest *hmci_x.y.z-n_all.deb* file and upgrade:
```shell
sudo dpkg -i hmci_x.y.z-n_all.deb
```
Restart the HMCi service on *systemd* based Linux systems:
```shell
systemctl restart hmci
journalctl -f -u hmci # to check log output
```
### AIX Notes
To install (or upgrade) on AIX, you need to pass the *--ignoreos* flag to the *rpm* command:
```shell
rpm -Uvh --ignoreos hmci-x.y.z-n.noarch.rpm
```
## Grafana Screenshots
Below are screenshots of the provided Grafana dashboards (found in the **doc/** folder), which can be used as a starting point.
- [hmci-systems.png](https://bitbucket.org/mnellemann/hmci/downloads/hmci-systems-dashboard.png)
- [hmci-vois.png](https://bitbucket.org/mnellemann/hmci/downloads/hmci-vios-dashboard.png)
- [hmci-lpars](https://bitbucket.org/mnellemann/hmci/downloads/hmci-lpars-dashboard.png)
## Known problems
### Incomplete set of metrics
I have not been able to test and verify all types of metric data. If you encounter any missing or wrong data, please [contact me](mark.nellemann@gmail.com) and I will try to fix it.
It is possible to save the raw JSON data received from the HMC, which can help me implement missing data. You need to specify **trace = "/tmp/hmci-trace"** (or some other location) in the configuration file, under the HMC instance.
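As a rough illustration of where that setting goes, here is a hypothetical HMC section of the TOML configuration; the section name, URL and credentials are placeholders (see [doc/hmci.toml](doc/hmci.toml) for the authoritative layout), and only the *trace* line is the setting described above:
```toml
# Illustrative only - section name, URL and credentials are placeholders;
# the real layout is documented in doc/hmci.toml
[hmc.site1]
url = "https://my-hmc-host:12443"
username = "hmci"
password = "secret"
trace = "/tmp/hmci-trace"   # save raw HMC JSON responses under this path
```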
### Naming collision
You can't have partitions (or Virtual I/O Servers) on different Systems with the same name, as these cannot be distinguished when metrics are
written to InfluxDB (which uses the name as key).
### Renaming partitions
If you rename a partition, the metrics in InfluxDB will still be available by the old name, and new metrics will be available by the new name of the partition. There is no easy way to migrate the old data, but you can delete it easily:
```text
DELETE WHERE lparname = 'name';
```
## Development Information
You need Java (JDK) version 8 or later to build hmci.
### Build & Test
Use the gradle build tool, which will download all required dependencies:
```shell
./gradlew clean build
```
### Local Testing
#### InfluxDB
Start the InfluxDB container:
```shell
docker run --name=influxdb --rm -d -p 8086:8086 influxdb:1.8
```
Create the *hmci* database:
```shell
docker exec -i influxdb influx -execute "CREATE DATABASE hmci"
```
#### Grafana
Start the Grafana container, linking it to the InfluxDB container:
```shell
docker run --name grafana --link influxdb:influxdb --rm -d -p 3000:3000 grafana/grafana
```
Setup Grafana to connect to the InfluxDB container by defining a new datasource on URL *http://influxdb:8086* named *hmci*.
Grafana dashboards can be imported from the *doc/* folder.
Please visit [github.com/mnellemann/hmci](https://github.com/mnellemann/hmci)

View File

@ -1,14 +1,11 @@
plugins {
id 'java'
id 'jacoco'
id 'groovy'
id 'application'
// Code coverage of tests
id 'jacoco'
id "com.github.johnrengelman.shadow" version "7.1.2"
id "net.nemerosa.versioning" version "2.15.1"
id "nebula.ospackage" version "9.1.1"
id "com.netflix.nebula.ospackage" version "11.5.0"
id "com.github.johnrengelman.shadow" version "7.1.2"
}
repositories {
@ -20,26 +17,24 @@ group = projectGroup
version = projectVersion
dependencies {
annotationProcessor 'info.picocli:picocli-codegen:4.7.0'
implementation 'info.picocli:picocli:4.7.0'
implementation 'org.influxdb:influxdb-java:2.23'
//implementation 'com.influxdb:influxdb-client-java:6.7.0'
implementation 'org.slf4j:slf4j-api:2.0.4'
implementation 'org.slf4j:slf4j-simple:2.0.4'
implementation 'com.squareup.okhttp3:okhttp:4.10.0' // Also used by InfluxDB Client
//implementation "org.eclipse.jetty:jetty-client:9.4.49.v20220914"
implementation 'com.fasterxml.jackson.core:jackson-databind:2.14.1'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-xml:2.14.1'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-toml:2.14.1'
annotationProcessor 'info.picocli:picocli-codegen:4.7.5'
implementation 'info.picocli:picocli:4.7.5'
implementation 'org.slf4j:slf4j-api:2.0.9'
implementation 'org.slf4j:slf4j-simple:2.0.9'
implementation 'com.squareup.okhttp3:okhttp:4.11.0' // Also used by InfluxDB Client
implementation 'com.influxdb:influxdb-client-java:6.10.0'
implementation 'com.fasterxml.jackson.core:jackson-databind:2.15.2'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-xml:2.15.2'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-toml:2.15.2'
testImplementation 'junit:junit:4.13.2'
testImplementation 'org.spockframework:spock-core:2.3-groovy-3.0'
testImplementation 'org.spockframework:spock-core:2.3-groovy-4.0'
testImplementation "org.mock-server:mockserver-netty-no-dependencies:5.14.0"
}
application {
mainClass.set('biz.nellemann.hmci.Application')
applicationDefaultJvmArgs = [ "-server", "-Xms64m", "-Xmx64m", "-XX:+UseG1GC", "-XX:+ExitOnOutOfMemoryError", "-XX:+AlwaysPreTouch" ]
applicationDefaultJvmArgs = [ "-server", "-Xms64m", "-Xmx256m", "-XX:+UseG1GC", "-XX:+ExitOnOutOfMemoryError", "-XX:+AlwaysPreTouch" ]
}
java {
@ -52,7 +47,6 @@ test {
}
apply plugin: 'nebula.ospackage'
ospackage {
packageName = 'hmci'
release = '1'
@ -89,7 +83,7 @@ buildDeb {
}
jacoco {
toolVersion = "0.8.8"
toolVersion = "0.8.9"
}
jacocoTestReport {
@ -128,7 +122,7 @@ jar {
}
}
tasks.create("packages") {
tasks.register("packages") {
group "build"
dependsOn ":build"
dependsOn ":buildDeb"

File diff suppressed because one or more lines are too long

Binary file not shown (before: 109 KiB, after: 163 KiB).

View File

@ -2,7 +2,7 @@
"__inputs": [
{
"name": "DS_HMCI",
"label": "hmci",
"label": "Database",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
@ -71,7 +71,7 @@
}
]
},
"description": "https://bitbucket.org/mnellemann/hmci/",
"description": "https://git.data.coop/nellemann/hmci/ - Metrics from IBM Power Systems",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 1510,
@ -93,7 +93,7 @@
},
"id": 37,
"options": {
"content": "## Metrics collected from IBM Power HMC\n \nFor more information: [bitbucket.org/mnellemann/hmci](https://bitbucket.org/mnellemann/hmci)\n ",
"content": "## Metrics collected from IBM Power HMC\n \nFor more information visit: [git.data.coop/nellemann/hmci](https://git.data.coop/nellemann/hmci)\n ",
"mode": "markdown"
},
"pluginVersion": "9.1.6",
@ -390,7 +390,7 @@
"measurement": "lpar_details",
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"weight\") AS \"Weight\", last(\"mode\") AS \"Mode\", last(\"entitledProcUnits\") AS \"eCPU\", mean(\"utilizedProcUnits\") / mean(\"entitledProcUnits\")*100 AS \"Utilization eCPU\", last(\"currentVirtualProcessors\") AS \"vCPU\", mean(\"utilizedProcUnits\") / mean(\"maxProcUnits\") * 100 AS \"Utilization vCPU\" FROM \"lpar_processor\" WHERE (\"servername\" =~ /^$ServerName$/) AND (\"lparname\" =~ /^$LPAR$/) AND $timeFilter GROUP BY \"lparname\" fill(previous)",
"query": "SELECT last(\"weight\") AS \"Weight\", last(\"mode\") AS \"Mode\", last(\"entitledProcUnits\") AS \"eCPU\", mean(\"utilizedProcUnits\") / mean(\"entitledProcUnits\")*100 AS \"Utilization eCPU\", last(\"currentVirtualProcessors\") AS \"vCPU\", mean(\"utilizedProcUnits\") / mean(\"currentVirtualProcessors\") * 100 AS \"Utilization vCPU\" FROM \"lpar_processor\" WHERE (\"servername\" =~ /^$ServerName$/) AND (\"lparname\" =~ /^$LPAR$/) AND $timeFilter GROUP BY \"lparname\" fill(previous)",
"queryType": "randomWalk",
"rawQuery": true,
"refId": "A",
@ -534,6 +534,138 @@
"type": "influxdb",
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 3,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineStyle": {
"fill": "solid"
},
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "never",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "normal"
},
"thresholdsStyle": {
"mode": "line"
}
},
"decimals": 2,
"links": [],
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 11,
"w": 12,
"x": 0,
"y": 12
},
"id": 2,
"links": [],
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "multi",
"sort": "desc"
}
},
"pluginVersion": "8.1.4",
"targets": [
{
"alias": "$tag_lparname",
"datasource": {
"type": "influxdb",
"uid": "${DS_HMCI}"
},
"groupBy": [
{
"params": [
"$__interval"
],
"type": "time"
},
{
"params": [
"null"
],
"type": "fill"
}
],
"hide": false,
"measurement": "/^$ServerName$/",
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT mean(\"utilizedProcUnits\") AS \"usage\" FROM \"lpar_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"lparname\" =~ /^$LPAR$/) AND $timeFilter GROUP BY time($interval), \"lparname\", \"servername\" fill(linear)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
"select": [
[
{
"params": [
"value"
],
"type": "field"
},
{
"params": [],
"type": "mean"
}
]
],
"tags": []
}
],
"title": "Processor Units - Utilization Stacked",
"transformations": [],
"type": "timeseries"
},
{
"datasource": {
"type": "influxdb",
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
@ -596,11 +728,11 @@
},
"gridPos": {
"h": 11,
"w": 24,
"x": 0,
"w": 12,
"x": 12,
"y": 12
},
"id": 2,
"id": 40,
"links": [],
"options": {
"legend": {
@ -640,7 +772,7 @@
"measurement": "/^$ServerName$/",
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT (mean(\"utilizedProcUnits\") / mean(\"maxProcUnits\")) * 100 AS \"usage\" FROM \"lpar_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"lparname\" =~ /^$LPAR$/) AND $timeFilter GROUP BY time($interval), \"lparname\", \"servername\" fill(linear)",
"query": "SELECT (mean(\"utilizedProcUnits\") / mean(\"entitledProcUnits\")) * 100 AS \"usage\" FROM \"lpar_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"lparname\" =~ /^$LPAR$/) AND $timeFilter GROUP BY time($interval), \"lparname\", \"servername\" fill(linear)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@ -661,7 +793,7 @@
"tags": []
}
],
"title": "Processor Units - Utilization Percentage",
"title": "Processor Units - Utilization / Entitled",
"transformations": [],
"type": "timeseries"
},
@ -2509,7 +2641,7 @@
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "percent"
"mode": "normal"
},
"thresholdsStyle": {
"mode": "off"
@ -2521,10 +2653,6 @@
"steps": [
{
"color": "green"
},
{
"color": "red",
"value": 80
}
]
},
@ -2616,11 +2744,11 @@
]
}
],
"title": "Memory Assigned",
"title": "Memory Assigned - Stacked",
"type": "timeseries"
}
],
"refresh": false,
"refresh": "30s",
"schemaVersion": 37,
"style": "dark",
"tags": [
@ -2659,7 +2787,7 @@
"type": "influxdb",
"uid": "${DS_HMCI}"
},
"definition": "SHOW TAG VALUES FROM \"lpar_processor\" WITH KEY = \"lparname\" WHERE servername =~ /$ServerName/",
"definition": "SHOW TAG VALUES FROM \"lpar_processor\" WITH KEY = \"lparname\" WHERE servername =~ /$ServerName/ ",
"hide": 0,
"includeAll": true,
"label": "Logical Partition",
@ -2667,7 +2795,7 @@
"multiFormat": "regex values",
"name": "LPAR",
"options": [],
"query": "SHOW TAG VALUES FROM \"lpar_processor\" WITH KEY = \"lparname\" WHERE servername =~ /$ServerName/",
"query": "SHOW TAG VALUES FROM \"lpar_processor\" WITH KEY = \"lparname\" WHERE servername =~ /$ServerName/ ",
"refresh": 1,
"refresh_on_load": false,
"regex": "",
@ -2710,6 +2838,6 @@
"timezone": "browser",
"title": "HMCi - Power LPAR Overview",
"uid": "Xl7oHESGz",
"version": 4,
"version": 9,
"weekStart": ""
}
}

View File

@ -1,8 +1,8 @@
{
"__inputs": [
{
"name": "DS_INFLUXDB",
"label": "InfluxDB",
"name": "DS_HMCI",
"label": "Database",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
@ -15,7 +15,7 @@
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "9.1.3"
"version": "9.1.6"
},
{
"type": "datasource",
@ -59,7 +59,7 @@
}
]
},
"description": "https://bitbucket.org/mnellemann/hmci/",
"description": "https://git.data.coop/nellemann/hmci/ - Metrics from IBM Power Systems",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 1510,
@ -71,7 +71,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"gridPos": {
"h": 3,
@ -81,15 +81,15 @@
},
"id": 37,
"options": {
"content": "## Metrics collected from IBM Power HMC\n \nFor more information: [bitbucket.org/mnellemann/hmci](https://bitbucket.org/mnellemann/hmci)\n ",
"content": "## Metrics collected from IBM Power HMC\n \nFor more information visit: [git.data.coop/nellemann/hmci](https://git.data.coop/nellemann/hmci)\n ",
"mode": "markdown"
},
"pluginVersion": "9.1.3",
"pluginVersion": "9.1.6",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"refId": "A"
}
@ -100,7 +100,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
@ -189,7 +189,7 @@
"alias": "$tag_lparname",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"groupBy": [
{
@ -209,7 +209,7 @@
"measurement": "/^$ServerName$/",
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT mean(\"utilizedProcUnits\") / mean(\"maxProcUnits\") AS \"usage\" FROM \"lpar_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"lparname\" =~ /^$LPAR$/) AND $timeFilter GROUP BY time($interval), \"lparname\", \"servername\" fill(none)",
"query": "SELECT mean(\"utilizedProcUnits\") / mean(\"currentVirtualProcessors\") AS \"usage\" FROM \"lpar_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"lparname\" =~ /^$LPAR$/) AND $timeFilter GROUP BY time($interval), \"lparname\", \"servername\" fill(none)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@ -237,7 +237,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
@ -319,7 +319,7 @@
"alias": "$tag_servername - $tag_lparname ($col)",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"dsType": "influxdb",
"groupBy": [
@ -419,7 +419,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
@ -497,7 +497,7 @@
"alias": "$tag_servername - $tag_lparname ($col)",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"dsType": "influxdb",
"groupBy": [
@ -620,7 +620,7 @@
"current": {},
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"definition": "SHOW TAG VALUES FROM \"server_processor\" WITH KEY = \"servername\" WHERE time > now() - 24h",
"hide": 0,
@ -644,7 +644,7 @@
"current": {},
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"definition": "SHOW TAG VALUES FROM \"lpar_processor\" WITH KEY = \"lparname\" WHERE servername =~ /$ServerName/",
"hide": 0,
@ -697,6 +697,6 @@
"timezone": "browser",
"title": "HMCi - Power LPAR Utilization",
"uid": "jFsbpTH4k",
"version": 4,
"version": 2,
"weekStart": ""
}
}

View File

@ -2,7 +2,7 @@
"__inputs": [
{
"name": "DS_HMCI",
"label": "hmci",
"label": "Database",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
@ -70,7 +70,7 @@
}
]
},
"description": "https://bitbucket.org/mnellemann/hmci/",
"description": "https://git.data.coop/nellemann/hmci/ - Metrics from IBM Power Systems",
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
@ -91,7 +91,7 @@
},
"id": 11,
"options": {
"content": "## Metrics collected from IBM Power HMC\n \nFor more information: [bitbucket.org/mnellemann/hmci](https://bitbucket.org/mnellemann/hmci)\n ",
"content": "## Metrics collected from IBM Power HMC\n \nFor more information visit: [git.data.coop/nellemann/hmci](https://git.data.coop/nellemann/hmci)\n ",
"mode": "markdown"
},
"pluginVersion": "9.1.6",
@ -107,6 +107,21 @@
"transparent": true,
"type": "text"
},
{
"collapsed": false,
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 3
},
"id": 15,
"panels": [],
"repeat": "ServerName",
"repeatDirection": "h",
"title": "$ServerName",
"type": "row"
},
{
"datasource": {
"type": "influxdb",
@ -140,7 +155,7 @@
"h": 7,
"w": 24,
"x": 0,
"y": 3
"y": 4
},
"id": 7,
"options": {
@ -250,7 +265,7 @@
"h": 11,
"w": 8,
"x": 0,
"y": 10
"y": 11
},
"id": 4,
"options": {
@ -453,7 +468,7 @@
"h": 11,
"w": 16,
"x": 8,
"y": 10
"y": 11
},
"id": 12,
"options": {
@ -629,7 +644,7 @@
"h": 10,
"w": 8,
"x": 0,
"y": 21
"y": 22
},
"id": 13,
"options": {
@ -779,7 +794,7 @@
"h": 10,
"w": 16,
"x": 8,
"y": 21
"y": 22
},
"id": 5,
"options": {
@ -874,13 +889,13 @@
"type": "influxdb",
"uid": "${DS_HMCI}"
},
"definition": "SHOW TAG VALUES FROM \"server_processor\" WITH KEY = \"servername\" WHERE time > now() - 24h",
"definition": "SHOW TAG VALUES FROM \"server_energy_power\" WITH KEY = \"servername\" WHERE time > now() - 24h",
"hide": 0,
"includeAll": false,
"multi": false,
"includeAll": true,
"multi": true,
"name": "ServerName",
"options": [],
"query": "SHOW TAG VALUES FROM \"server_processor\" WITH KEY = \"servername\" WHERE time > now() - 24h",
"query": "SHOW TAG VALUES FROM \"server_energy_power\" WITH KEY = \"servername\" WHERE time > now() - 24h",
"refresh": 1,
"regex": "",
"skipUrlSync": false,
@ -912,6 +927,6 @@
"timezone": "",
"title": "HMCi - Power System Energy",
"uid": "oHcrgD1Mk",
"version": 2,
"version": 7,
"weekStart": ""
}
}

File diff suppressed because it is too large

View File

@ -1,8 +1,8 @@
{
"__inputs": [
{
"name": "DS_INFLUXDB",
"label": "InfluxDB",
"name": "DS_HMCI",
"label": "Database",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
@ -77,7 +77,7 @@
}
]
},
"description": "https://bitbucket.org/mnellemann/hmci/",
"description": "https://git.data.coop/nellemann/hmci/ - Metrics from IBM Power Systems",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 1465,
@ -90,7 +90,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"gridPos": {
"h": 3,
@ -100,7 +100,7 @@
},
"id": 33,
"options": {
"content": "## Metrics collected from IBM Power HMC\n \nFor more information: [bitbucket.org/mnellemann/hmci](https://bitbucket.org/mnellemann/hmci)\n ",
"content": "## Metrics collected from IBM Power HMC\n \nFor more information visit: [git.data.coop/nellemann/hmci](https://git.data.coop/nellemann/hmci)\n ",
"mode": "markdown"
},
"pluginVersion": "8.3.5",
@ -108,7 +108,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"refId": "A"
}
@ -147,7 +147,7 @@
"alias": "$tag_servername",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"groupBy": [
{
@ -273,7 +273,7 @@
"alias": "$tag_servername",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"groupBy": [
{
@ -381,7 +381,7 @@
"alias": "$tag_servername",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"groupBy": [
{
@ -482,7 +482,7 @@
"alias": "$tag_servername",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"groupBy": [
{
@ -597,7 +597,7 @@
"alias": "$tag_servername",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"dsType": "influxdb",
"groupBy": [

View File

@ -2,7 +2,7 @@
"__inputs": [
{
"name": "DS_HMCI",
"label": "hmci",
"label": "Database",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
@ -71,7 +71,7 @@
}
]
},
"description": "https://bitbucket.org/mnellemann/hmci/",
"description": "https://git.data.coop/nellemann/hmci/ - Metrics from IBM Power Systems",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 1465,
@ -93,7 +93,7 @@
},
"id": 29,
"options": {
"content": "## Metrics collected from IBM Power HMC\n \nFor more information: [bitbucket.org/mnellemann/hmci](https://bitbucket.org/mnellemann/hmci)\n ",
"content": "## Metrics collected from IBM Power HMC\n \nFor more information visit: [git.data.coop/nellemann/hmci](https://git.data.coop/nellemann/hmci)\n ",
"mode": "markdown"
},
"pluginVersion": "9.1.6",
@ -445,12 +445,7 @@
"show": false
},
"showHeader": true,
"sortBy": [
{
"desc": true,
"displayName": "Utilization"
}
]
"sortBy": []
},
"pluginVersion": "9.1.6",
"targets": [
@ -472,7 +467,7 @@
"measurement": "lpar_details",
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"weight\") AS \"Weight\", last(\"entitledProcUnits\") AS \"Entitled\", last(\"currentVirtualProcessors\") AS \"VP\", (last(\"utilizedProcUnits\") / last(\"maxProcUnits\")) * 100 AS \"Utilization\", last(\"mode\") AS \"Mode\" FROM \"vios_processor\" WHERE (\"servername\" =~ /^$ServerName$/) AND (\"viosname\" =~ /^$ViosName$/) AND $timeFilter GROUP BY \"viosname\" fill(previous)",
"query": "SELECT last(\"weight\") AS \"Weight\", last(\"entitledProcUnits\") AS \"Entitled\", last(\"currentVirtualProcessors\") AS \"VP\", (mean(\"utilizedProcUnits\") / mean(\"entitledProcUnits\")) * 100 AS \"Utilization\", last(\"mode\") AS \"Mode\" FROM \"vios_processor\" WHERE (\"servername\" =~ /^$ServerName$/) AND (\"viosname\" =~ /^$ViosName$/) AND $timeFilter GROUP BY \"viosname\" fill(previous)",
"queryType": "randomWalk",
"rawQuery": true,
"refId": "A",
@ -1650,7 +1645,7 @@
}
],
"hide": false,
"measurement": "vios_storage_vFC",
"measurement": "vios_storage_virtual",
"orderByTime": "ASC",
"policy": "default",
"refId": "B",
@ -1690,7 +1685,7 @@
]
}
],
"title": "Virtual Fiber Channel Adapters - $ServerName - $ViosName",
"title": "Virtual Adapters - $ServerName - $ViosName",
"type": "timeseries"
}
],
@ -1713,7 +1708,7 @@
},
"definition": "SHOW TAG VALUES FROM \"server_processor\" WITH KEY = \"servername\" WHERE time > now() - 24h",
"hide": 0,
"includeAll": false,
"includeAll": true,
"label": "Server",
"multi": true,
"multiFormat": "regex values",
@ -1786,6 +1781,6 @@
"timezone": "browser",
"title": "HMCi - Power VIO Overview",
"uid": "DDNEv5vGz",
"version": 2,
"version": 3,
"weekStart": ""
}
}

View File

@ -1,8 +1,8 @@
{
"__inputs": [
{
"name": "DS_INFLUXDB",
"label": "InfluxDB",
"name": "DS_HMCI",
"label": "Database",
"description": "",
"type": "datasource",
"pluginId": "influxdb",
@ -21,7 +21,7 @@
"type": "grafana",
"id": "grafana",
"name": "Grafana",
"version": "9.1.3"
"version": "9.1.6"
},
{
"type": "datasource",
@ -65,7 +65,7 @@
}
]
},
"description": "https://bitbucket.org/mnellemann/hmci/",
"description": "https://git.data.coop/nellemann/hmci/ - Metrics from IBM Power Systems",
"editable": true,
"fiscalYearStartMonth": 0,
"gnetId": 1465,
@ -77,7 +77,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"gridPos": {
"h": 3,
@ -87,15 +87,15 @@
},
"id": 29,
"options": {
"content": "## Metrics collected from IBM Power HMC\n \nFor more information: [bitbucket.org/mnellemann/hmci](https://bitbucket.org/mnellemann/hmci)\n ",
"content": "## Metrics collected from IBM Power HMC\n \nFor more information visit: [git.data.coop/nellemann/hmci](https://git.data.coop/nellemann/hmci)\n ",
"mode": "markdown"
},
"pluginVersion": "9.1.3",
"pluginVersion": "9.1.6",
"targets": [
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"refId": "A"
}
@ -106,7 +106,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
@ -155,13 +155,13 @@
"showThresholdLabels": false,
"showThresholdMarkers": false
},
"pluginVersion": "9.1.3",
"pluginVersion": "9.1.6",
"targets": [
{
"alias": "$tag_servername - $tag_viosname",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"dsType": "influxdb",
"groupBy": [
@ -194,7 +194,7 @@
"measurement": "vios_processor",
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT last(\"utilizedProcUnits\") / last(\"maxProcUnits\") AS \"utilization\" FROM \"vios_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"viosname\" =~ /^$ViosName$/) AND $timeFilter GROUP BY time($interval), \"viosname\", \"servername\" fill(none)",
"query": "SELECT mean(\"utilizedProcUnits\") / mean(\"entitledProcUnits\") AS \"utilization\" FROM \"vios_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"viosname\" =~ /^$ViosName$/) AND $timeFilter GROUP BY time($interval), \"viosname\", \"servername\" fill(none)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@ -257,7 +257,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
@ -344,7 +344,7 @@
"alias": "$tag_servername - $tag_viosname",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"dsType": "influxdb",
"groupBy": [
@ -377,7 +377,7 @@
"measurement": "vios_processor",
"orderByTime": "ASC",
"policy": "default",
"query": "SELECT mean(\"utilizedProcUnits\") / mean(\"maxProcUnits\") AS \"utilization\" FROM \"vios_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"viosname\" =~ /^$ViosName$/) AND $timeFilter GROUP BY time($interval), \"viosname\", \"servername\" fill(none)",
"query": "SELECT mean(\"utilizedProcUnits\") / mean(\"entitledProcUnits\") AS \"utilization\" FROM \"vios_processor\" WHERE (\"servername\" =~ /^$ServerName$/ AND \"viosname\" =~ /^$ViosName$/) AND $timeFilter GROUP BY time($interval), \"viosname\", \"servername\" fill(none)",
"rawQuery": true,
"refId": "A",
"resultFormat": "time_series",
@ -440,7 +440,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
@ -527,7 +527,7 @@
"alias": "$tag_servername - $tag_viosname ($tag_location - $col)",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"dsType": "influxdb",
"groupBy": [
@ -645,7 +645,7 @@
{
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"description": "",
"fieldConfig": {
@ -727,7 +727,7 @@
"alias": "$tag_servername - $tag_viosname ($tag_location - $col)",
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"dsType": "influxdb",
"groupBy": [
@ -860,7 +860,7 @@
"current": {},
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"definition": "SHOW TAG VALUES FROM \"server_processor\" WITH KEY = \"servername\" WHERE time > now() - 24h",
"hide": 0,
@ -884,7 +884,7 @@
"current": {},
"datasource": {
"type": "influxdb",
"uid": "${DS_INFLUXDB}"
"uid": "${DS_HMCI}"
},
"definition": "SHOW TAG VALUES FROM \"vios_details\" WITH KEY = \"viosname\" WHERE servername =~ /$ServerName/ AND time > now() - 24h",
"hide": 0,
@ -906,7 +906,7 @@
]
},
"time": {
"from": "now-2d",
"from": "now-7d",
"now": false,
"to": "now-30s"
},
@ -937,6 +937,6 @@
"timezone": "browser",
"title": "HMCi - Power VIO Utilization",
"uid": "DDNEv5vGy",
"version": 10,
"version": 2,
"weekStart": ""
}
}

View File

@ -1,17 +1,25 @@
# HMCi Configuration
# Copy this file into /etc/hmci.toml and customize it to your environment.
###
### Define one InfluxDB to save metrics into
### There must be only one and it should be named [influx]
###
# InfluxDB v1.x example
#[influx]
#url = "http://localhost:8086"
#username = "root"
#password = ""
#database = "hmci"
# InfluxDB v2.x example
[influx]
url = "http://localhost:8086"
username = "root"
password = ""
database = "hmci"
org = "myOrg"
token = "rAnd0mT0k3nG3neRaT3dByInF1uxDb=="
bucket = "hmci"
###

View File

@ -1,19 +1,20 @@
# Instructions for AIX Systems
Ensure you have **correct date/time** and NTPd running to keep it accurate!
Please note that the software versions referenced in this document might have changed and might not be available/working unless updated.
More details are available in the [README.md](../README.md) file.
- Grafana and InfluxDB can be downloaded from the [Power DevOps](https://www.power-devops.com/) website - look under the *Monitor* section.
- Ensure Java (version 8 or later) is installed and available in your PATH.
- Ensure Java (version 8 or later) is installed and available in your PATH (e.g. set in the */etc/environment* file).
## Download and Install HMCi
[Download](https://git.data.coop/nellemann/-/packages/generic/hmci/) the latest version of HMCi package for rpm.
```shell
wget https://bitbucket.org/mnellemann/hmci/downloads/hmci-1.3.1-1_all.rpm
rpm -i --ignoreos hmci-1.3.1-1_all.rpm
rpm -ivh --ignoreos hmci-1.4.2-1_all.rpm
cp /opt/hmci/doc/hmci.toml /etc/
```

View File

@ -2,14 +2,14 @@
Please note that the software versions referenced in this document might have changed and might not be available/working unless updated.
More details are available in the [README.md](../README.md) file.
Ensure you have **correct date/time** and NTPd running to keep it accurate!
All commands should be run as root or through sudo.
## Install the Java Runtime from repository
```shell
apt-get install default-jre-headless
apt-get install default-jre-headless wget
```
@ -25,30 +25,38 @@ systemctl start influxdb
Run the ```influx``` cli command and create the *hmci* database.
```sql
CREATE DATABASE "hmci" WITH DURATION 365d REPLICATION 1;
```
## Download and Install Grafana
```shell
sudo apt-get install -y adduser libfontconfig1
wget https://dl.grafana.com/oss/release/grafana_9.1.3_amd64.deb
dpkg -i grafana_9.1.3_amd64.deb
apt-get install -y adduser libfontconfig1
wget https://dl.grafana.com/oss/release/grafana_9.1.7_amd64.deb
dpkg -i grafana_9.1.7_amd64.deb
systemctl daemon-reload
systemctl enable grafana-server
systemctl start grafana-server
```
When logged in to Grafana (port 3000, admin/admin) create a datasource that points to the local InfluxDB. Now import the provided dashboards.
## Download and Install HMCi
[Download](https://git.data.coop/nellemann/-/packages/generic/hmci/) the latest version of HMCi packaged for deb.
```shell
wget https://bitbucket.org/mnellemann/hmci/downloads/hmci_1.3.1-1_all.deb
dpkg -i hmci_1.3.1-1_all.deb
wget https://git.data.coop/api/packages/nellemann/generic/hmci/v1.4.2/hmci_1.4.2-1_all.deb
dpkg -i hmci_1.4.2-1_all.deb
cp /opt/hmci/doc/hmci.toml /etc/
cp /opt/hmci/doc/hmci.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable hmci
```
Now modify */etc/hmci.toml* and test setup by running ```/opt/hmci/bin/hmci -d``` manually and verify connection to HMC and InfluxDB. Afterwards start service with ```systemctl start hmci``` .
## Configure HMCi
Now modify **/etc/hmci.toml** (edit URL and credentials to your HMCs) and test the setup by running ```/opt/hmci/bin/hmci -d``` in the foreground/terminal and look for any errors.
Press CTRL+C to stop and then start as a background service with ```systemctl start hmci```.
You can see the log/output by running ```journalctl -f -u hmci```.
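The same steps collected into one sequence (no new commands or flags assumed, only those mentioned above):
```shell
/opt/hmci/bin/hmci -d        # run in the foreground and watch for HMC/InfluxDB errors
# press CTRL+C when the output looks fine, then:
systemctl start hmci         # start as a background service
journalctl -f -u hmci        # follow the service log
```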

40
doc/readme-grafana.md Normal file
View File

@ -0,0 +1,40 @@
# Grafana Setup
When installed, Grafana listens on [http://localhost:3000](http://localhost:3000) and you can log in as user *admin* with password *admin*. Once logged in, you are asked to change the default password.
## Datasource
- Configure Grafana to use InfluxDB as a new datasource
- Name the datasource **hmci** to make it obvious what it contains.
- You would typically use *http://localhost:8086* without any credentials (see the quick reachability check below this list).
- For InfluxDB 2.x add a custom header: Authorization = Token myTokenFromInfluxDB
- The name of the database would be *hmci* (or another name you used when creating it)
- **NOTE:** set *Min time interval* to *30s* or *1m* depending on your HMCi *refresh* setting.
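Before wiring up the datasource, it can help to confirm that the URL above actually answers. This check is not part of the HMCi tooling, just a plain HTTP probe; InfluxDB's */ping* endpoint returns *204 No Content* when the server is healthy:
```shell
# Quick reachability check against the InfluxDB HTTP API (expect HTTP 204)
curl -i http://localhost:8086/ping
```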
## Dashboards
Import all or some of the example dashboards from [dashboards/*.json](dashboards/) into Grafana as a starting point and get creative making your own cool dashboards - please share anything useful :)
- When importing a dashboard, select the **hmci** datasource you have created.
## Security and Proxy
The easiest way to secure Grafana with https is to put it behind a proxy server such as nginx.
If you want to serve /grafana as shown below, you also need to edit */etc/grafana/grafana.ini* and change the *root_url*:
```
root_url = %(protocol)s://%(domain)s:%(http_port)s/grafana/
```
Nginx snippet:
```nginx
location /grafana/ {
proxy_pass http://localhost:3000/;
proxy_set_header Host $host;
}
```

39
doc/readme-hmc.md Normal file
View File

@ -0,0 +1,39 @@
# IBM Power HMC Preparations
Ensure you have **correct date/time** and NTPd running to keep it accurate!
- Login to your HMC
- Navigate to *Console Settings*
- Go to *Change Date and Time*
- Set correct timezone, if not done already
- Configure one or more NTP servers, if not done already
- Enable the NTP client, if not done already
- Navigate to *Users and Security*
- Create a new read-only/viewer **hmci** user, which will be used to connect to the HMC.
- Click *Manage User Profiles and Access*, edit the newly created *hmci* user and click *User Properties*:
- Set *Session timeout minutes* to **120** (or at least 61 minutes)
- Set *Verify timeout minutes* to **15**
- Set *Idle timeout minutes* to **15**
- Set *Minimum time in days between password changes* to **0**
- **Enable** *Allow remote access via the web*
- Navigate to *HMC Management* and *Console Settings*
- Click *Change Performance Monitoring Settings*:
- Enable *Performance Monitoring Data Collection for Managed Servers*: **All On**
- Set *Performance Data Storage* to **1** day or preferably more
If you do not enable *Performance Monitoring Data Collection for Managed Servers*, you will see errors such as *Unexpected response: 403*.
Use the HMCi debug option (*--debug*) to get more details about what is going on.
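For example, the collector can be run in the foreground with debug logging while reproducing the error; the *--debug* flag is the one mentioned above, and *--conf* plus the default */etc/hmci.toml* path are the ones documented in the installation notes:
```shell
# Run HMCi in the foreground with debug output to see the raw HMC responses
/opt/hmci/bin/hmci --debug --conf /etc/hmci.toml
```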
## Configure date/time through CLI
Example showing how you configure related settings through the HMC CLI:
```shell
chhmc -c date -s modify --datetime MMDDhhmm # Set current date/time: MMDDhhmm[[CC]YY][.ss]
chhmc -c date -s modify --timezone Europe/Copenhagen # Configure your timezone
chhmc -c xntp -s enable # Enable the NTP service
chhmc -c xntp -s add -a IP_Addr # Add a remote NTP server
```
Remember to reboot your HMC after changing the timezone.

10
doc/readme-influxdb.md Normal file
View File

@ -0,0 +1,10 @@
# InfluxDB Notes
## Delete data
To delete *all* data before a specific date, run:
```sql
DELETE WHERE time < '2023-01-01'
```

View File

@ -2,16 +2,16 @@
Please note that the software versions referenced in this document might have changed and might not be available/working unless updated.
More details are available in the [README.md](../README.md) file. If you are running Linux on Power (ppc64le) you should look for ppc64le packages at the [Power DevOps](https://www.power-devops.com/) website.
Ensure you have **correct date/time** and NTPd running to keep it accurate!
All commands should be run as root or through sudo.
## Install the Java Runtime from repository
```shell
dnf install java-11-openjdk-headless
dnf install java-11-openjdk-headless wget
# or
yum install java-11-openjdk-headless
yum install java-11-openjdk-headless wget
```
@ -24,33 +24,45 @@ systemctl daemon-reload
systemctl enable influxdb
systemctl start influxdb
```
If you are running Linux on Power, you can find ppc64le InfluxDB packages on the [Power DevOps](https://www.power-devops.com/influxdb) site. Remember to pick the 1.8 or 1.9 version.
Run the ```influx``` cli command and create the *hmci* database.
```sql
CREATE DATABASE "hmci" WITH DURATION 365d REPLICATION 1;
```
## Download and Install Grafana
```shell
wget https://dl.grafana.com/oss/release/grafana-9.1.3-1.x86_64.rpm
rpm -ivh grafana-9.1.3-1.x86_64.rpm
wget https://dl.grafana.com/oss/release/grafana-9.1.7-1.x86_64.rpm
rpm -ivh grafana-9.1.7-1.x86_64.rpm
systemctl daemon-reload
systemctl enable grafana-server
systemctl start grafana-server
```
When logged in to Grafana (port 3000, admin/admin) create a datasource that points to the local InfluxDB. Now import the provided dashboards.
If you are running Linux on Power, you can find ppc64le Grafana packages on the [Power DevOps](https://www.power-devops.com/grafana) site.
## Download and Install HMCi
[Download](https://git.data.coop/nellemann/-/packages/generic/hmci/) the latest version of HMCi packaged for rpm.
```shell
wget https://bitbucket.org/mnellemann/hmci/downloads/hmci-1.3.1-1_all.rpm
rpm -ivh hmci-1.3.1-1_all.rpm
wget https://git.data.coop/api/packages/nellemann/generic/hmci/v1.4.4/hmci-1.4.2-1.noarch.rpm
rpm -ivh hmci-1.4.2-1.noarch.rpm
cp /opt/hmci/doc/hmci.toml /etc/
cp /opt/hmci/doc/hmci.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable hmci
systemctl start hmci
```
Now modify */etc/hmci.toml* and test your setup by running ```/opt/hmci/bin/hmci -d``` manually and verify connection to HMC and InfluxDB. Afterwards start service with ```systemctl start hmci``` .
## Configure HMCi
Now modify **/etc/hmci.toml** (edit URL and credentials to your HMCs) and test the setup by running ```/opt/hmci/bin/hmci -d``` in the foreground/terminal and look for any errors.
Press CTRL+C to stop and then start as a background service with ```systemctl start hmci```.
You can see the log/output by running ```journalctl -f -u hmci```.

View File

@ -2,14 +2,14 @@
Please note that the software versions referenced in this document might have changed and might not be available/working unless updated.
More details are available in the [README.md](../README.md) file. If you are running Linux on Power (ppc64le) you should look for ppc64le packages at the [Power DevOps](https://www.power-devops.com/) website.
Ensure you have **correct date/time** and NTPd running to keep it accurate!
All commands should be run as root or through sudo.
## Install the Java Runtime from repository
```shell
zypper install java-11-openjdk-headless
zypper install java-11-openjdk-headless wget
```
@ -23,31 +23,47 @@ systemctl enable influxdb
systemctl start influxdb
```
If you are running Linux on Power, you can find ppc64le InfluxDB packages on the [Power DevOps](https://www.power-devops.com/influxdb) site. Remember to pick the 1.8 or 1.9 version.
Run the ```influx``` cli command and create the *hmci* database.
```sql
CREATE DATABASE "hmci" WITH DURATION 365d REPLICATION 1;
```
## Download and Install Grafana
```shell
wget https://dl.grafana.com/oss/release/grafana-9.1.3-1.x86_64.rpm
rpm -ivh --nodeps grafana-9.1.3-1.x86_64.rpm
wget https://dl.grafana.com/oss/release/grafana-9.1.7-1.x86_64.rpm
rpm -ivh --nodeps grafana-9.1.7-1.x86_64.rpm
systemctl daemon-reload
systemctl enable grafana-server
systemctl start grafana-server
```
When logged in to Grafana (port 3000, admin/admin) create a datasource that points to the local InfluxDB. Now import the provided dashboards.
If you are running Linux on Power, you can find ppc64le Grafana packages on the [Power DevOps](https://www.power-devops.com/grafana) site.
## Download and Install HMCi
[Download](https://git.data.coop/nellemann/-/packages/generic/hmci/) the latest version of HMCi packaged for rpm.
```shell
wget https://bitbucket.org/mnellemann/hmci/downloads/hmci-1.3.1-1_all.rpm
rpm -ivh hmci-1.3.1-1_all.rpm
wget https://git.data.coop/api/packages/nellemann/generic/hmci/v1.4.2/hmci-1.4.2-1.noarch.rpm
rpm -ivh hmci-1.4.2-1.noarch.rpm
cp /opt/hmci/doc/hmci.toml /etc/
cp /opt/hmci/doc/hmci.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable hmci
```
Now modify */etc/hmci.toml* and test your setup by running ```/opt/hmci/bin/hmci -d``` manually and verify connection to HMC and InfluxDB. Afterwards start service with ```systemctl start hmci``` .
## Configure HMCi
Now modify **/etc/hmci.toml** (edit URL and credentials to your HMCs) and test the setup by running ```/opt/hmci/bin/hmci -d``` in the foreground/terminal and look for any errors.
Press CTRL+C to stop and then start as a background service with ```systemctl start hmci```.
You can see the log/output by running ```journalctl -f -u hmci```.

Binary files not shown: seven new screenshots added (sizes 860 KiB – 2.4 MiB), including doc/screenshots/vio-io.png (2.3 MiB).

View File

@ -1,3 +1,3 @@
projectId = hmci
projectGroup = biz.nellemann.hmci
projectVersion = 1.4.0
projectVersion = 1.4.5

Binary file not shown.

View File

@ -1,5 +1,5 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.5.1-bin.zip
distributionUrl=https\://services.gradle.org/distributions/gradle-7.6-bin.zip
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

6
gradlew vendored
View File

@ -205,6 +205,12 @@ set -- \
org.gradle.wrapper.GradleWrapperMain \
"$@"
# Stop when "xargs" is not available.
if ! command -v xargs >/dev/null 2>&1
then
die "xargs is not available"
fi
# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.

14
gradlew.bat vendored
View File

@ -14,7 +14,7 @@
@rem limitations under the License.
@rem
@if "%DEBUG%" == "" @echo off
@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@ -25,7 +25,7 @@
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%" == "" set DIRNAME=.
if "%DIRNAME%"=="" set DIRNAME=.
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@ -40,7 +40,7 @@ if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if "%ERRORLEVEL%" == "0" goto execute
if %ERRORLEVEL% equ 0 goto execute
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
@ -75,13 +75,15 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
:end
@rem End local scope for the variables with windows NT shell
if "%ERRORLEVEL%"=="0" goto mainEnd
if %ERRORLEVEL% equ 0 goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1
exit /b 1
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%
:mainEnd
if "%OS%"=="Windows_NT" endlocal

src/main/java/biz/nellemann/hmci/Application.java
View File

@ -15,17 +15,19 @@
*/
package biz.nellemann.hmci;
import biz.nellemann.hmci.dto.toml.Configuration;
import com.fasterxml.jackson.dataformat.toml.TomlMapper;
import picocli.CommandLine;
import picocli.CommandLine.Option;
import picocli.CommandLine.Command;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import com.fasterxml.jackson.dataformat.toml.TomlMapper;
import biz.nellemann.hmci.dto.toml.Configuration;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
@Command(name = "hmci",
mixinStandardHelpOptions = true,
versionProvider = biz.nellemann.hmci.VersionProvider.class,
@ -90,7 +92,7 @@ public class Application implements Callable<Integer> {
}
influxClient.logoff();
} catch (Exception e) {
} catch (IOException | InterruptedException e) {
System.err.println(e.getMessage());
return 1;
}

src/main/java/biz/nellemann/hmci/InfluxClient.java
View File

@ -15,66 +15,84 @@
*/
package biz.nellemann.hmci;
import biz.nellemann.hmci.dto.toml.InfluxConfiguration;
import org.influxdb.BatchOptions;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Point;
import static java.lang.Thread.sleep;
import java.util.ArrayList;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.time.Instant;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
import com.influxdb.client.InfluxDBClient;
import com.influxdb.client.InfluxDBClientFactory;
import com.influxdb.client.WriteApi;
import com.influxdb.client.WriteOptions;
import com.influxdb.client.domain.WritePrecision;
import com.influxdb.client.write.Point;
import biz.nellemann.hmci.dto.toml.InfluxConfiguration;
import static java.lang.Thread.sleep;
public final class InfluxClient {
private final static Logger log = LoggerFactory.getLogger(InfluxClient.class);
final private String url;
final private String username;
final private String password;
final private String database;
final private String org; // v2 only
final private String token;
final private String bucket; // Bucket in v2, Database in v1
private InfluxDBClient influxDBClient;
private WriteApi writeApi;
private InfluxDB influxDB;
InfluxClient(InfluxConfiguration config) {
this.url = config.url;
this.username = config.username;
this.password = config.password;
this.database = config.database;
if(config.org != null) {
this.org = config.org;
} else {
this.org = "hmci"; // In InfluxDB 1.x, there is no concept of organization.
}
if(config.token != null) {
this.token = config.token;
} else {
this.token = config.username + ":" + config.password;
}
if(config.bucket != null) {
this.bucket = config.bucket;
} else {
this.bucket = config.database;
}
}
synchronized void login() throws RuntimeException, InterruptedException {
if(influxDB != null) {
if(influxDBClient != null) {
return;
}
boolean connected = false;
int loginErrors = 0;
do {
try {
log.debug("Connecting to InfluxDB - {}", url);
influxDB = InfluxDBFactory.connect(url, username, password).setDatabase(database);
influxDB.version(); // This ensures that we actually try to connect to the db
influxDBClient = InfluxDBClientFactory.create(url, token.toCharArray(), org, bucket);
influxDBClient.version(); // This ensures that we actually try to connect to the db
Runtime.getRuntime().addShutdownHook(new Thread(influxDBClient::close));
influxDB.enableBatch(
BatchOptions.DEFAULTS
.threadFactory(runnable -> {
Thread thread = new Thread(runnable);
thread.setDaemon(true);
return thread;
})
);
Runtime.getRuntime().addShutdownHook(new Thread(influxDB::close));
// Todo: Handle events - https://github.com/influxdata/influxdb-client-java/tree/master/client#handle-the-events
writeApi = influxDBClient.makeWriteApi(
WriteOptions.builder()
.batchSize(15_000)
.bufferLimit(500_000)
.flushInterval(5_000)
.build());
connected = true;
} catch(Exception e) {
sleep(15 * 1000);
if(loginErrors++ > 3) {
@ -90,60 +108,33 @@ public final class InfluxClient {
synchronized void logoff() {
if(influxDB != null) {
influxDB.close();
if(influxDBClient != null) {
influxDBClient.close();
}
influxDB = null;
influxDBClient = null;
}
public void write(List<Measurement> measurements, Instant timestamp, String measurement) {
log.debug("write() - measurement: {} {}", measurement, measurements.size());
processMeasurementMap(measurements, timestamp, measurement).forEach( (point) -> { influxDB.write(point); });
public void write(List<Measurement> measurements, String name) {
log.debug("write() - measurement: {} {}", name, measurements.size());
if(!measurements.isEmpty()) {
processMeasurementMap(measurements, name).forEach((point) -> {
writeApi.writePoint(point);
});
}
}
private List<Point> processMeasurementMap(List<Measurement> measurements, Instant timestamp, String measurement) {
private List<Point> processMeasurementMap(List<Measurement> measurements, String name) {
List<Point> listOfPoints = new ArrayList<>();
measurements.forEach( (m) -> {
Point.Builder builder = Point.measurement(measurement)
.time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
.tag(m.tags)
.fields(m.fields);
/*
// Iterate fields
m.fields.forEach((fieldName, fieldValue) -> {
log.info("processMeasurementMap() {} - fieldName: {}, fieldValue: {}", measurement, fieldName, fieldValue);
if(fieldValue instanceof Number) {
Number num = (Number) fieldValue;
builder.addField(fieldName, num);
} else if(fieldValue instanceof Boolean) {
Boolean bol = (Boolean) fieldValue;
builder.addField(fieldName, bol);
} else {
String str = (String) fieldValue;
builder.addField(fieldName, str);
}
});
// Iterate sorted tags
Map<String, String> sortedTags = new TreeMap<>(m.tags);
sortedTags.forEach((tagName, tagValue) -> {
log.info("processMeasurementMap() {} - tagName: {}, tagValue: {}", measurement, tagName, tagValue);
builder.tag(tagName, tagValue);
});
*/
/*
if(m.fields.size() > 0 && m.tags.size() > 0) {
listOfPoints.add(builderbuilder.build());
}*/
listOfPoints.add(builder.build());
log.trace("processMeasurementMap() - timestamp: {}, tags: {}, fields: {}", m.timestamp, m.tags, m.fields);
Point point = new Point(name)
.time(m.timestamp.getEpochSecond(), WritePrecision.S)
.addTags(m.tags)
.addFields(m.fields);
listOfPoints.add(point);
});
return listOfPoints;
}
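The InfluxClient changes above replace the InfluxDB 1.x client (org.influxdb.*) with the 2.x client (com.influxdb.client.*), keeping 1.x compatibility by falling back to `username:password` as the token and the database name as the bucket. As orientation only, here is a minimal, self-contained sketch of that write path; the URL, credentials, measurement name and field values are placeholders, not taken from the repository.
```java
import java.time.Instant;

import com.influxdb.client.InfluxDBClient;
import com.influxdb.client.InfluxDBClientFactory;
import com.influxdb.client.WriteApi;
import com.influxdb.client.WriteOptions;
import com.influxdb.client.domain.WritePrecision;
import com.influxdb.client.write.Point;

public class InfluxV2WriteSketch {
    public static void main(String[] args) {
        // v1-compatible fallback: token = "username:password", bucket = database name.
        try (InfluxDBClient client = InfluxDBClientFactory.create(
                 "http://localhost:8086", "hmci:secret".toCharArray(), "hmci", "hmci");
             WriteApi writeApi = client.makeWriteApi(
                 WriteOptions.builder().batchSize(15_000).bufferLimit(500_000).flushInterval(5_000).build())) {

            // One point per measurement, tagged and timestamped in seconds,
            // mirroring processMeasurementMap() above.
            Point point = new Point("lpar_processor")
                    .time(Instant.now().getEpochSecond(), WritePrecision.S)
                    .addTag("servername", "server1")
                    .addTag("lparname", "lpar1")
                    .addField("utilizedProcUnits", 0.42);

            writeApi.writePoint(point); // buffered; flushed in batches by the write API
        } // closing the write API and client flushes any remaining points
    }
}
```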

src/main/java/biz/nellemann/hmci/LogicalPartition.java
View File

@ -19,7 +19,6 @@ import biz.nellemann.hmci.dto.xml.Link;
import biz.nellemann.hmci.dto.xml.LogicalPartitionEntry;
import biz.nellemann.hmci.dto.xml.XmlEntry;
import biz.nellemann.hmci.dto.xml.XmlFeed;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -28,16 +27,16 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;
class LogicalPartition extends Resource {
private final static Logger log = LoggerFactory.getLogger(LogicalPartition.class);
private final RestClient restClient;
private final InfluxClient influxClient;
private final ManagedSystem managedSystem;
protected String id;
protected String name;
protected LogicalPartitionEntry entry;
@ -45,9 +44,10 @@ class LogicalPartition extends Resource {
private String uriPath;
public LogicalPartition(RestClient restClient, String href, ManagedSystem managedSystem) throws URISyntaxException {
public LogicalPartition(RestClient restClient, InfluxClient influxClient, String href, ManagedSystem managedSystem) throws URISyntaxException {
log.debug("LogicalPartition() - {}", href);
this.restClient = restClient;
this.influxClient = influxClient;
this.managedSystem = managedSystem;
try {
URI uri = new URI(href);
@ -99,9 +99,9 @@ class LogicalPartition extends Resource {
public void refresh() {
log.debug("refresh()");
log.debug("refresh() - {}", name);
try {
String xml = restClient.getRequest(String.format("/rest/api/pcm/ManagedSystem/%s/LogicalPartition/%s/ProcessedMetrics?NoOfSamples=1", managedSystem.id, id));
String xml = restClient.getRequest(String.format("/rest/api/pcm/ManagedSystem/%s/LogicalPartition/%s/ProcessedMetrics?NoOfSamples=%d", managedSystem.id, id, noOfSamples));
// Do not try to parse empty response
if(xml == null || xml.length() <= 1) {
@ -134,248 +134,235 @@ class LogicalPartition extends Resource {
}
@Override
public void process(int sample) throws NullPointerException {
log.debug("process() - {} - sample: {}", name, sample);
influxClient.write(getDetails(sample),"lpar_details");
influxClient.write(getMemoryMetrics(sample),"lpar_memory");
influxClient.write(getProcessorMetrics(sample),"lpar_processor");
influxClient.write(getSriovLogicalPorts(sample),"lpar_net_sriov");
influxClient.write(getVirtualEthernetAdapterMetrics(sample),"lpar_net_virtual");
influxClient.write(getVirtualGenericAdapterMetrics(sample),"lpar_storage_virtual");
influxClient.write(getVirtualFibreChannelAdapterMetrics(sample),"lpar_storage_vFC");
}
// LPAR Details
List<Measurement> getDetails() {
List<Measurement> getDetails(int sample) throws NullPointerException {
log.debug("getDetails()");
List<Measurement> list = new ArrayList<>();
try {
Map<String, String> tagsMap = new HashMap<>();
TreeMap<String, Object> fieldsMap = new TreeMap<>();
Map<String, String> tagsMap = new HashMap<>();
TreeMap<String, Object> fieldsMap = new TreeMap<>();
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
log.trace("getDetails() - tags: " + tagsMap);
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
log.trace("getDetails() - tags: " + tagsMap);
fieldsMap.put("id", metric.getSample().lparsUtil.id);
fieldsMap.put("type", metric.getSample().lparsUtil.type);
fieldsMap.put("state", metric.getSample().lparsUtil.state);
fieldsMap.put("osType", metric.getSample().lparsUtil.osType);
fieldsMap.put("affinityScore", metric.getSample().lparsUtil.affinityScore);
log.trace("getDetails() - fields: " + fieldsMap);
fieldsMap.put("id", metric.getSample(sample).lparsUtil.id);
fieldsMap.put("type", metric.getSample(sample).lparsUtil.type);
fieldsMap.put("state", metric.getSample(sample).lparsUtil.state);
fieldsMap.put("osType", metric.getSample(sample).lparsUtil.osType);
fieldsMap.put("affinityScore", metric.getSample(sample).lparsUtil.affinityScore);
log.trace("getDetails() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getDetails() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
return list;
}
// LPAR Memory
List<Measurement> getMemoryMetrics() {
List<Measurement> getMemoryMetrics(int sample) throws NullPointerException {
log.debug("getMemoryMetrics()");
List<Measurement> list = new ArrayList<>();
try {
Map<String, String> tagsMap = new HashMap<>();
TreeMap<String, Object> fieldsMap = new TreeMap<>();
Map<String, String> tagsMap = new HashMap<>();
TreeMap<String, Object> fieldsMap = new TreeMap<>();
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
log.trace("getMemoryMetrics() - tags: " + tagsMap);
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
log.trace("getMemoryMetrics() - tags: " + tagsMap);
fieldsMap.put("logicalMem", metric.getSample().lparsUtil.memory.logicalMem);
fieldsMap.put("backedPhysicalMem", metric.getSample().lparsUtil.memory.backedPhysicalMem);
log.trace("getMemoryMetrics() - fields: " + fieldsMap);
fieldsMap.put("logicalMem", metric.getSample(sample).lparsUtil.memory.logicalMem);
fieldsMap.put("backedPhysicalMem", metric.getSample(sample).lparsUtil.memory.backedPhysicalMem);
log.trace("getMemoryMetrics() - fields: " + fieldsMap);
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
list.add(new Measurement(tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getMemoryMetrics() - error: {}", e.getMessage());
}
return list;
}
// LPAR Processor
List<Measurement> getProcessorMetrics() {
List<Measurement> getProcessorMetrics(int sample) throws NullPointerException {
log.debug("getProcessorMetrics()");
List<Measurement> list = new ArrayList<>();
try {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
log.trace("getProcessorMetrics() - tags: " + tagsMap);
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
log.trace("getProcessorMetrics() - tags: " + tagsMap);
fieldsMap.put("utilizedProcUnits", metric.getSample().lparsUtil.processor.utilizedProcUnits);
fieldsMap.put("entitledProcUnits", metric.getSample().lparsUtil.processor.entitledProcUnits);
fieldsMap.put("donatedProcUnits", metric.getSample().lparsUtil.processor.donatedProcUnits);
fieldsMap.put("idleProcUnits", metric.getSample().lparsUtil.processor.idleProcUnits);
fieldsMap.put("maxProcUnits", metric.getSample().lparsUtil.processor.maxProcUnits);
fieldsMap.put("maxVirtualProcessors", metric.getSample().lparsUtil.processor.maxVirtualProcessors);
fieldsMap.put("currentVirtualProcessors", metric.getSample().lparsUtil.processor.currentVirtualProcessors);
fieldsMap.put("utilizedCappedProcUnits", metric.getSample().lparsUtil.processor.utilizedCappedProcUnits);
fieldsMap.put("utilizedUncappedProcUnits", metric.getSample().lparsUtil.processor.utilizedUncappedProcUnits);
fieldsMap.put("timePerInstructionExecution", metric.getSample().lparsUtil.processor.timeSpentWaitingForDispatch);
fieldsMap.put("timeSpentWaitingForDispatch", metric.getSample().lparsUtil.processor.timePerInstructionExecution);
fieldsMap.put("mode", metric.getSample().lparsUtil.processor.mode);
fieldsMap.put("weight", metric.getSample().lparsUtil.processor.weight);
fieldsMap.put("poolId", metric.getSample().lparsUtil.processor.poolId);
log.trace("getProcessorMetrics() - fields: " + fieldsMap);
fieldsMap.put("utilizedProcUnits", metric.getSample(sample).lparsUtil.processor.utilizedProcUnits);
fieldsMap.put("entitledProcUnits", metric.getSample(sample).lparsUtil.processor.entitledProcUnits);
fieldsMap.put("donatedProcUnits", metric.getSample(sample).lparsUtil.processor.donatedProcUnits);
fieldsMap.put("idleProcUnits", metric.getSample(sample).lparsUtil.processor.idleProcUnits);
fieldsMap.put("maxProcUnits", metric.getSample(sample).lparsUtil.processor.maxProcUnits);
fieldsMap.put("maxVirtualProcessors", metric.getSample(sample).lparsUtil.processor.maxVirtualProcessors);
fieldsMap.put("currentVirtualProcessors", metric.getSample(sample).lparsUtil.processor.currentVirtualProcessors);
fieldsMap.put("utilizedCappedProcUnits", metric.getSample(sample).lparsUtil.processor.utilizedCappedProcUnits);
fieldsMap.put("utilizedUncappedProcUnits", metric.getSample(sample).lparsUtil.processor.utilizedUncappedProcUnits);
fieldsMap.put("timePerInstructionExecution", metric.getSample(sample).lparsUtil.processor.timeSpentWaitingForDispatch);
fieldsMap.put("timeSpentWaitingForDispatch", metric.getSample(sample).lparsUtil.processor.timePerInstructionExecution);
fieldsMap.put("mode", metric.getSample(sample).lparsUtil.processor.mode);
fieldsMap.put("weight", metric.getSample(sample).lparsUtil.processor.weight);
fieldsMap.put("poolId", metric.getSample(sample).lparsUtil.processor.poolId);
log.trace("getProcessorMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getProcessorMetrics() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
return list;
}
// LPAR Network - Virtual
List<Measurement> getVirtualEthernetAdapterMetrics() {
List<Measurement> getVirtualEthernetAdapterMetrics(int sample) throws NullPointerException {
log.debug("getVirtualEthernetAdapterMetrics()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().lparsUtil.network.virtualEthernetAdapters.forEach(adapter -> {
metric.getSample(sample).lparsUtil.network.virtualEthernetAdapters.forEach(adapter -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("location", adapter.physicalLocation);
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("vlanId", adapter.vlanId.toString());
tagsMap.put("vswitchId", adapter.vswitchId.toString());
log.trace("getVirtualEthernetAdapterMetrics() - tags: " + tagsMap);
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("location", adapter.physicalLocation);
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("vlanId", adapter.vlanId.toString());
tagsMap.put("vswitchId", adapter.vswitchId.toString());
log.trace("getVirtualEthernetAdapterMetrics() - tags: " + tagsMap);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("droppedPhysicalPackets", adapter.droppedPhysicalPackets);
fieldsMap.put("isPortVlanId", adapter.isPortVlanId);
fieldsMap.put("receivedPhysicalBytes", adapter.receivedPhysicalBytes);
fieldsMap.put("receivedPhysicalPackets", adapter.receivedPhysicalPackets);
fieldsMap.put("sentPhysicalBytes", adapter.sentPhysicalBytes);
fieldsMap.put("sentPhysicalPackets", adapter.sentPhysicalPackets);
fieldsMap.put("receivedBytes", adapter.receivedBytes);
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("sentBytes", adapter.sentBytes);
fieldsMap.put("sentPackets", adapter.sentPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
fieldsMap.put("transferredPhysicalBytes", adapter.transferredPhysicalBytes);
fieldsMap.put("sharedEthernetAdapterId", adapter.sharedEthernetAdapterId);
log.trace("getVirtualEthernetAdapterMetrics() - fields: " + fieldsMap);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("droppedPhysicalPackets", adapter.droppedPhysicalPackets);
fieldsMap.put("isPortVlanId", adapter.isPortVlanId);
fieldsMap.put("receivedPhysicalBytes", adapter.receivedPhysicalBytes);
fieldsMap.put("receivedPhysicalPackets", adapter.receivedPhysicalPackets);
fieldsMap.put("sentPhysicalBytes", adapter.sentPhysicalBytes);
fieldsMap.put("sentPhysicalPackets", adapter.sentPhysicalPackets);
fieldsMap.put("receivedBytes", adapter.receivedBytes);
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("sentBytes", adapter.sentBytes);
fieldsMap.put("sentPackets", adapter.sentPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
fieldsMap.put("transferredPhysicalBytes", adapter.transferredPhysicalBytes);
fieldsMap.put("sharedEthernetAdapterId", adapter.sharedEthernetAdapterId);
log.trace("getVirtualEthernetAdapterMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVirtualEthernetAdapterMetrics() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// LPAR Storage - Virtual Generic
List<Measurement> getVirtualGenericAdapterMetrics() {
List<Measurement> getVirtualGenericAdapterMetrics(int sample) throws NullPointerException {
log.debug("getVirtualGenericAdapterMetrics()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().lparsUtil.storage.genericVirtualAdapters.forEach(adapter -> {
metric.getSample(sample).lparsUtil.storage.genericVirtualAdapters.forEach(adapter -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("location", adapter.physicalLocation);
tagsMap.put("id", adapter.id);
log.trace("getVirtualGenericAdapterMetrics() - tags: " + tagsMap);
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("location", adapter.physicalLocation);
tagsMap.put("id", adapter.id);
log.trace("getVirtualGenericAdapterMetrics() - tags: " + tagsMap);
fieldsMap.put("numOfReads", adapter.numOfReads);
fieldsMap.put("numOfWrites", adapter.numOfWrites);
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("readBytes", adapter.readBytes);
fieldsMap.put("type", adapter.type);
log.trace("getVirtualGenericAdapterMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVirtualGenericAdapterMetrics() - error: {}", e.getMessage());
}
fieldsMap.put("numOfReads", adapter.numOfReads);
fieldsMap.put("numOfWrites", adapter.numOfWrites);
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("readBytes", adapter.readBytes);
fieldsMap.put("type", adapter.type);
log.trace("getVirtualGenericAdapterMetrics() - fields: " + fieldsMap);
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// LPAR Storage - Virtual FC
List<Measurement> getVirtualFibreChannelAdapterMetrics() {
// LPAR Storage - Virtual FC
List<Measurement> getVirtualFibreChannelAdapterMetrics(int sample) throws NullPointerException {
log.debug("getVirtualFibreChannelAdapterMetrics()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().lparsUtil.storage.virtualFiberChannelAdapters.forEach(adapter -> {
metric.getSample(sample).lparsUtil.storage.virtualFiberChannelAdapters.forEach(adapter -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVirtualFibreChannelAdapterMetrics() - tags: " + tagsMap);
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("viosId", adapter.viosId.toString());
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVirtualFibreChannelAdapterMetrics() - tags: " + tagsMap);
fieldsMap.put("numOfReads", adapter.numOfReads);
fieldsMap.put("numOfWrites", adapter.numOfWrites);
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("readBytes", adapter.readBytes);
fieldsMap.put("runningSpeed", adapter.runningSpeed);
fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
fieldsMap.put("transferredByte", adapter.transmittedBytes); // TODO: Must be error in dashboard, remove when checked.
log.trace("getVirtualFibreChannelAdapterMetrics() - fields: " + fieldsMap);
fieldsMap.put("numOfReads", adapter.numOfReads);
fieldsMap.put("numOfWrites", adapter.numOfWrites);
fieldsMap.put("writeBytes", adapter.writeBytes);
fieldsMap.put("readBytes", adapter.readBytes);
fieldsMap.put("runningSpeed", adapter.runningSpeed);
fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
log.trace("getVirtualFibreChannelAdapterMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVirtualFibreChannelAdapterMetrics() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// LPAR Network - SR-IOV Logical Ports
List<Measurement> getSriovLogicalPorts() {
List<Measurement> getSriovLogicalPorts(int sample) throws NullPointerException {
log.debug("getSriovLogicalPorts()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().lparsUtil.network.sriovLogicalPorts.forEach(port -> {
metric.getSample(sample).lparsUtil.network.sriovLogicalPorts.forEach(port -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("location", port.physicalLocation);
tagsMap.put("type", port.configurationType);
log.trace("getSriovLogicalPorts() - tags: " + tagsMap);
tagsMap.put("servername", managedSystem.entry.getName());
tagsMap.put("lparname", entry.getName());
tagsMap.put("location", port.physicalLocation);
log.trace("getSriovLogicalPorts() - tags: " + tagsMap);
fieldsMap.put("sentBytes", port.sentBytes);
fieldsMap.put("receivedBytes", port.receivedBytes);
fieldsMap.put("transferredBytes", port.transferredBytes);
fieldsMap.put("sentPackets", port.sentPackets);
fieldsMap.put("receivedPackets", port.receivedPackets);
fieldsMap.put("droppedPackets", port.droppedPackets);
fieldsMap.put("errorIn", port.errorIn);
fieldsMap.put("errorOut", port.errorOut);
log.trace("getSriovLogicalPorts() - fields: " + fieldsMap);
fieldsMap.put("sentBytes", port.sentBytes);
fieldsMap.put("receivedBytes", port.receivedBytes);
fieldsMap.put("transferredBytes", port.transferredBytes);
fieldsMap.put("sentPackets", port.sentPackets);
fieldsMap.put("receivedPackets", port.receivedPackets);
fieldsMap.put("droppedPackets", port.droppedPackets);
fieldsMap.put("errorIn", port.errorIn);
fieldsMap.put("errorOut", port.errorOut);
log.trace("getSriovLogicalPorts() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getSriovLogicalPorts() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
}
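The collector methods above now construct each Measurement with an explicit per-sample timestamp, `new Measurement(getTimestamp(sample), tagsMap, fieldsMap)`, which InfluxClient later reads back as `m.timestamp`, `m.tags` and `m.fields`. Purely as a reading aid, here is a hedged sketch of the value object those call sites imply; the actual class in the repository may differ.
```java
import java.time.Instant;
import java.util.Map;

// Sketch only: shape inferred from the call sites in this diff, not copied from the repository.
public class Measurement {
    public final Instant timestamp;            // read by InfluxClient as m.timestamp
    public final Map<String, String> tags;     // written with Point.addTags()
    public final Map<String, Object> fields;   // written with Point.addFields()

    public Measurement(Instant timestamp, Map<String, String> tags, Map<String, Object> fields) {
        this.timestamp = timestamp;
        this.tags = tags;
        this.fields = fields;
    }
}
```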

src/main/java/biz/nellemann/hmci/ManagedSystem.java
View File

@ -17,8 +17,6 @@ package biz.nellemann.hmci;
import biz.nellemann.hmci.dto.xml.*;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -39,6 +37,8 @@ class ManagedSystem extends Resource {
private List<String> includePartitions = new ArrayList<>();
private final RestClient restClient;
private final InfluxClient influxClient;
protected ManagedSystemEntry entry;
@ -52,9 +52,10 @@ class ManagedSystem extends Resource {
public String id;
public ManagedSystem(RestClient restClient, String href) {
public ManagedSystem(RestClient restClient, InfluxClient influxClient, String href) {
log.debug("ManagedSystem() - {}", href);
this.restClient = restClient;
this.influxClient = influxClient;
try {
URI uri = new URI(href);
uriPath = uri.getPath();
@ -89,6 +90,7 @@ class ManagedSystem extends Resource {
setPcmPreference();
}
systemEnergy = new SystemEnergy(restClient, influxClient, this);
}
@ -122,7 +124,7 @@ class ManagedSystem extends Resource {
logicalPartitions.clear();
for (Link link : this.entry.getAssociatedLogicalPartitions()) {
LogicalPartition logicalPartition = new LogicalPartition(restClient, link.getHref(), this);
LogicalPartition logicalPartition = new LogicalPartition(restClient, influxClient, link.getHref(), this);
logicalPartition.discover();
if(Objects.equals(logicalPartition.entry.partitionState, "running")) {
// Check exclude / include
@ -152,9 +154,9 @@ class ManagedSystem extends Resource {
public void refresh() {
log.debug("refresh()");
log.debug("refresh() - {}", name);
try {
String xml = restClient.getRequest(String.format("/rest/api/pcm/ManagedSystem/%s/ProcessedMetrics?NoOfSamples=1", id));
String xml = restClient.getRequest(String.format("/rest/api/pcm/ManagedSystem/%s/ProcessedMetrics?NoOfSamples=%d", id, noOfSamples));
// Do not try to parse empty response
if(xml == null || xml.length() <= 1) {
@ -180,6 +182,12 @@ class ManagedSystem extends Resource {
}
});
if(systemEnergy != null) {
systemEnergy.refresh();
}
logicalPartitions.forEach(LogicalPartition::refresh);
} catch (JsonParseException e) {
log.warn("refresh() - parse error for: {}", name);
metric = null;
@ -190,6 +198,38 @@ class ManagedSystem extends Resource {
}
@Override
public void process(int sample) throws NullPointerException {
log.debug("process() - {} - sample: {}", name, sample);
influxClient.write(getDetails(sample),"server_details");
influxClient.write(getMemoryMetrics(sample),"server_memory");
influxClient.write(getProcessorMetrics(sample), "server_processor");
influxClient.write(getPhysicalProcessorPool(sample),"server_physicalProcessorPool");
influxClient.write(getSharedProcessorPools(sample),"server_sharedProcessorPool");
if(systemEnergy != null) {
systemEnergy.process();
}
influxClient.write(getVioDetails(sample),"vios_details");
influxClient.write(getVioProcessorMetrics(sample),"vios_processor");
influxClient.write(getVioMemoryMetrics(sample),"vios_memory");
influxClient.write(getVioNetworkLpars(sample),"vios_network_lpars");
influxClient.write(getVioNetworkVirtualAdapters(sample),"vios_network_virtual");
influxClient.write(getVioNetworkSharedAdapters(sample),"vios_network_shared");
influxClient.write(getVioNetworkGenericAdapters(sample),"vios_network_generic");
influxClient.write(getVioStorageLpars(sample),"vios_storage_lpars");
influxClient.write(getVioStorageFiberChannelAdapters(sample),"vios_storage_FC");
influxClient.write(getVioStorageVirtualAdapters(sample),"vios_storage_virtual");
influxClient.write(getVioStoragePhysicalAdapters(sample),"vios_storage_physical");
// Missing: vios_storage_SSP
logicalPartitions.forEach(Resource::process);
}
public void setPcmPreference() {
log.info("setPcmPreference()");
@ -250,149 +290,124 @@ class ManagedSystem extends Resource {
// System details
List<Measurement> getDetails() {
List<Measurement> getDetails(int sample) throws NullPointerException {
log.debug("getDetails()");
List<Measurement> list = new ArrayList<>();
Map<String, String> tagsMap = new TreeMap<>();
Map<String, Object> fieldsMap = new TreeMap<>();
try {
Map<String, String> tagsMap = new TreeMap<>();
Map<String, Object> fieldsMap = new TreeMap<>();
tagsMap.put("servername", entry.getName());
log.trace("getDetails() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
log.trace("getDetails() - tags: " + tagsMap);
fieldsMap.put("mtm", String.format("%s-%s %s",
entry.getMachineTypeModelAndSerialNumber().getMachineType(),
entry.getMachineTypeModelAndSerialNumber().getModel(),
entry.getMachineTypeModelAndSerialNumber().getSerialNumber())
);
fieldsMap.put("APIversion", metric.getUtilInfo().version);
fieldsMap.put("metric", metric.utilInfo.metricType);
fieldsMap.put("frequency", metric.getUtilInfo().frequency);
fieldsMap.put("nextract", "HMCi");
fieldsMap.put("name", entry.getName());
fieldsMap.put("utilizedProcUnits", metric.getSample(sample).systemFirmwareUtil.utilizedProcUnits);
fieldsMap.put("assignedMem", metric.getSample(sample).systemFirmwareUtil.assignedMem);
log.trace("getDetails() - fields: " + fieldsMap);
fieldsMap.put("mtm", String.format("%s-%s %s",
entry.getMachineTypeModelAndSerialNumber().getMachineType(),
entry.getMachineTypeModelAndSerialNumber().getModel(),
entry.getMachineTypeModelAndSerialNumber().getSerialNumber())
);
fieldsMap.put("APIversion", metric.getUtilInfo().version);
fieldsMap.put("metric", metric.utilInfo.metricType);
fieldsMap.put("frequency", metric.getUtilInfo().frequency);
fieldsMap.put("nextract", "HMCi");
fieldsMap.put("name", entry.getName());
fieldsMap.put("utilizedProcUnits", metric.getSample().systemFirmwareUtil.utilizedProcUnits);
fieldsMap.put("assignedMem", metric.getSample().systemFirmwareUtil.assignedMem);
log.trace("getDetails() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getDetails() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
return list;
}
// System Memory
List<Measurement> getMemoryMetrics() {
List<Measurement> getMemoryMetrics(int sample) throws NullPointerException {
log.debug("getMemoryMetrics()");
List<Measurement> list = new ArrayList<>();
HashMap<String, String> tagsMap = new HashMap<>();
Map<String, Object> fieldsMap = new HashMap<>();
try {
HashMap<String, String> tagsMap = new HashMap<>();
Map<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
log.trace("getMemoryMetrics() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
log.trace("getMemoryMetrics() - tags: " + tagsMap);
fieldsMap.put("totalMem", metric.getSample(sample).serverUtil.memory.totalMem);
fieldsMap.put("availableMem", metric.getSample(sample).serverUtil.memory.availableMem);
fieldsMap.put("configurableMem", metric.getSample(sample).serverUtil.memory.configurableMem);
fieldsMap.put("assignedMemToLpars", metric.getSample(sample).serverUtil.memory.assignedMemToLpars);
fieldsMap.put("virtualPersistentMem", metric.getSample(sample).serverUtil.memory.virtualPersistentMem);
log.trace("getMemoryMetrics() - fields: " + fieldsMap);
fieldsMap.put("totalMem", metric.getSample().serverUtil.memory.totalMem);
fieldsMap.put("availableMem", metric.getSample().serverUtil.memory.availableMem);
fieldsMap.put("configurableMem", metric.getSample().serverUtil.memory.configurableMem);
fieldsMap.put("assignedMemToLpars", metric.getSample().serverUtil.memory.assignedMemToLpars);
fieldsMap.put("virtualPersistentMem", metric.getSample().serverUtil.memory.virtualPersistentMem);
log.trace("getMemoryMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getMemoryMetrics() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
return list;
}
// System Processor
List<Measurement> getProcessorMetrics() {
List<Measurement> getProcessorMetrics(int sample) throws NullPointerException {
log.debug("getProcessorMetrics()");
List<Measurement> list = new ArrayList<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
try {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
log.trace("getProcessorMetrics() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
log.trace("getProcessorMetrics() - tags: " + tagsMap);
fieldsMap.put("totalProcUnits", metric.getSample(sample).serverUtil.processor.totalProcUnits);
fieldsMap.put("utilizedProcUnits", metric.getSample(sample).serverUtil.processor.utilizedProcUnits);
fieldsMap.put("availableProcUnits", metric.getSample(sample).serverUtil.processor.availableProcUnits);
fieldsMap.put("configurableProcUnits", metric.getSample(sample).serverUtil.processor.configurableProcUnits);
log.trace("getProcessorMetrics() - fields: " + fieldsMap);
fieldsMap.put("totalProcUnits", metric.getSample().serverUtil.processor.totalProcUnits);
fieldsMap.put("utilizedProcUnits", metric.getSample().serverUtil.processor.utilizedProcUnits);
fieldsMap.put("availableProcUnits", metric.getSample().serverUtil.processor.availableProcUnits);
fieldsMap.put("configurableProcUnits", metric.getSample().serverUtil.processor.configurableProcUnits);
log.trace("getProcessorMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getProcessorMetrics() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
return list;
}
// System Shared ProcessorPools
List<Measurement> getSharedProcessorPools() {
List<Measurement> getSharedProcessorPools(int sample) throws NullPointerException {
log.debug("getSharedProcessorPools()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().serverUtil.sharedProcessorPool.forEach(sharedProcessorPool -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
tagsMap.put("pool", String.valueOf(sharedProcessorPool.id));
tagsMap.put("poolname", sharedProcessorPool.name);
log.trace("getSharedProcessorPools() - tags: " + tagsMap);
fieldsMap.put("assignedProcUnits", sharedProcessorPool.assignedProcUnits);
fieldsMap.put("availableProcUnits", sharedProcessorPool.availableProcUnits);
fieldsMap.put("utilizedProcUnits", sharedProcessorPool.utilizedProcUnits);
fieldsMap.put("borrowedProcUnits", sharedProcessorPool.borrowedProcUnits);
fieldsMap.put("configuredProcUnits", sharedProcessorPool.configuredProcUnits);
log.trace("getSharedProcessorPools() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getSharedProcessorPools() - error: {}", e.getMessage());
}
return list;
}
// System Physical ProcessorPool
List<Measurement> getPhysicalProcessorPool() {
List<Measurement> list = new ArrayList<>();
try {
metric.getSample(sample).serverUtil.sharedProcessorPool.forEach(sharedProcessorPool -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
log.trace("getPhysicalProcessorPool() - tags: " + tagsMap);
tagsMap.put("pool", String.valueOf(sharedProcessorPool.id));
tagsMap.put("poolname", sharedProcessorPool.name);
log.trace("getSharedProcessorPools() - tags: " + tagsMap);
fieldsMap.put("assignedProcUnits", metric.getSample().serverUtil.physicalProcessorPool.assignedProcUnits);
fieldsMap.put("availableProcUnits", metric.getSample().serverUtil.physicalProcessorPool.availableProcUnits);
fieldsMap.put("utilizedProcUnits", metric.getSample().serverUtil.physicalProcessorPool.utilizedProcUnits);
fieldsMap.put("configuredProcUnits", metric.getSample().serverUtil.physicalProcessorPool.configuredProcUnits);
fieldsMap.put("borrowedProcUnits", metric.getSample().serverUtil.physicalProcessorPool.borrowedProcUnits);
log.trace("getPhysicalProcessorPool() - fields: " + fieldsMap);
fieldsMap.put("assignedProcUnits", sharedProcessorPool.assignedProcUnits);
fieldsMap.put("availableProcUnits", sharedProcessorPool.availableProcUnits);
fieldsMap.put("utilizedProcUnits", sharedProcessorPool.utilizedProcUnits);
fieldsMap.put("borrowedProcUnits", sharedProcessorPool.borrowedProcUnits);
fieldsMap.put("configuredProcUnits", sharedProcessorPool.configuredProcUnits);
log.trace("getSharedProcessorPools() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getPhysicalProcessorPool() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// System Physical ProcessorPool
List<Measurement> getPhysicalProcessorPool(int sample) throws NullPointerException {
log.debug("getPhysicalProcessorPool()");
List<Measurement> list = new ArrayList<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
log.trace("getPhysicalProcessorPool() - tags: " + tagsMap);
fieldsMap.put("assignedProcUnits", metric.getSample(sample).serverUtil.physicalProcessorPool.assignedProcUnits);
fieldsMap.put("availableProcUnits", metric.getSample(sample).serverUtil.physicalProcessorPool.availableProcUnits);
fieldsMap.put("utilizedProcUnits", metric.getSample(sample).serverUtil.physicalProcessorPool.utilizedProcUnits);
fieldsMap.put("configuredProcUnits", metric.getSample(sample).serverUtil.physicalProcessorPool.configuredProcUnits);
fieldsMap.put("borrowedProcUnits", metric.getSample(sample).serverUtil.physicalProcessorPool.borrowedProcUnits);
log.trace("getPhysicalProcessorPool() - fields: " + fieldsMap);
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
return list;
}
@ -404,385 +419,340 @@ class ManagedSystem extends Resource {
// VIO Details
List<Measurement> getVioDetails() {
List<Measurement> getVioDetails(int sample) throws NullPointerException {
log.debug("getVioDetails()");
List<Measurement> list = new ArrayList<>();
metric.getSample(sample).viosUtil.forEach(vio -> {
try {
metric.getSample().viosUtil.forEach(vio -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioDetails() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioDetails() - tags: " + tagsMap);
fieldsMap.put("viosid", vio.id);
fieldsMap.put("viosstate", vio.state);
fieldsMap.put("viosname", vio.name);
fieldsMap.put("affinityScore", vio.affinityScore);
log.trace("getVioDetails() - fields: " + fieldsMap);
fieldsMap.put("viosid", vio.id);
fieldsMap.put("viosstate", vio.state);
fieldsMap.put("viosname", vio.name);
fieldsMap.put("affinityScore", vio.affinityScore);
log.trace("getVioDetails() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVioDetails() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// VIO Memory
List<Measurement> getVioMemoryMetrics() {
List<Measurement> getVioMemoryMetrics(int sample) throws NullPointerException {
log.debug("getVioMemoryMetrics()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().viosUtil.forEach(vio -> {
metric.getSample(sample).viosUtil.forEach(vio -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioMemoryMetrics() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioMemoryMetrics() - tags: " + tagsMap);
Number assignedMem = vio.memory.assignedMem;
Number utilizedMem = vio.memory.utilizedMem;
Number usedMemPct = (utilizedMem.intValue() * 100 ) / assignedMem.intValue();
fieldsMap.put("assignedMem", vio.memory.assignedMem);
fieldsMap.put("utilizedMem", vio.memory.utilizedMem);
fieldsMap.put("utilizedPct", usedMemPct.floatValue());
log.trace("getVioMemoryMetrics() - fields: " + fieldsMap);
Number assignedMem = vio.memory.assignedMem;
Number utilizedMem = vio.memory.utilizedMem;
Number usedMemPct = (utilizedMem.intValue() * 100 ) / assignedMem.intValue();
fieldsMap.put("assignedMem", vio.memory.assignedMem);
fieldsMap.put("utilizedMem", vio.memory.utilizedMem);
fieldsMap.put("utilizedPct", usedMemPct.floatValue());
log.trace("getVioMemoryMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVioMemoryMetrics() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// VIO Processor
List<Measurement> getVioProcessorMetrics() {
List<Measurement> getVioProcessorMetrics(int sample) throws NullPointerException {
log.debug("getVioProcessorMetrics()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().viosUtil.forEach(vio -> {
metric.getSample(sample).viosUtil.forEach(vio -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioProcessorMetrics() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioProcessorMetrics() - tags: " + tagsMap);
fieldsMap.put("utilizedProcUnits", vio.processor.utilizedProcUnits);
fieldsMap.put("utilizedCappedProcUnits", vio.processor.utilizedCappedProcUnits);
fieldsMap.put("utilizedUncappedProcUnits", vio.processor.utilizedUncappedProcUnits);
fieldsMap.put("currentVirtualProcessors", vio.processor.currentVirtualProcessors);
fieldsMap.put("maxVirtualProcessors", vio.processor.maxVirtualProcessors);
fieldsMap.put("maxProcUnits", vio.processor.maxProcUnits);
fieldsMap.put("entitledProcUnits", vio.processor.entitledProcUnits);
fieldsMap.put("donatedProcUnits", vio.processor.donatedProcUnits);
fieldsMap.put("idleProcUnits", vio.processor.idleProcUnits);
fieldsMap.put("timeSpentWaitingForDispatch", vio.processor.timePerInstructionExecution);
fieldsMap.put("timePerInstructionExecution", vio.processor.timeSpentWaitingForDispatch);
fieldsMap.put("weight", vio.processor.weight);
fieldsMap.put("mode", vio.processor.mode);
log.trace("getVioProcessorMetrics() - fields: " + fieldsMap);
fieldsMap.put("utilizedProcUnits", vio.processor.utilizedProcUnits);
fieldsMap.put("utilizedCappedProcUnits", vio.processor.utilizedCappedProcUnits);
fieldsMap.put("utilizedUncappedProcUnits", vio.processor.utilizedUncappedProcUnits);
fieldsMap.put("currentVirtualProcessors", vio.processor.currentVirtualProcessors);
fieldsMap.put("maxVirtualProcessors", vio.processor.maxVirtualProcessors);
fieldsMap.put("maxProcUnits", vio.processor.maxProcUnits);
fieldsMap.put("entitledProcUnits", vio.processor.entitledProcUnits);
fieldsMap.put("donatedProcUnits", vio.processor.donatedProcUnits);
fieldsMap.put("idleProcUnits", vio.processor.idleProcUnits);
fieldsMap.put("timeSpentWaitingForDispatch", vio.processor.timePerInstructionExecution);
fieldsMap.put("timePerInstructionExecution", vio.processor.timeSpentWaitingForDispatch);
fieldsMap.put("weight", vio.processor.weight);
fieldsMap.put("mode", vio.processor.mode);
log.trace("getVioProcessorMetrics() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVioProcessorMetrics() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// VIOs - Network
List<Measurement> getVioNetworkLpars() {
List<Measurement> getVioNetworkLpars(int sample) throws NullPointerException {
log.debug("getVioNetworkLpars()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().viosUtil.forEach(vio -> {
metric.getSample(sample).viosUtil.forEach(vio -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioNetworkLpars() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
log.trace("getVioNetworkLpars() - tags: " + tagsMap);
fieldsMap.put("clientlpars", vio.network.clientLpars.size());
log.trace("getVioNetworkLpars() - fields: " + fieldsMap);
fieldsMap.put("clientlpars", vio.network.clientLpars.size());
log.trace("getVioNetworkLpars() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVioNetworkLpars() - error: {}", e.getMessage());
}
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
return list;
}
// VIO Network - Shared
List<Measurement> getVioNetworkSharedAdapters() {
List<Measurement> getVioNetworkSharedAdapters(int sample) throws NullPointerException {
log.debug("getVioNetworkSharedAdapters()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().viosUtil.forEach(vio -> {
vio.network.sharedAdapters.forEach(adapter -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
metric.getSample(sample).viosUtil.forEach(vio -> {
vio.network.sharedAdapters.forEach(adapter -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
//tagsMap.put("id", adapter.id);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVioNetworkSharedAdapters() - tags: " + tagsMap);
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
//tagsMap.put("id", adapter.id);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVioNetworkSharedAdapters() - tags: " + tagsMap);
fieldsMap.put("id", adapter.id);
fieldsMap.put("type", adapter.type);
fieldsMap.put("sentBytes", adapter.sentBytes);
fieldsMap.put("sentPackets", adapter.sentPackets);
fieldsMap.put("receivedBytes", adapter.receivedBytes);
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
log.trace("getVioNetworkSharedAdapters() - fields: " + fieldsMap);
fieldsMap.put("id", adapter.id);
fieldsMap.put("type", adapter.type);
fieldsMap.put("sentBytes", adapter.sentBytes);
fieldsMap.put("sentPackets", adapter.sentPackets);
fieldsMap.put("receivedBytes", adapter.receivedBytes);
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
log.trace("getVioNetworkSharedAdapters() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVioNetworkSharedAdapters() - error: {}", e.getMessage());
}
});
return list;
}
// VIO Network - Virtual
List<Measurement> getVioNetworkVirtualAdapters() {
List<Measurement> getVioNetworkVirtualAdapters(int sample) throws NullPointerException {
log.debug("getVioNetworkVirtualAdapters()");
List<Measurement> list = new ArrayList<>();
metric.getSample(sample).viosUtil.forEach( vio -> {
vio.network.virtualEthernetAdapters.forEach( adapter -> {
try {
metric.getSample().viosUtil.forEach( vio -> {
vio.network.virtualEthernetAdapters.forEach( adapter -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("vlanid", String.valueOf(adapter.vlanId));
tagsMap.put("vswitchid", String.valueOf(adapter.vswitchId));
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVioNetworkVirtualAdapters() - tags: " + tagsMap);
tagsMap.put("vlanid", String.valueOf(adapter.vlanId));
tagsMap.put("vswitchid", String.valueOf(adapter.vswitchId));
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVioNetworkVirtualAdapters() - tags: " + tagsMap);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("droppedPhysicalPackets", adapter.droppedPhysicalPackets);
fieldsMap.put("isPortVlanId", adapter.isPortVlanId);
fieldsMap.put("receivedBytes", adapter.receivedBytes);
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("receivedPhysicalBytes", adapter.receivedPhysicalBytes);
fieldsMap.put("receivedPhysicalPackets", adapter.receivedPhysicalPackets);
fieldsMap.put("sentBytes", adapter.sentBytes);
fieldsMap.put("sentPackets", adapter.sentPackets);
fieldsMap.put("sentPhysicalBytes", adapter.sentPhysicalBytes);
fieldsMap.put("sentPhysicalPackets", adapter.sentPhysicalPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
fieldsMap.put("transferredPhysicalBytes", adapter.transferredPhysicalBytes);
log.trace("getVioNetworkVirtualAdapters() - fields: " + fieldsMap);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("droppedPhysicalPackets", adapter.droppedPhysicalPackets);
fieldsMap.put("isPortVlanId", adapter.isPortVlanId);
fieldsMap.put("receivedBytes", adapter.receivedBytes);
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("receivedPhysicalBytes", adapter.receivedPhysicalBytes);
fieldsMap.put("receivedPhysicalPackets", adapter.receivedPhysicalPackets);
fieldsMap.put("sentBytes", adapter.sentBytes);
fieldsMap.put("sentPackets", adapter.sentPackets);
fieldsMap.put("sentPhysicalBytes", adapter.sentPhysicalBytes);
fieldsMap.put("sentPhysicalPackets", adapter.sentPhysicalPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
fieldsMap.put("transferredPhysicalBytes", adapter.transferredPhysicalBytes);
log.trace("getVioNetworkVirtualAdapters() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
} catch (Exception e) {
log.warn("getVioNetworkVirtualAdapters() - error: {}", e.getMessage());
}
});
return list;
}
// VIO Network - Generic
List<Measurement> getVioNetworkGenericAdapters() {
List<Measurement> getVioNetworkGenericAdapters(int sample) throws NullPointerException {
log.debug("getVioNetworkGenericAdapters()");
List<Measurement> list = new ArrayList<>();
try {
metric.getSample().viosUtil.forEach( vio -> {
vio.network.genericAdapters.forEach( adapter -> {
HashMap<String, String> tagsMap = new HashMap<>();
HashMap<String, Object> fieldsMap = new HashMap<>();
tagsMap.put("id", adapter.id);
tagsMap.put("servername", entry.getName());
tagsMap.put("viosname", vio.name);
tagsMap.put("location", adapter.physicalLocation);
log.trace("getVioNetworkGenericAdapters() - tags: " + tagsMap);
fieldsMap.put("sentBytes", adapter.sentBytes);
fieldsMap.put("sentPackets", adapter.sentPackets);
fieldsMap.put("receivedBytes", adapter.receivedBytes);
fieldsMap.put("receivedPackets", adapter.receivedPackets);
fieldsMap.put("droppedPackets", adapter.droppedPackets);
fieldsMap.put("transferredBytes", adapter.transferredBytes);
log.trace("getVioNetworkGenericAdapters() - fields: " + fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
});
});
} catch (Exception e) {
log.warn("getVioNetworkGenericAdapters() - error: {}", e.getMessage());
}
return list;
}
// VIOs - Storage
List<Measurement> getVioStorageLpars(int sample) throws NullPointerException {
    log.debug("getVioStorageLpars()");
    List<Measurement> list = new ArrayList<>();
    metric.getSample(sample).viosUtil.forEach(vio -> {
        HashMap<String, String> tagsMap = new HashMap<>();
        HashMap<String, Object> fieldsMap = new HashMap<>();
        tagsMap.put("servername", entry.getName());
        tagsMap.put("viosname", vio.name);
        log.trace("getVioStorageLpars() - tags: " + tagsMap);
        fieldsMap.put("clientlpars", vio.storage.clientLpars.size());
        log.trace("getVioStorageLpars() - fields: " + fieldsMap);
        list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
    });
    return list;
}
// VIO Storage FC
List<Measurement> getVioStorageFiberChannelAdapters(int sample) throws NullPointerException {
    log.debug("getVioStorageFiberChannelAdapters()");
    List<Measurement> list = new ArrayList<>();
    metric.getSample(sample).viosUtil.forEach( vio -> {
        log.trace("getVioStorageFiberChannelAdapters() - VIO: " + vio.name);
        vio.storage.fiberChannelAdapters.forEach( adapter -> {
            HashMap<String, String> tagsMap = new HashMap<>();
            HashMap<String, Object> fieldsMap = new HashMap<>();
            tagsMap.put("id", adapter.id);
            tagsMap.put("servername", entry.getName());
            tagsMap.put("viosname", vio.name);
            tagsMap.put("location", adapter.physicalLocation);
            log.trace("getVioStorageFiberChannelAdapters() - tags: " + tagsMap);
            fieldsMap.put("numOfReads", adapter.numOfReads);
            fieldsMap.put("numOfWrites", adapter.numOfWrites);
            fieldsMap.put("readBytes", adapter.readBytes);
            fieldsMap.put("writeBytes", adapter.writeBytes);
            fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
            log.trace("getVioStorageFiberChannelAdapters() - fields: " + fieldsMap);
            list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
        });
    });
    return list;
}
// VIO Storage - Physical
List<Measurement> getVioStoragePhysicalAdapters(int sample) throws NullPointerException {
    log.debug("getVioStoragePhysicalAdapters()");
    List<Measurement> list = new ArrayList<>();
    metric.getSample(sample).viosUtil.forEach( vio -> {
        log.trace("getVioStoragePhysicalAdapters() - VIO: " + vio.name);
        vio.storage.genericPhysicalAdapters.forEach( adapter -> {
            HashMap<String, String> tagsMap = new HashMap<>();
            HashMap<String, Object> fieldsMap = new HashMap<>();
            tagsMap.put("servername", entry.getName());
            tagsMap.put("viosname", vio.name);
            tagsMap.put("id", adapter.id);
            tagsMap.put("location", adapter.physicalLocation);
            log.trace("getVioStoragePhysicalAdapters() - tags: " + tagsMap);
            fieldsMap.put("numOfReads", adapter.numOfReads);
            fieldsMap.put("numOfWrites", adapter.numOfWrites);
            fieldsMap.put("readBytes", adapter.readBytes);
            fieldsMap.put("writeBytes", adapter.writeBytes);
            fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
            fieldsMap.put("type", adapter.type);
            log.trace("getVioStoragePhysicalAdapters() - fields: " + fieldsMap);
            list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
        });
    });
    return list;
}
// VIO Storage - Virtual
List<Measurement> getVioStorageVirtualAdapters(int sample) throws NullPointerException {
    log.debug("getVioStorageVirtualAdapters()");
    List<Measurement> list = new ArrayList<>();
    metric.getSample(sample).viosUtil.forEach( (vio) -> {
        vio.storage.genericVirtualAdapters.forEach( (adapter) -> {
            HashMap<String, String> tagsMap = new HashMap<>();
            HashMap<String, Object> fieldsMap = new HashMap<>();
            tagsMap.put("servername", entry.getName());
            tagsMap.put("viosname", vio.name);
            tagsMap.put("location", adapter.physicalLocation);
            tagsMap.put("id", adapter.id);
            log.debug("getVioStorageVirtualAdapters() - tags: " + tagsMap);
            fieldsMap.put("numOfReads", adapter.numOfReads);
            fieldsMap.put("numOfWrites", adapter.numOfWrites);
            fieldsMap.put("readBytes", adapter.readBytes);
            fieldsMap.put("writeBytes", adapter.writeBytes);
            fieldsMap.put("transmittedBytes", adapter.transmittedBytes);
            fieldsMap.put("type", adapter.type);
            log.debug("getVioStorageVirtualAdapters() - fields: " + fieldsMap);
            list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
        });
    });
    return list;
}
@ -790,10 +760,10 @@ class ManagedSystem extends Resource {
/*
// VIO Storage SSP TODO
List<Measurement> getViosStorageSharedStoragePools() {
List<Measurement> getViosStorageSharedStoragePools(int sample) throws NullPointerException {
List<Measurement> list = new ArrayList<>();
metrics.systemUtil.sample.viosUtil.forEach( vios -> {
metrics.systemUtil.getSample(sample).viosUtil.forEach( vios -> {
vios.storage.fiberChannelAdapters.forEach( adapter -> {
@ -813,7 +783,7 @@ class ManagedSystem extends Resource {
fieldsMap.put("physicalLocation", adapter.physicalLocation);
log.trace("getViosStorageSharedStoragePools() - fields: " + fieldsMap.toString());
list.add(new Measurement(tagsMap, fieldsMap));
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
});
log.trace("getViosStorageSharedStoragePools() - VIOS: " + vios.name);

View File

@ -15,22 +15,25 @@
*/
package biz.nellemann.hmci;
import java.io.IOException;
import static java.lang.Thread.sleep;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import biz.nellemann.hmci.dto.toml.HmcConfiguration;
import biz.nellemann.hmci.dto.xml.Link;
import biz.nellemann.hmci.dto.xml.ManagementConsoleEntry;
import biz.nellemann.hmci.dto.xml.XmlFeed;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.time.Duration;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;
import static java.lang.Thread.sleep;
class ManagementConsole implements Runnable {
@ -61,19 +64,6 @@ class ManagementConsole implements Runnable {
this.influxClient = influxClient;
restClient = new RestClient(configuration.url, configuration.username, configuration.password, configuration.trust);
if(configuration.trace != null) {
try {
File traceDir = new File(configuration.trace);
traceDir.mkdirs();
if(traceDir.canWrite()) {
Boolean doTrace = true;
} else {
log.warn("ManagementConsole() - can't write to trace dir: " + traceDir.toString());
}
} catch (Exception e) {
log.error("ManagementConsole() - trace error: " + e.getMessage());
}
}
this.excludeSystems = configuration.excludeSystems;
this.includeSystems = configuration.includeSystems;
this.excludePartitions = configuration.excludePartitions;
@ -160,7 +150,7 @@ class ManagementConsole implements Runnable {
managedSystems.clear();
for (Link link : entry.getAssociatedManagedSystems()) {
ManagedSystem managedSystem = new ManagedSystem(restClient, link.getHref());
ManagedSystem managedSystem = new ManagedSystem(restClient, influxClient, link.getHref());
managedSystem.setExcludePartitions(excludePartitions);
managedSystem.setIncludePartitions(includePartitions);
managedSystem.discover();
@ -184,7 +174,7 @@ class ManagementConsole implements Runnable {
}
}
} catch (Exception e) {
} catch (IOException e) {
log.warn("discover() - error: {}", e.getMessage());
}
@ -202,63 +192,10 @@ class ManagementConsole implements Runnable {
}
system.refresh();
influxClient.write(system.getDetails(), system.getTimestamp(),"server_details");
influxClient.write(system.getMemoryMetrics(), system.getTimestamp(),"server_memory");
influxClient.write(system.getProcessorMetrics(), system.getTimestamp(),"server_processor");
influxClient.write(system.getPhysicalProcessorPool(), system.getTimestamp(),"server_physicalProcessorPool");
influxClient.write(system.getSharedProcessorPools(), system.getTimestamp(),"server_sharedProcessorPool");
if(system.systemEnergy != null) {
system.systemEnergy.refresh();
if(system.systemEnergy.metric != null) {
influxClient.write(system.systemEnergy.getPowerMetrics(), system.getTimestamp(), "server_energy_power");
influxClient.write(system.systemEnergy.getThermalMetrics(), system.getTimestamp(), "server_energy_thermal");
}
}
influxClient.write(system.getVioDetails(), system.getTimestamp(),"vios_details");
influxClient.write(system.getVioProcessorMetrics(), system.getTimestamp(),"vios_processor");
influxClient.write(system.getVioMemoryMetrics(), system.getTimestamp(),"vios_memory");
influxClient.write(system.getVioNetworkLpars(), system.getTimestamp(),"vios_network_lpars");
influxClient.write(system.getVioNetworkVirtualAdapters(), system.getTimestamp(),"vios_network_virtual");
influxClient.write(system.getVioNetworkSharedAdapters(), system.getTimestamp(),"vios_network_shared");
influxClient.write(system.getVioNetworkGenericAdapters(), system.getTimestamp(),"vios_network_generic");
influxClient.write(system.getVioStorageLpars(), system.getTimestamp(),"vios_storage_lpars");
influxClient.write(system.getVioStorageFiberChannelAdapters(), system.getTimestamp(),"vios_storage_FC");
influxClient.write(system.getVioStorageVirtualAdapters(), system.getTimestamp(),"vios_storage_vFC");
influxClient.write(system.getVioStoragePhysicalAdapters(), system.getTimestamp(),"vios_storage_physical");
// Missing: vios_storage_SSP
system.logicalPartitions.forEach( (partition) -> {
partition.refresh();
influxClient.write(partition.getDetails(), partition.getTimestamp(),"lpar_details");
influxClient.write(partition.getMemoryMetrics(), partition.getTimestamp(),"lpar_memory");
influxClient.write(partition.getProcessorMetrics(), partition.getTimestamp(),"lpar_processor");
influxClient.write(partition.getSriovLogicalPorts(), partition.getTimestamp(),"lpar_net_sriov");
influxClient.write(partition.getVirtualEthernetAdapterMetrics(), partition.getTimestamp(),"lpar_net_virtual");
influxClient.write(partition.getVirtualGenericAdapterMetrics(), partition.getTimestamp(),"lpar_storage_virtual");
influxClient.write(partition.getVirtualFibreChannelAdapterMetrics(), partition.getTimestamp(),"lpar_storage_vFC");
});
system.process();
});
}
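All of the per-measurement writes removed above now happen inside each resource's own process(int sample) implementation. The compare view collapses ManagedSystem's version of that method, so the following is only a rough, hypothetical sketch of what it presumably looks like, inferred from the measurement names dropped from this loop and from SystemEnergy.process(int) further down:
    // Hypothetical sketch - ManagedSystem.process(int) itself is not shown in this compare view.
    @Override
    public void process(int sample) throws NullPointerException {
        log.debug("process() - sample: {}", sample);
        influxClient.write(getDetails(sample), "server_details");
        influxClient.write(getMemoryMetrics(sample), "server_memory");
        influxClient.write(getProcessorMetrics(sample), "server_processor");
        influxClient.write(getVioDetails(sample), "vios_details");
        influxClient.write(getVioStorageFiberChannelAdapters(sample), "vios_storage_FC");
        // ... the remaining vios_* and lpar_* measurements follow the same pattern.
    }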
/*
private void writeTraceFile(String id, String json) {
String fileName = String.format("%s-%s.json", id, Instant.now().toString());
try {
log.debug("Writing trace file: " + fileName);
File traceFile = new File(traceDir, fileName);
BufferedWriter writer = new BufferedWriter(new FileWriter(traceFile));
writer.write(json);
writer.close();
} catch (IOException e) {
log.warn("writeTraceFile() - " + e.getMessage());
}
}
*/
}

View File

@ -15,14 +15,23 @@
*/
package biz.nellemann.hmci;
import java.time.Instant;
import java.util.Map;
public class Measurement {
final Instant timestamp;
final Map<String, String> tags;
final Map<String, Object> fields;
Measurement(Map<String, String> tags, Map<String, Object> fields) {
this.timestamp = Instant.now();
this.tags = tags;
this.fields = fields;
}
Measurement(Instant timestamp, Map<String, String> tags, Map<String, Object> fields) {
this.timestamp = timestamp;
this.tags = tags;
this.fields = fields;
}
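The only difference between the two constructors above is whether the caller supplies the timestamp. A minimal usage sketch, assuming java.util.Map and java.time.Instant are imported; the tag and field values are invented for illustration:
    // Illustrative only - values are made up.
    Map<String, String> tags = Map.of("servername", "host1", "viosname", "vios1");
    Map<String, Object> fields = Map.of("readBytes", 1024.0);
    Measurement now = new Measurement(tags, fields);              // timestamp defaults to Instant.now()
    Measurement sampled = new Measurement(
            Instant.parse("2023-05-17T18:00:00Z"), tags, fields); // timestamp from the PCM sample, as the collectors now do via getTimestamp(sample)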

View File

@ -2,6 +2,7 @@ package biz.nellemann.hmci;
import biz.nellemann.hmci.dto.json.ProcessedMetrics;
import biz.nellemann.hmci.dto.json.SystemUtil;
import biz.nellemann.hmci.dto.json.UtilSample;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.slf4j.Logger;
@ -10,14 +11,20 @@ import org.slf4j.LoggerFactory;
import java.time.Instant;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.util.ArrayList;
public class Resource {
public abstract class Resource {
private final static Logger log = LoggerFactory.getLogger(Resource.class);
private final ObjectMapper objectMapper = new ObjectMapper();
private final ArrayList<String> sampleHistory = new ArrayList<>();
protected SystemUtil metric;
protected final int MAX_NUMBER_OF_SAMPLES = 60;
protected final int MIN_NUMBER_OF_SAMPLES = 5;
protected int noOfSamples = MAX_NUMBER_OF_SAMPLES;
Resource() {
@ -35,6 +42,7 @@ public class Resource {
try {
ProcessedMetrics processedMetrics = objectMapper.readValue(json, ProcessedMetrics.class);
metric = processedMetrics.systemUtil;
log.trace("deserialize() - samples: {}", metric.samples.size());
} catch (Exception e) {
log.error("deserialize() - error: {}", e.getMessage());
}
@ -61,4 +69,68 @@ public class Resource {
return instant;
}
Instant getTimestamp(int sampleNumber) {
Instant instant = Instant.now();
if (metric == null) {
return instant;
}
String timestamp = metric.getSample(sampleNumber).sampleInfo.timestamp;
try {
log.trace("getTimeStamp() - PMC Timestamp: {}", timestamp);
DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss[XXX][X]");
instant = Instant.from(dateTimeFormatter.parse(timestamp));
log.trace("getTimestamp() - Instant: {}", instant.toString());
} catch(DateTimeParseException e) {
log.warn("getTimestamp() - parse error: {}", timestamp);
}
return instant;
}
public void process() {
if(metric == null) {
return;
}
int processed = 0;
int sampleSize = metric.samples.size();
log.debug("process() - Samples Returned: {}, Samples in History: {}, Fetch Next Counter: {}", sampleSize, sampleHistory.size(), noOfSamples);
for(int i = 0; i<sampleSize; i++) {
UtilSample sample = metric.getSample(i);
String timestamp = sample.getInfo().timestamp;
if(sampleHistory.contains(timestamp)) {
//log.info("process() - Sample \"{}\" already processed", timestamp);
continue; // Already processed
}
try {
process(i);
processed++;
sampleHistory.add(timestamp); // Add to processed history
} catch (NullPointerException e) {
log.warn("process() - error", e);
}
}
// Remove old elements from history
for(int n = noOfSamples; n < sampleHistory.size(); n++) {
//log.info("process() - Removing element no. {} from sampleHistory: {}", n, sampleHistory.get(0));
sampleHistory.remove(0);
}
// Decrease down to minSamples
if(noOfSamples > MIN_NUMBER_OF_SAMPLES) {
noOfSamples = Math.min( (noOfSamples - 1), Math.max( (noOfSamples - processed) + 5, MIN_NUMBER_OF_SAMPLES));
}
}
public abstract void process(int sample) throws NullPointerException;
}
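The back-off at the end of process() shrinks the number of samples requested on the next fetch: it never goes below MIN_NUMBER_OF_SAMPLES, creeps down by one when most returned samples were already seen, and drops roughly to "new samples plus five" when most of them were new. A short walk-through of the arithmetic above, with example numbers only:
    // Example values only - walking through the expression used in process().
    int noOfSamples = 60, processed = 3;   // 3 of the 60 returned samples were new
    noOfSamples = Math.min(noOfSamples - 1,
            Math.max((noOfSamples - processed) + 5, 5));   // min(59, max(62, 5)) -> 59
    // With processed == noOfSamples (every sample was new) the same expression
    // collapses to 5, i.e. MIN_NUMBER_OF_SAMPLES.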

View File

@ -1,23 +1,33 @@
package biz.nellemann.hmci;
import biz.nellemann.hmci.dto.xml.LogonResponse;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import okhttp3.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLSocketFactory;
import javax.net.ssl.TrustManager;
import javax.net.ssl.X509TrustManager;
import java.io.*;
import java.net.*;
import java.security.KeyManagementException;
import java.security.NoSuchAlgorithmException;
import java.security.SecureRandom;
import java.security.cert.X509Certificate;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import biz.nellemann.hmci.dto.xml.LogonResponse;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.RequestBody;
import okhttp3.Response;
public class RestClient {
@ -29,15 +39,18 @@ public class RestClient {
protected OkHttpClient httpClient;
// OkHttpClient timeouts
private final static int CONNECT_TIMEOUT = 30;
private final static int WRITE_TIMEOUT = 30;
private final static int READ_TIMEOUT = 180;
private final static int CONNECT_TIMEOUT_SEC = 10;
private final static int WRITE_TIMEOUT_SEC = 30;
private final static int READ_TIMEOUT_SEC = 180;
protected String authToken;
protected final String baseUrl;
protected final String username;
protected final String password;
private final static int MAX_MINUTES_BETWEEN_AUTHENTICATION = 60; // TODO: Make configurable and match HMC timeout settings
private Instant lastAuthenticationTimestamp;
public RestClient(String baseUrl, String username, String password, Boolean trustAll) {
this.baseUrl = baseUrl;
@ -48,6 +61,23 @@ public class RestClient {
} else {
this.httpClient = getSafeOkHttpClient();
}
/*
if(configuration.trace != null) {
try {
File traceDir = new File(configuration.trace);
traceDir.mkdirs();
if(traceDir.canWrite()) {
Boolean doTrace = true;
} else {
log.warn("ManagementConsole() - can't write to trace dir: " + traceDir.toString());
}
} catch (Exception e) {
log.error("ManagementConsole() - trace error: " + e.getMessage());
}
}*/
Thread shutdownHook = new Thread(this::logoff);
Runtime.getRuntime().addShutdownHook(shutdownHook);
}
@ -55,6 +85,9 @@ public class RestClient {
* Logon to the HMC and get an authentication token for further requests.
*/
public synchronized void login() {
if(authToken != null) {
logoff();
}
log.info("Connecting to HMC - {} @ {}", username, baseUrl);
StringBuilder payload = new StringBuilder();
@ -87,10 +120,12 @@ public class RestClient {
LogonResponse logonResponse = xmlMapper.readValue(responseBody, LogonResponse.class);
authToken = logonResponse.getToken();
lastAuthenticationTimestamp = Instant.now();
log.debug("logon() - auth token: {}", authToken);
} catch (Exception e) {
log.warn("logon() - error: {}", e.getMessage());
lastAuthenticationTimestamp = null;
}
}
@ -116,13 +151,12 @@ public class RestClient {
.delete()
.build();
String responseBody;
try (Response response = httpClient.newCall(request).execute()) {
responseBody = Objects.requireNonNull(response.body()).string();
} catch (IOException e) {
log.warn("logoff() error: {}", e.getMessage());
} finally {
authToken = null;
lastAuthenticationTimestamp = null;
}
} catch (MalformedURLException e) {
@ -147,10 +181,14 @@ public class RestClient {
* Return a Response from the HMC
* @param url to get Response from
* @return Response body string
* @throws IOException
*/
public synchronized String getRequest(URL url) throws IOException {
log.trace("getRequest() - URL: {}", url.toString());
log.debug("getRequest() - URL: {}", url.toString());
if (lastAuthenticationTimestamp == null || lastAuthenticationTimestamp.plus(MAX_MINUTES_BETWEEN_AUTHENTICATION, ChronoUnit.MINUTES).isBefore(Instant.now())) {
login();
}
Request request = new Request.Builder()
.url(url)
@ -196,7 +234,7 @@ public class RestClient {
String responseBody = null;
try (Response responseRetry = httpClient.newCall(request).execute()) {
if(responseRetry.isSuccessful()) {
responseBody = responseRetry.body().string();
responseBody = Objects.requireNonNull(responseRetry.body()).string();
}
}
return responseBody;
@ -207,12 +245,16 @@ public class RestClient {
* Send a POST request with a payload (can be null) to the HMC
* @param url
* @param payload
* @return
* @return Response body string
* @throws IOException
*/
public synchronized String postRequest(URL url, String payload) throws IOException {
log.debug("sendPostRequest() - URL: {}", url.toString());
if (lastAuthenticationTimestamp == null || lastAuthenticationTimestamp.plus(MAX_MINUTES_BETWEEN_AUTHENTICATION, ChronoUnit.MINUTES).isBefore(Instant.now())) {
login();
}
RequestBody requestBody;
if(payload != null) {
requestBody = RequestBody.create(payload, MEDIA_TYPE_IBM_XML_POST);
@ -276,9 +318,9 @@ public class RestClient {
OkHttpClient.Builder builder = new OkHttpClient.Builder();
builder.sslSocketFactory(sslSocketFactory, (X509TrustManager)trustAllCerts[0]);
builder.hostnameVerifier((hostname, session) -> true);
builder.connectTimeout(CONNECT_TIMEOUT, TimeUnit.SECONDS);
builder.writeTimeout(WRITE_TIMEOUT, TimeUnit.SECONDS);
builder.readTimeout(READ_TIMEOUT, TimeUnit.SECONDS);
builder.connectTimeout(CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
builder.writeTimeout(WRITE_TIMEOUT_SEC, TimeUnit.SECONDS);
builder.readTimeout(READ_TIMEOUT_SEC, TimeUnit.SECONDS);
return builder.build();
} catch (KeyManagementException | NoSuchAlgorithmException e) {
@ -293,11 +335,28 @@ public class RestClient {
*/
private static OkHttpClient getSafeOkHttpClient() {
OkHttpClient.Builder builder = new OkHttpClient.Builder();
builder.connectTimeout(CONNECT_TIMEOUT, TimeUnit.SECONDS);
builder.writeTimeout(WRITE_TIMEOUT, TimeUnit.SECONDS);
builder.readTimeout(READ_TIMEOUT, TimeUnit.SECONDS);
builder.connectTimeout(CONNECT_TIMEOUT_SEC, TimeUnit.SECONDS);
builder.writeTimeout(WRITE_TIMEOUT_SEC, TimeUnit.SECONDS);
builder.readTimeout(READ_TIMEOUT_SEC, TimeUnit.SECONDS);
return builder.build();
}
/*
private void writeTraceFile(String id, String json) {
String fileName = String.format("%s-%s.json", id, Instant.now().toString());
try {
log.debug("Writing trace file: " + fileName);
File traceFile = new File(traceDir, fileName);
BufferedWriter writer = new BufferedWriter(new FileWriter(traceFile));
writer.write(json);
writer.close();
} catch (IOException e) {
log.warn("writeTraceFile() - " + e.getMessage());
}
}
*/
}
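The session-timeout fix above tracks lastAuthenticationTimestamp and re-authenticates before a request once the cached token is older than MAX_MINUTES_BETWEEN_AUTHENTICATION. A compact restatement of that guard, as used by getRequest() and postRequest(); this is illustrative, not additional code in the diff:
    // Illustrative restatement of the re-authentication guard above.
    if (lastAuthenticationTimestamp == null
            || lastAuthenticationTimestamp.plus(MAX_MINUTES_BETWEEN_AUTHENTICATION, ChronoUnit.MINUTES)
                   .isBefore(Instant.now())) {
        login();   // refresh the HMC token (and the timestamp) before issuing the request
    }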

View File

@ -1,29 +1,37 @@
package biz.nellemann.hmci;
import biz.nellemann.hmci.dto.xml.Link;
import biz.nellemann.hmci.dto.xml.XmlFeed;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.URI;
import java.util.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import biz.nellemann.hmci.dto.xml.Link;
import biz.nellemann.hmci.dto.xml.XmlFeed;
class SystemEnergy extends Resource {
private final static Logger log = LoggerFactory.getLogger(SystemEnergy.class);
private final RestClient restClient;
private final InfluxClient influxClient;
private final ManagedSystem managedSystem;
protected String id;
protected String name;
public SystemEnergy(RestClient restClient, ManagedSystem managedSystem) {
public SystemEnergy(RestClient restClient, InfluxClient influxClient, ManagedSystem managedSystem) {
log.debug("SystemEnergy()");
this.restClient = restClient;
this.influxClient = influxClient;
this.managedSystem = managedSystem;
}
@ -32,7 +40,7 @@ class SystemEnergy extends Resource {
log.debug("refresh()");
try {
String xml = restClient.getRequest(String.format("/rest/api/pcm/ManagedSystem/%s/ProcessedMetrics?Type=Energy&NoOfSamples=1", managedSystem.id));
String xml = restClient.getRequest(String.format("/rest/api/pcm/ManagedSystem/%s/ProcessedMetrics?Type=Energy&NoOfSamples=%d", managedSystem.id, noOfSamples));
// Do not try to parse empty response
if(xml == null || xml.length() <= 1) {
@ -66,8 +74,17 @@ class SystemEnergy extends Resource {
@Override
public void process(int sample) {
if(metric != null) {
log.debug("process() - sample: {}", sample);
influxClient.write(getPowerMetrics(sample), "server_energy_power");
influxClient.write(getThermalMetrics(sample), "server_energy_thermal");
}
}
List<Measurement> getPowerMetrics() {
List<Measurement> getPowerMetrics(int sample) {
List<Measurement> list = new ArrayList<>();
try {
@ -77,10 +94,10 @@ class SystemEnergy extends Resource {
tagsMap.put("servername", managedSystem.name);
log.trace("getPowerMetrics() - tags: {}", tagsMap);
fieldsMap.put("powerReading", metric.getSample().energyUtil.powerUtil.powerReading);
fieldsMap.put("powerReading", metric.getSample(sample).energyUtil.powerUtil.powerReading);
log.trace("getPowerMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getPowerMetrics() - error: {}", e.getMessage());
}
@ -89,7 +106,7 @@ class SystemEnergy extends Resource {
}
List<Measurement> getThermalMetrics() {
List<Measurement> getThermalMetrics(int sample) {
List<Measurement> list = new ArrayList<>();
try {
@ -99,11 +116,11 @@ class SystemEnergy extends Resource {
tagsMap.put("servername", managedSystem.name);
log.trace("getThermalMetrics() - tags: {}", tagsMap);
metric.getSample().energyUtil.thermalUtil.cpuTemperatures.forEach((t) -> {
metric.getSample(sample).energyUtil.thermalUtil.cpuTemperatures.forEach((t) -> {
fieldsMap.put("cpuTemperature_" + t.entityInstance, t.temperatureReading);
});
metric.getSample().energyUtil.thermalUtil.inletTemperatures.forEach((t) -> {
metric.getSample(sample).energyUtil.thermalUtil.inletTemperatures.forEach((t) -> {
fieldsMap.put("inletTemperature_" + t.entityInstance, t.temperatureReading);
});
@ -114,7 +131,7 @@ class SystemEnergy extends Resource {
log.trace("getThermalMetrics() - fields: {}", fieldsMap);
list.add(new Measurement(tagsMap, fieldsMap));
list.add(new Measurement(getTimestamp(sample), tagsMap, fieldsMap));
} catch (Exception e) {
log.warn("getThermalMetrics() - error: {}", e.getMessage());

View File

@ -15,12 +15,12 @@
*/
package biz.nellemann.hmci;
import picocli.CommandLine;
import java.io.IOException;
import java.util.jar.Attributes;
import java.util.jar.Manifest;
import picocli.CommandLine;
class VersionProvider implements CommandLine.IVersionProvider {
@Override

View File

@ -1,13 +1,16 @@
package biz.nellemann.hmci;
import biz.nellemann.hmci.dto.xml.VirtualIOServerEntry;
import biz.nellemann.hmci.dto.xml.XmlEntry;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.URI;
import java.net.URISyntaxException;
import com.fasterxml.jackson.dataformat.xml.XmlMapper;
import biz.nellemann.hmci.dto.xml.VirtualIOServerEntry;
import biz.nellemann.hmci.dto.xml.XmlEntry;
public class VirtualIOServer {
private final static Logger log = LoggerFactory.getLogger(VirtualIOServer.class);
@ -58,7 +61,7 @@ public class VirtualIOServer {
throw new UnsupportedOperationException("Failed to deserialize VirtualIOServer");
}
} catch (Exception e) {
} catch (IOException e) {
log.error("discover() - error: {}", e.getMessage());
}
}

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class EnergyUtil {
public PowerUtil powerUtil = new PowerUtil();

View File

@ -1,10 +1,13 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
/**
* Storage adapter
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public final class FiberChannelAdapter {
public String id;

View File

@ -1,6 +1,9 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class GenericAdapter {
public String id;

View File

@ -1,10 +1,13 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class GenericPhysicalAdapters {
public String id;
public String type;
public String type = "";
public String physicalLocation;
public double numOfReads;
public double numOfWrites;

View File

@ -1,12 +1,13 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
/**
* Storage adapter
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public final class GenericVirtualAdapter {
public String id = "";

View File

@ -1,6 +1,9 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class LparProcessor {
public Integer poolId = 0;

View File

@ -1,9 +1,12 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import java.util.ArrayList;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class Network {
public List<String> clientLpars = new ArrayList<>();

View File

@ -1,6 +1,9 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class PhysicalProcessorPool {
public double assignedProcUnits = 0.0;

View File

@ -1,7 +1,10 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class PowerUtil {
public Number powerReading = 0.0;
public float powerReading = 0.0F;
}

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public class ProcessedMetrics {
public SystemUtil systemUtil;

View File

@ -1,7 +1,10 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class SRIOVAdapter {
public String drcIndex = "";

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public class SRIOVLogicalPort {
public String drcIndex;

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class SRIOVPhysicalPort {
public String id;

View File

@ -1,19 +1,21 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class SampleInfo {
@JsonProperty("timeStamp")
public String timestamp ;
public String timestamp;
public String getTimeStamp() {
return timestamp;
}
public Integer status ;
public Integer status;
@JsonProperty("errorInfo")
public List<ErrorInfo> errors;

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class ServerMemory {
public double totalMem = 0.0;

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class ServerProcessor {
public Double totalProcUnits = 0.0;

View File

@ -1,9 +1,12 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import java.util.ArrayList;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class ServerUtil {
public final ServerProcessor processor = new ServerProcessor();

View File

@ -1,12 +1,15 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import java.util.List;
/**
* Network adapter
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public final class SharedAdapter {
public String id;

View File

@ -1,6 +1,9 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class SharedProcessorPool {
public int id;

View File

@ -1,9 +1,12 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import java.util.ArrayList;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class Storage {
public List<String> clientLpars = new ArrayList<>();

View File

@ -1,9 +1,10 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonUnwrapped;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class SystemFirmware {
@JsonUnwrapped

View File

@ -1,9 +1,11 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonUnwrapped;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class SystemUtil {
@JsonProperty("utilInfo")
@ -17,6 +19,10 @@ public final class SystemUtil {
@JsonProperty("utilSamples")
public List<UtilSample> samples;
public UtilSample getSample(int n) {
return samples.size() > n ? samples.get(n) : new UtilSample();
}
public UtilSample getSample() {
return samples.size() > 0 ? samples.get(0) : new UtilSample();
}
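getSample(n) above guards against short sample lists by falling back to an empty UtilSample rather than throwing. A small illustration of that behaviour, with hypothetical values:
    // Hypothetical: systemUtil.samples currently holds 3 samples.
    UtilSample third = systemUtil.getSample(2);    // the last real sample
    UtilSample missing = systemUtil.getSample(5);  // out of range -> a fresh, empty UtilSample
    UtilSample first = systemUtil.getSample();     // same as getSample(0) when any samples exist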

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class Temperature {
public String entityId = "";

View File

@ -1,8 +1,11 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import java.util.ArrayList;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class ThermalUtil {
public List<Temperature> inletTemperatures = new ArrayList<>();

View File

@ -2,7 +2,7 @@ package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties({ "metricArrayOrder" })
@JsonIgnoreProperties(ignoreUnknown = true)
public final class UtilInfo {
public String version = "";

View File

@ -1,12 +1,13 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonAlias;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.ArrayList;
import java.util.List;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class UtilSample {
public String sampleType = "";

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class ViosMemory {
public double assignedMem;
public double utilizedMem;

View File

@ -1,5 +1,8 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
@JsonIgnoreProperties(ignoreUnknown = true)
public final class ViosUtil {
public int id;

View File

@ -1,10 +1,13 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
/**
* Network adapter SEA
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public final class VirtualEthernetAdapter {
public String physicalLocation = "";

View File

@ -1,12 +1,16 @@
package biz.nellemann.hmci.dto.json;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
/**
* Storage adapter - NPIV ?
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public final class VirtualFiberChannelAdapter {
public String id = "";
public String wwpn = "";
public String wwpn2 = "";
public String physicalLocation = "";

View File

@ -3,6 +3,10 @@ package biz.nellemann.hmci.dto.toml;
public class InfluxConfiguration {
public String url;
public String org;
public String token;
public String bucket;
public String username;
public String password;
public String database;
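The configuration now carries both generations of InfluxDB settings: org, token and bucket for InfluxDB 2.x alongside the original username, password and database for 1.x. A minimal sketch of how a consumer might pick between them, assuming unused fields are simply left null; this is illustrative and not the actual InfluxClient logic:
    // Illustrative only - not the actual selection logic in InfluxClient.
    boolean useInfluxV2 = config.token != null && config.org != null && config.bucket != null;
    if (useInfluxV2) {
        // connect with url + org + token + bucket (InfluxDB 2.x)
    } else {
        // connect with url + username + password + database (InfluxDB 1.x)
    }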

View File

@ -5,7 +5,6 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.io.Serializable;
import java.util.List;
//@JsonIgnoreProperties({ "author", "etag" })
@JsonIgnoreProperties(ignoreUnknown = true)

View File

@ -6,9 +6,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlElementWrapper;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
//@JsonIgnoreProperties({ "link" })
@JsonIgnoreProperties(ignoreUnknown = true)

View File

@ -19,6 +19,9 @@ class LogicalPartitionTest extends Specification {
@Shared
private RestClient serviceClient
@Shared
private InfluxClient influxClient
@Shared
private ManagedSystem managedSystem
@ -39,7 +42,7 @@ class LogicalPartitionTest extends Specification {
MockResponses.prepareClientResponseForLogicalPartition(mockServer)
serviceClient.login()
managedSystem = new ManagedSystem(serviceClient, String.format("%s/rest/api/uom/ManagementConsole/2c6b6620-e3e3-3294-aaf5-38e546ff672b/ManagedSystem/b597e4da-2aab-3f52-8616-341d62153559", serviceClient.baseUrl));
managedSystem = new ManagedSystem(serviceClient, influxClient, String.format("%s/rest/api/uom/ManagementConsole/2c6b6620-e3e3-3294-aaf5-38e546ff672b/ManagedSystem/b597e4da-2aab-3f52-8616-341d62153559", serviceClient.baseUrl));
managedSystem.discover()
logicalPartition = managedSystem.logicalPartitions.first()
@ -49,6 +52,7 @@ class LogicalPartitionTest extends Specification {
}
def cleanupSpec() {
serviceClient.logoff()
mockServer.stop()
}
@ -77,6 +81,7 @@ class LogicalPartitionTest extends Specification {
then:
logicalPartition.metric != null
logicalPartition.metric.samples.size() == 6;
}
@ -85,9 +90,9 @@ class LogicalPartitionTest extends Specification {
logicalPartition.deserialize(metricsFile.getText('UTF-8'))
then:
logicalPartition.metric.getSample().lparsUtil.memory.logicalMem == 8192.000
logicalPartition.metric.getSample().lparsUtil.processor.utilizedProcUnits == 0.001
logicalPartition.metric.getSample().lparsUtil.network.virtualEthernetAdapters.first().receivedBytes == 276.467
logicalPartition.metric.getSample().lparsUtil.memory.logicalMem == 16384.000
logicalPartition.metric.getSample().lparsUtil.processor.utilizedProcUnits == 0.00793
logicalPartition.metric.getSample().lparsUtil.network.virtualEthernetAdapters.first().receivedBytes == 54.0
}
@ -95,15 +100,13 @@ class LogicalPartitionTest extends Specification {
when:
logicalPartition.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = logicalPartition.getDetails()
List<Measurement> listOfMeasurements = logicalPartition.getDetails(0)
then:
listOfMeasurements.size() == 1
listOfMeasurements.first().fields['affinityScore'] == 100.0
listOfMeasurements.first().fields['osType'] == 'Linux'
listOfMeasurements.first().fields['type'] == 'AIX/Linux'
listOfMeasurements.first().tags['lparname'] == 'rhel8-ocp-helper'
listOfMeasurements.first().fields['osType'] == 'IBM i'
listOfMeasurements.first().fields['type'] == 'IBMi'
}
@ -111,11 +114,11 @@ class LogicalPartitionTest extends Specification {
when:
logicalPartition.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = logicalPartition.getMemoryMetrics()
List<Measurement> listOfMeasurements = logicalPartition.getMemoryMetrics(0)
then:
listOfMeasurements.size() == 1
listOfMeasurements.first().fields['logicalMem'] == 8192.000
listOfMeasurements.first().fields['logicalMem'] == 16384.0
listOfMeasurements.first().tags['lparname'] == 'rhel8-ocp-helper'
}
@ -125,11 +128,11 @@ class LogicalPartitionTest extends Specification {
when:
logicalPartition.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = logicalPartition.getProcessorMetrics()
List<Measurement> listOfMeasurements = logicalPartition.getProcessorMetrics(0)
then:
listOfMeasurements.size() == 1
listOfMeasurements.first().fields['utilizedProcUnits'] == 0.001
listOfMeasurements.first().fields['utilizedProcUnits'] == 0.00793
listOfMeasurements.first().tags['lparname'] == 'rhel8-ocp-helper'
}
@ -139,12 +142,12 @@ class LogicalPartitionTest extends Specification {
when:
logicalPartition.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = logicalPartition.getVirtualEthernetAdapterMetrics()
List<Measurement> listOfMeasurements = logicalPartition.getVirtualEthernetAdapterMetrics(0)
then:
listOfMeasurements.size() == 1
listOfMeasurements.first().fields['receivedBytes'] == 276.467
listOfMeasurements.first().tags['location'] == 'U9009.42A.21F64EV-V13-C32'
listOfMeasurements.first().fields['receivedBytes'] == 54.0
listOfMeasurements.first().tags['location'] == 'U9009.42A.21F64EV-V11-C7'
}
@ -152,25 +155,13 @@ class LogicalPartitionTest extends Specification {
when:
logicalPartition.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = logicalPartition.getVirtualFibreChannelAdapterMetrics()
List<Measurement> listOfMeasurements = logicalPartition.getVirtualFibreChannelAdapterMetrics(0)
then:
listOfMeasurements.size() == 4
listOfMeasurements.first().fields['writeBytes'] == 6690.133
listOfMeasurements.size() == 2
listOfMeasurements.first().fields['writeBytes'] == 4454.4
listOfMeasurements.first().tags['viosId'] == '1'
}
void "test getVirtualGenericAdapterMetrics"() {
when:
logicalPartition.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = logicalPartition.getVirtualGenericAdapterMetrics()
then:
listOfMeasurements.size() == 1
listOfMeasurements.first().fields['readBytes'] == 0.0
}
}

View File

@ -17,6 +17,9 @@ class ManagedSystemTest extends Specification {
@Shared
private RestClient serviceClient
@Shared
private InfluxClient influxClient
@Shared
private ManagedSystem managedSystem
@ -33,12 +36,13 @@ class ManagedSystemTest extends Specification {
MockResponses.prepareClientResponseForVirtualIOServer(mockServer)
MockResponses.prepareClientResponseForLogicalPartition(mockServer)
serviceClient.login()
managedSystem = new ManagedSystem(serviceClient, String.format("%s/rest/api/uom/ManagementConsole/2c6b6620-e3e3-3294-aaf5-38e546ff672b/ManagedSystem/b597e4da-2aab-3f52-8616-341d62153559", serviceClient.baseUrl));
managedSystem = new ManagedSystem(serviceClient, influxClient, String.format("%s/rest/api/uom/ManagementConsole/2c6b6620-e3e3-3294-aaf5-38e546ff672b/ManagedSystem/b597e4da-2aab-3f52-8616-341d62153559", serviceClient.baseUrl));
managedSystem.discover()
metricsFile = new File(getClass().getResource('/2-managed-system-perf-data2.json').toURI())
}
def cleanupSpec() {
serviceClient.logoff()
mockServer.stop()
}
@ -55,7 +59,7 @@ class ManagedSystemTest extends Specification {
when:
managedSystem.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = managedSystem.getDetails()
List<Measurement> listOfMeasurements = managedSystem.getDetails(0)
then:
listOfMeasurements.size() == 1
@ -68,7 +72,7 @@ class ManagedSystemTest extends Specification {
when:
managedSystem.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = managedSystem.getMemoryMetrics()
List<Measurement> listOfMeasurements = managedSystem.getMemoryMetrics(0)
then:
listOfMeasurements.size() == 1
@ -79,7 +83,7 @@ class ManagedSystemTest extends Specification {
when:
managedSystem.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = managedSystem.getProcessorMetrics()
List<Measurement> listOfMeasurements = managedSystem.getProcessorMetrics(0)
then:
listOfMeasurements.size() == 1
@ -90,7 +94,7 @@ class ManagedSystemTest extends Specification {
when:
managedSystem.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = managedSystem.getSharedProcessorPools()
List<Measurement> listOfMeasurements = managedSystem.getSharedProcessorPools(0)
then:
listOfMeasurements.size() == 4
@ -100,7 +104,7 @@ class ManagedSystemTest extends Specification {
void "test getPhysicalProcessorPool"() {
when:
managedSystem.deserialize(metricsFile.getText('UTF-8'))
List<Measurement> listOfMeasurements = managedSystem.getPhysicalProcessorPool()
List<Measurement> listOfMeasurements = managedSystem.getPhysicalProcessorPool(0)
then:
listOfMeasurements.size() == 1

File diff suppressed because it is too large

File diff suppressed because it is too large