Support for configuration file and multiple HMCs.
Initial work on building deb/rpm packages; build pipeline setup.

parent dc980e269d
commit b5cdc968e5
.editorconfig (new file, 11 lines)
@@ -0,0 +1,11 @@
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 4
+
+[*.yml]
+indent_size = 2
README.md (21 lines changed)
@@ -1,19 +1,22 @@
 # HMC Insights
 
-Small Java-based utility to fetch metrics from one or more HMCs and push those to an InfluxDB time-series database.
-
-## TODO List
-
-- Use TOML for configuration file, to support multiple HMCs - https://github.com/tomlj/tomlj
+Small utility to fetch metrics from one or more HMCs and push those to an InfluxDB time-series database.
 
 ## Usage Instructions
 
 ...
 
+### Create Configuration
+
+Create a configuration file with setup for HMCs and InfluxDB.
+
+### Run HMCi Tool
+
+Requires a Java 8+ runtime.
+
 ## Development Information
 
 ### Build & Test
 
 Use the gradle build tool.
 
@@ -37,4 +40,4 @@ Start the Grafana container, linking it to the InfluxDB container
 
     docker run --name grafana --link influxdb:influxdb --rm -d -p 3000:3000 grafana/grafana:7.1.3
 
-Configure a new InfluxDB datasource on **http://influxdb:8086** to talk to the InfluxDB container. The database must be created beforehand; this can be done by running the hmci tool.
+Configure a new InfluxDB datasource on **http://influxdb:8086** named **hmci** to connect to the InfluxDB container. The database must be created beforehand; this can be done by running the hmci tool first. Grafana dashboards can be imported from the **doc/** folder.
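As a usage sketch only: the jar path below matches the package layout defined in build.gradle and the jar name used in hmci.service, and --config is the CLI option added in App.groovy; exact file names depend on the build and are assumptions here.

    java -jar /opt/hmci/lib/hmci-all.jar --config /opt/hmci/conf/hmci.groovy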
bitbucket-pipelines.yml (new file, 22 lines)
@@ -0,0 +1,22 @@
+image: openjdk:8
+
+pipelines:
+  branches:
+    master:
+      - step:
+          caches:
+            - gradle
+          name: Build and Test
+          script:
+            - ./gradlew clean build
+  tags:                 # add the 'tags' section
+    v*:                 # specify the tag
+      - step:           # define the build pipeline for the tag
+          caches:
+            - gradle
+          name: Build and Release
+          script:
+            - ./gradlew clean build shadowJar startShadowScripts buildDeb buildRpm
+            - for file in ${BITBUCKET_CLONE_DIR}/build/libs/*-all.jar ; do curl -X POST --user "${BB_AUTH_STRING}" "https://api.bitbucket.org/2.0/repositories/${BITBUCKET_REPO_OWNER}/${BITBUCKET_REPO_SLUG}/downloads" --form files=@"${file}" ; done
+            - for file in ${BITBUCKET_CLONE_DIR}/build/distribution/*_all.deb ; do curl -X POST --user "${BB_AUTH_STRING}" "https://api.bitbucket.org/2.0/repositories/${BITBUCKET_REPO_OWNER}/${BITBUCKET_REPO_SLUG}/downloads" --form files=@"${file}" ; done
+            - for file in ${BITBUCKET_CLONE_DIR}/build/distribution/*.noarch.rpm ; do curl -X POST --user "${BB_AUTH_STRING}" "https://api.bitbucket.org/2.0/repositories/${BITBUCKET_REPO_OWNER}/${BITBUCKET_REPO_SLUG}/downloads" --form files=@"${file}" ; done
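The tags section only fires for tag names matching the v* glob, so a release build is triggered by pushing a matching tag, for example:

    git tag v1.0.1
    git push origin v1.0.1

The curl loops then upload the shadow jar, deb, and rpm artifacts to the repository's Downloads section.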
build.gradle (34 lines changed)
@@ -12,6 +12,9 @@ plugins {
     // Apply the application plugin to add support for building a CLI application.
     id 'application'
 
+    id "com.github.johnrengelman.shadow" version "6.0.0"
+    id "nebula.ospackage" version "8.4.1"
 }
 
 repositories {
@@ -25,7 +28,6 @@ dependencies {
     implementation 'org.codehaus.groovy:groovy-all:3.0.5'
     implementation 'com.squareup.okhttp3:okhttp:4.8.0'
     implementation 'org.influxdb:influxdb-java:2.19'
-    // implementation 'org.tomlj:tomlj:1.0.0'
     implementation 'org.slf4j:slf4j-api:1.7.+'
     runtimeOnly 'ch.qos.logback:logback-classic:1.+'
 
@@ -45,3 +47,33 @@ application {
 test {
     useJUnitPlatform()
 }
+
+apply plugin: 'nebula.ospackage'
+ospackage {
+    packageName = 'hmci'
+    release = '1'
+
+    into '/opt/hmci'
+
+    from(shadowJar.outputs.files) {
+        into 'lib'
+    }
+
+    from('build/scriptsShadow') {
+        into 'bin'
+    }
+
+    from('conf/') {
+        into 'conf'
+    }
+}
+
+buildRpm {
+    dependsOn startShadowScripts
+    requires('java-1.8.0-openjdk-headless')
+}
+
+buildDeb {
+    dependsOn startShadowScripts
+    requires('default-jre-headless')
+}
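The same package tasks can be run locally; a sketch (the output directories are the shadow and nebula.ospackage defaults, and are assumptions here):

    ./gradlew clean build shadowJar startShadowScripts buildDeb buildRpm
    ls build/libs/ build/distributions/

buildDeb and buildRpm both depend on startShadowScripts, so the generated start scripts land in the packages' bin directory.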
conf/hmci.groovy (new file, 37 lines)
@@ -0,0 +1,37 @@
+/*
+    Configuration for HMCi
+*/
+
+hmci.refresh = 30
+hmci.rescan = 15
+
+// InfluxDB to save metrics
+influx {
+    url = "http://10.32.64.29:8086"
+    username = "root"
+    password = ""
+    database = "hmci"
+
+}
+
+// One or more HMCs to query for data and metrics
+hmc {
+
+    // HMC on our primary site
+    site1 {
+        url = "https://10.32.64.39:12443"
+        username = "hmci"
+        password = "hmcihmci"
+        unsafe = true
+    }
+
+    /*
+    site2 {
+        url = "https://10.32.64.39:12443"
+        username = "viewer"
+        password = "someSecret"
+        unsafe = false
+    }
+    */
+
+}
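A minimal sketch, not part of the commit, of reading this file with Groovy's ConfigSlurper (App.groovy does the same, with a "development" environment argument; the relative path here is an example):

    // Read the config and walk the per-HMC blocks
    def config = new ConfigSlurper().parse(new File("conf/hmci.groovy").toURI().toURL())
    println config.hmci.refresh                  // 30, seconds between polls
    config.hmc.each { name, settings ->          // one entry per HMC block, e.g. site1
        println "${name}: ${settings.url} unsafe=${settings.unsafe}"
    }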
gradle.properties (new file, 2 lines)
@@ -0,0 +1,2 @@
+group = biz.nellemann.hmci
+version = 1.0.1
hmci.service (new file, 13 lines)
@@ -0,0 +1,13 @@
+[Unit]
+Description=HMC Insights Daemon
+
+[Install]
+WantedBy=multi-user.target
+
+[Service]
+User=nobody
+Group=nogroup
+TimeoutSec=20
+Restart=always
+WorkingDirectory=/opt/hmci
+ExecStart=/usr/bin/java -jar lib/hmci-all.jar
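A sketch of installing and running the unit; the /etc/systemd/system destination is an assumption, since the packages only ship files under /opt/hmci:

    cp hmci.service /etc/systemd/system/
    systemctl daemon-reload
    systemctl enable --now hmci.service
    journalctl -u hmci.service -f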
App.groovy (changed)
@@ -11,41 +11,67 @@ class App implements Runnable {
     HmcClient hmc
     InfluxClient influx
 
+    final ConfigObject configuration
+    final Integer refreshEverySec
+    final Integer rescanHmcEvery
+
+    Map<String, HmcClient> discoveredHmc = new HashMap<>()
     Map<String,ManagedSystem> systems = new HashMap<String, ManagedSystem>()
     Map<String, LogicalPartition> partitions = new HashMap<String, LogicalPartition>()
 
 
-    App(String... args) {
+    App(ConfigObject configuration) {
+        log.debug configuration.toString()
+        this.configuration = configuration
+
+        refreshEverySec = (Integer)configuration.get('hmci.refresh') ?: 60
+        rescanHmcEvery = (Integer)configuration.get('hmci.rescan') ?: 15
+
+        try {
+            influx = new InfluxClient((String) configuration.get('influx')['url'], (String) configuration.get('influx')['username'], (String) configuration.get('influx')['password'], (String) configuration.get('influx')['database'])
+            influx.login()
+        } catch(Exception e) {
+            System.exit(1)
+        }
+
+        // Initial scan
+        discover()
 
         run()
     }
 
 
-    void scanHmc() {
+    void discover() {
 
         try {
 
-            if(hmc == null) {
-                hmc = new HmcClient("https://10.32.64.39:12443", "hmci", "hmcihmci")
-                hmc.login()
+            configuration.get('hmc').each { Object key, Object hmc ->
+                if(!discoveredHmc?.containsKey(key)) {
+                    log.info("Adding HMC: " + hmc.toString())
+                    HmcClient hmcClient = new HmcClient(key as String, hmc['url'] as String, hmc['username'] as String, hmc['password'] as String, hmc['unsafe'] as Boolean)
+                    discoveredHmc.put(key as String, hmcClient)
+                }
             }
 
-            hmc.getManagedSystems().each { systemId, system ->
+            discoveredHmc.each {id, hmcClient ->
 
-                // Add to list of known systems
-                systems.putIfAbsent(systemId, system)
+                try {
+                    hmcClient.login()
+                    hmcClient.getManagedSystems().each { systemId, system ->
 
-                // Get LPAR's for this system
-                hmc.getLogicalPartitionsForManagedSystem(system).each { partitionId, partition ->
+                        // Add to list of known systems
+                        systems.putIfAbsent(systemId, system)
 
-                    // Add to list of known partitions
-                    partitions.putIfAbsent(partitionId, partition)
+                        // Get LPAR's for this system
+                        hmcClient.getLogicalPartitionsForManagedSystem(system).each { partitionId, partition ->
+
+                            // Add to list of known partitions
+                            partitions.putIfAbsent(partitionId, partition)
+                        }
+                    }
+
+                } catch(Exception e) {
+                    log.error("discover() - " + id + " error: " + e.message)
+                    discoveredHmc.remove(id)
                 }
+
             }
 
         } catch(Exception e) {
             log.error(e.message)
-            hmc = null
         }
 
     }
@@ -55,15 +81,12 @@ class App implements Runnable {
 
         try {
 
-            if(hmc == null) {
-                hmc = new HmcClient("https://10.32.64.39:12443", "hmci", "hmcihmci")
-                hmc.login()
-            }
-
             systems.each {systemId, system ->
 
+                HmcClient hmcClient = discoveredHmc.get(system.hmcId)
+
                 // Get and process metrics for this system
-                String tmpJsonString = hmc.getPcmDataForManagedSystem(system)
+                String tmpJsonString = hmcClient.getPcmDataForManagedSystem(system)
                 if(tmpJsonString && !tmpJsonString.empty) {
                     system.processMetrics(tmpJsonString)
                 }
@@ -82,17 +105,13 @@ class App implements Runnable {
 
         try {
 
-            if(hmc == null) {
-                hmc = new HmcClient("https://10.32.64.39:12443", "hmci", "hmcihmci")
-                hmc.login()
-            }
-
 
             // Get LPAR's for this system
             partitions.each { partitionId, partition ->
 
+                HmcClient hmcClient = discoveredHmc.get(partition.system.hmcId)
+
                 // Get and process metrics for this partition
-                String tmpJsonString2 = hmc.getPcmDataForLogicalPartition(partition)
+                String tmpJsonString2 = hmcClient.getPcmDataForLogicalPartition(partition)
                 if(tmpJsonString2 && !tmpJsonString2.empty) {
                     partition.processMetrics(tmpJsonString2)
                 }
@@ -105,25 +124,15 @@ class App implements Runnable {
         }
     }
 
 
     void writeMetricsForManagedSystems() {
 
-        if(!influx) {
-            influx = new InfluxClient("http://127.0.0.1:8086", "root", "", "hmci")
-            influx.login()
-        }
-
         systems.each {systemId, system ->
             influx.writeManagedSystem(system)
         }
     }
 
 
     void writeMetricsForLogicalPartitions() {
 
-        if(!influx) {
-            influx = new InfluxClient("http://127.0.0.1:8086", "root", "", "hmci")
-            influx.login()
-        }
-
         partitions.each {partitionId, partition ->
             influx.writeLogicalPartition(partition)
         }
 
@@ -135,24 +144,28 @@ class App implements Runnable {
         def cli = new CliBuilder()
         cli.h(longOpt: 'help', 'display usage')
         cli.v(longOpt: 'version', 'display version')
-        cli.c(longOpt: 'config', args: 1, required: true, defaultValue: '~/.config/hmci.toml', 'configuration file')
+        cli.c(longOpt: 'config', args: 1, required: true, defaultValue: '/opt/hmci/conf/hmci.groovy', 'configuration file')
 
         OptionAccessor options = cli.parse(args)
         if (options.h) cli.usage()
 
+        ConfigObject configuration
         if(options.c) {
 
             File configurationFile = new File((String)options.config)
-            if(configurationFile.exists()) {
-                log.info("Configuration file found at: " + configurationFile.toString())
-            } else {
-                log.warn("No configuration file found at: " + configurationFile.toString())
+            if(!configurationFile.exists()) {
+                println("No configuration file found at: " + configurationFile.toString())
+                System.exit(1)
             }
 
+            // Read in 'config.groovy' for the development environment.
+            configuration = new ConfigSlurper("development").parse(configurationFile.toURI().toURL());
+
+            // Flatten configuration for easy access keys with dotted notation.
+            //configuration = conf.flatten();
         }
 
-        // TODO: Read configuration file or create new empty file,
-        // pass the properties or configuration bean to App.
-
-        new App().run()
+        new App(configuration)
+        System.exit(0);
     }
 
 
@@ -163,10 +176,7 @@ class App implements Runnable {
         log.info("In RUN ")
 
         boolean keepRunning = true
-        int numberOfRuns = 0
-
-        // Do initial scan - TODO: should do this once in a while..
-        scanHmc()
+        int executions = 0
 
         while(keepRunning) {
 
@@ -176,22 +186,15 @@ class App implements Runnable {
             getMetricsForPartitions()
             writeMetricsForLogicalPartitions()
 
-            // Refresh HMC
-            if(numberOfRuns % 5) {
-                scanHmc()
+            // Refresh HMC's
+            if(executions % rescanHmcEvery) {
+                discover()
             }
 
-            // Stop after some time
-            if(numberOfRuns > 15) {
-                keepRunning = false
-            }
-
-            numberOfRuns++
-            Thread.sleep(60 * 1000)
+            executions++
+            Thread.sleep(refreshEverySec * 1000)
         }
 
         hmc?.logoff()
         influx?.logoff()
     }
 
 }
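The run() loop sleeps hmci.refresh seconds per pass and re-runs discovery on a modulo check. Note that in Groovy a non-zero integer is truthy, so "executions % rescanHmcEvery" fires on every pass that is not a multiple of rescanHmcEvery; a sketch of the presumably intended cadence (re-scan only every N-th pass) compares against zero:

    // Sketch only: pollOnce() is a hypothetical stand-in for the get/write
    // metric calls above, and the '== 0' comparison is an assumption about
    // the intended behaviour.
    int executions = 0
    while (keepRunning) {
        pollOnce()
        if (executions % rescanHmcEvery == 0) {   // every N-th pass
            discover()
        }
        executions++
        Thread.sleep(refreshEverySec * 1000)
    }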
HmcClient.groovy (changed)
@@ -7,6 +7,7 @@ import okhttp3.OkHttpClient
 import okhttp3.Request
 import okhttp3.RequestBody
 import okhttp3.Response
+import org.influxdb.InfluxDBFactory
 
 import javax.net.ssl.HostnameVerifier
 import javax.net.ssl.SSLContext
@@ -23,22 +24,29 @@ class HmcClient {
 
     private final MediaType MEDIA_TYPE_IBM_XML_LOGIN = MediaType.parse("application/vnd.ibm.powervm.web+xml; type=LogonRequest");
 
+    private final String hmcId
     private final String baseUrl
     private final String username
     private final String password
+    private final Boolean unsafe
 
     //protected Map<String,ManagedSystem> managedSystems = new HashMap<String, ManagedSystem>()
     protected String authToken
     private final OkHttpClient client
 
 
-    HmcClient(String baseUrl, String username, String password) {
+    HmcClient(String hmcId, String baseUrl, String username, String password, Boolean unsafe = false) {
+        this.hmcId = hmcId
         this.baseUrl = baseUrl
         this.username = username
         this.password = password
+        this.unsafe = unsafe
 
-        //this.client = new OkHttpClient() // OR Unsafe (ignore SSL errors) below
-        this.client = getUnsafeOkHttpClient()
+        if(unsafe) {
+            this.client = getUnsafeOkHttpClient()
+        } else {
+            this.client = new OkHttpClient()
+        }
     }
 
 
@@ -50,6 +58,10 @@ class HmcClient {
      */
     void login() throws IOException {
 
+        if(authToken) {
+            return
+        }
+
         String payload = """\
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <LogonRequest xmlns="http://www.ibm.com/xmlns/systems/power/firmware/web/mc/2012_10/" schemaVersion="V1_0">
@@ -66,16 +78,22 @@ class HmcClient {
             .put(RequestBody.create(payload, MEDIA_TYPE_IBM_XML_LOGIN))
             .build();
 
-        Response response = client.newCall(request).execute();
-        if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
+        try {
+            Response response = client.newCall(request).execute();
+            if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
 
-        // Get response body and parse
-        String responseBody = response.body.string()
+            // Get response body and parse
+            String responseBody = response.body.string()
 
-        def xml = new XmlSlurper().parseText(responseBody)
-        authToken = xml.toString()
+            def xml = new XmlSlurper().parseText(responseBody)
+            authToken = xml.toString()
 
-        log.debug("login() - Auth Token: " + authToken)
+            log.debug("login() - Auth Token: " + authToken)
+        } catch(Exception e) {
+            log.error(e.message)
+            throw new Exception(e)
+        }
 
     }
 
@@ -119,6 +137,7 @@ class HmcClient {
                 entry.content.each { content ->
                     content.ManagedSystem.each { system ->
                         ManagedSystem managedSystem = new ManagedSystem(
+                            hmcId,
                             entry.id as String,
                             system.SystemName as String,
                             system.MachineTypeModelAndSerialNumber?.MachineType as String,
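login() PUTs the LogonRequest and slurps the session token out of the XML reply. As an illustration of why "xml.toString()" yields the bare token (the response layout here is an assumption based only on the parsing code), GPathResult.toString() returns the parsed element's text content:

    // Hypothetical LogonResponse body, reduced to a single child element
    def xml = new XmlSlurper().parseText('<LogonResponse><X-API-Session>example-token</X-API-Session></LogonResponse>')
    assert xml.toString() == 'example-token'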
InfluxClient.groovy (changed)
@@ -33,8 +33,13 @@ class InfluxClient {
 
     void login() {
         if(!influxDB) {
-            influxDB = InfluxDBFactory.connect(url, username, password);
-            createDatabase()
+            try {
+                influxDB = InfluxDBFactory.connect(url, username, password);
+                createDatabase()
+            } catch(Exception e) {
+                log.error(e.message)
+                throw new Exception(e)
+            }
         }
     }
 
@@ -42,26 +47,21 @@ class InfluxClient {
         influxDB?.close();
     }
 
 
     void createDatabase() {
-        try {
-            // Create a database...
-            // https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/
-            influxDB.query(new Query("CREATE DATABASE " + database));
-            influxDB.setDatabase(database);
+        // Create a database...
+        influxDB.query(new Query("CREATE DATABASE " + database));
+        influxDB.setDatabase(database);
 
-            // ... and a retention policy, if necessary.
-            // https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/
-            /*String retentionPolicyName = "one_day_only";
-            influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName
-                + " ON " + database + " DURATION 1d REPLICATION 1 DEFAULT"));
-            influxDB.setRetentionPolicy(retentionPolicyName);*/
-
-            // Enable batch writes to get better performance.
-            influxDB.enableBatch(BatchOptions.DEFAULTS);
-        } catch(Exception e) {
-            log.error("createDatabase()", e)
-        }
+        // ... and a retention policy, if necessary.
+        /*
+        String retentionPolicyName = "HMCI_ONE_YEAR";
+        influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName
+            + " ON " + database + " DURATION 365d REPLICATION 1 DEFAULT"));
+        influxDB.setRetentionPolicy(retentionPolicyName);*/
 
+        // Enable batch writes to get better performance.
+        influxDB.enableBatch(BatchOptions.DEFAULTS);
     }
 
 
@@ -84,17 +84,6 @@ class InfluxClient {
     }
 
 
-    void read() {
-        // Query your data using InfluxQL.
-        // https://docs.influxdata.com/influxdb/v1.7/query_language/data_exploration/#the-basic-select-statement
-        QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet"));
-        println(queryResult);
-    }
-
-
-
-
-
 
     /*
         Managed System
@@ -114,10 +103,7 @@ class InfluxClient {
             return
         }
 
-        BatchPoints batchPoints = BatchPoints
-            .database(database)
-            //.retentionPolicy("defaultPolicy")
-            .build();
+        BatchPoints batchPoints = BatchPoints.database(database).build();
 
         getSystemMemory(system, timestamp).each {
             batchPoints.point(it)
@@ -185,9 +171,7 @@ class InfluxClient {
             return
         }
 
-        BatchPoints batchPoints = BatchPoints
-            .database(database)
-            .build();
+        BatchPoints batchPoints = BatchPoints.database(database).build();
 
         getPartitionMemory(partition, timestamp).each {
             batchPoints.point(it)
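Each write path collects points into a single BatchPoints per system or partition, and enableBatch(BatchOptions.DEFAULTS) batches the HTTP writes themselves. A sketch of the influxdb-java calls involved; the endpoint, measurement, tag, and field names are illustrative only:

    import org.influxdb.InfluxDBFactory
    import org.influxdb.dto.BatchPoints
    import org.influxdb.dto.Point
    import java.util.concurrent.TimeUnit

    def influxDB = InfluxDBFactory.connect("http://localhost:8086", "root", "")  // example endpoint
    BatchPoints batchPoints = BatchPoints.database("hmci").build()
    batchPoints.point(Point.measurement("SystemMemory")
            .time(System.currentTimeMillis(), TimeUnit.MILLISECONDS)
            .tag("system", "ExampleSystem")
            .addField("totalMem", 1024.0d)
            .build())
    influxDB.write(batchPoints)
    influxDB.close()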
LogicalPartition.groovy (changed)
@@ -32,14 +32,14 @@ class LogicalPartition extends MetaSystem {
             partition: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getMemoryMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             logicalMem: metrics.systemUtil.utilSamples.first().lparsUtil.first().memory.logicalMem.first(),
             backedPhysicalMem: metrics.systemUtil.utilSamples.first().lparsUtil.first().memory.backedPhysicalMem.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getMemoryMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -56,7 +56,7 @@ class LogicalPartition extends MetaSystem {
             partition: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getProcessorMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             utilizedProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.utilizedProcUnits.first(),
@@ -72,7 +72,7 @@ class LogicalPartition extends MetaSystem {
             timeSpentWaitingForDispatch: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.timePerInstructionExecution.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getProcessorMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -94,7 +94,7 @@ class LogicalPartition extends MetaSystem {
                 vswitchId: it.vswitchId as String,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getVirtualEthernetAdapterMetrics() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 receivedPhysicalBytes: it.receivedPhysicalBytes.first(),
@@ -103,7 +103,7 @@ class LogicalPartition extends MetaSystem {
                 sentBytes: it.sentBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getVirtualEthernetAdapterMetrics() - fields: " + fieldsMap.toString())
 
             list.add(map)
         }
@@ -126,14 +126,14 @@ class LogicalPartition extends MetaSystem {
                 wwpn: it.wwpn,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getVirtualFiberChannelAdaptersMetrics() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 transmittedBytes: it.transmittedBytes.first(),
                 writeBytes: it.writeBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getVirtualFiberChannelAdaptersMetrics() - fields: " + fieldsMap.toString())
 
             list.add(map)
         }
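Each get*Metrics() method returns a list of maps carrying a "tags" map and a "fields" map, which InfluxClient later turns into points. The shape of one entry (values illustrative only):

    Map<String, Object> entry = [
        tags  : [ system: "ExampleSystem", partition: "lpar01" ],
        fields: [ logicalMem: 4096.0, backedPhysicalMem: 4096.0 ],
    ]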
ManagedSystem.groovy (changed)
@@ -6,16 +6,16 @@ import groovy.util.logging.Slf4j
 @Slf4j
 class ManagedSystem extends MetaSystem {
 
-    public String id
-    public String name
-    public String type
-    public String model
-    public String serialNumber
-
-    // From PCM Data
+    public final String hmcId
+    public final String id
+    public final String name
+    public final String type
+    public final String model
+    public final String serialNumber
 
 
-    ManagedSystem(String id, String name, String type, String model, String serialNumber) {
+    ManagedSystem(String hmcId, String id, String name, String type, String model, String serialNumber) {
+        this.hmcId = hmcId
         this.id = id
         this.name = name
         this.type = type
@@ -37,7 +37,7 @@ class ManagedSystem extends MetaSystem {
             system: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getMemoryMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             totalMem: metrics.systemUtil.utilSamples.first().serverUtil.memory.totalMem.first(),
@@ -46,7 +46,7 @@ class ManagedSystem extends MetaSystem {
             assignedMemToLpars: metrics.systemUtil.utilSamples.first().serverUtil.memory.assignedMemToLpars.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getMemoryMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -62,7 +62,7 @@ class ManagedSystem extends MetaSystem {
             system: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getProcessorMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             availableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.totalProcUnits.first(),
@@ -71,7 +71,7 @@ class ManagedSystem extends MetaSystem {
             configurableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.configurableProcUnits.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getProcessorMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -89,23 +89,23 @@ class ManagedSystem extends MetaSystem {
                 pool: it.name,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getSharedProcessorPools() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 assignedProcUnits: it.assignedProcUnits.first(),
                 availableProcUnits: it.availableProcUnits.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getSharedProcessorPools() - fields: " + fieldsMap.toString())
 
             list.add(map)
 
         }
 
         return list
 
     }
 
 
     List<Map> getSystemSharedAdapters() {
 
         List<Map> list = new ArrayList<>()
 
@@ -120,7 +120,7 @@ class ManagedSystem extends MetaSystem {
                 vios: vios.name,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getSystemSharedAdapters() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 sentBytes: it.sentBytes.first(),
@@ -128,7 +128,7 @@ class ManagedSystem extends MetaSystem {
                 transferredBytes: it.transferredBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getSystemSharedAdapters() - fields: " + fieldsMap.toString())
 
             list.add(map)
         }
@@ -150,9 +150,10 @@ class ManagedSystem extends MetaSystem {
                 system: name,
                 wwpn: it.wwpn,
                 vios: vios.name,
+                device: it.physicalLocation,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getSystemFiberChannelAdapters() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 writeBytes: it.writeBytes.first(),
@@ -160,7 +161,7 @@ class ManagedSystem extends MetaSystem {
                 transmittedBytes: it.transmittedBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getSystemFiberChannelAdapters() - fields: " + fieldsMap.toString())
 
             list.add(map)
 
MetaSystem.groovy (changed)
@@ -9,7 +9,7 @@ import java.time.format.DateTimeFormatter
 import java.time.format.DateTimeParseException
 
 @Slf4j
-class MetaSystem {
+abstract class MetaSystem {
 
     protected PcmData metrics
 
HmcClientTest.groovy (changed)
@@ -12,7 +12,7 @@ class HmcClientTest extends Specification {
 
     def setup() {
         mockServer.start();
-        hmc = new HmcClient(mockServer.url("/").toString(), "testUser", "testPassword")
+        hmc = new HmcClient("site", mockServer.url("/").toString(), "testUser", "testPassword")
        hmc.authToken = "blaBla"
     }
 
@@ -44,7 +44,7 @@ class HmcClientTest extends Specification {
         mockServer.enqueue(new MockResponse().setBody(testXml));
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         Map<String, LogicalPartition> partitions = hmc.getLogicalPartitionsForManagedSystem(system)
 
         then:
InfluxClientTest.groovy (changed)
@@ -1,7 +1,9 @@
 package biz.nellemann.hmci
 
+import spock.lang.Ignore
 import spock.lang.Specification
 
+@Ignore
 class InfluxClientTest extends Specification {
 
     InfluxClient influxClient
 
@@ -23,7 +25,7 @@ class InfluxClientTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
         system.processMetrics(testJson)
         influxClient.writeManagedSystem(system)
 
@@ -40,7 +42,7 @@ class InfluxClientTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
         LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
 
         lpar.processMetrics(testJson)
LogicalPartitionTest.groovy (changed)
@@ -12,7 +12,7 @@ class LogicalPartitionTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
         lpar.processMetrics(testJson)
 
@@ -28,7 +28,7 @@ class LogicalPartitionTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-logical-partition.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
 
         when:
ManagedSystemTest.groovy (changed)
@@ -11,7 +11,7 @@ class ManagedSystemTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         system.processMetrics(testJson)
 
         then:
@@ -29,7 +29,7 @@ class ManagedSystemTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-managed-system.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
 
         when:
         system.processMetrics(testJson)
@@ -45,7 +45,7 @@ class ManagedSystemTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-managed-system.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
 
         when:
         system.processMetrics(testJson)
@@ -61,7 +61,7 @@ class ManagedSystemTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-managed-system.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
 
         when:
         system.processMetrics(testJson)
@@ -72,24 +72,4 @@ class ManagedSystemTest extends Specification {
         listOfMaps.first().get("fields")['assignedProcUnits'] == 23.767
     }
 
-
-    /*
-    void "test getSharedAdapterMetrics"() {
-
-        setup:
-        def testFile = new File(getClass().getResource('/pcm-data-logical-partition.json').toURI())
-        def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
-        LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
-
-        when:
-        lpar.processMetrics(testJson)
-        List<Map> listOfMaps = lpar.getSharedAdapterMetrics()
-
-        then:
-        listOfMaps.size() == 1
-        listOfMaps.first().get("fields")['receivedBytes'] == 276.467
-        listOfMaps.first().get("tags")['sea'] == 'ent5'
-    }*/
-
 }