Support for configuration file and multiple HMCs.
Initial work on building deb/rpm packages; build pipeline setup.
parent dc980e269d
commit b5cdc968e5
.editorconfig | 11 (new file)

@@ -0,0 +1,11 @@
+root = true
+
+[*]
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 4
+
+[*.yml]
+indent_size = 2
README.md | 21

@@ -1,19 +1,22 @@
 # HMC Insights
 
-Small Java-based utility to fetch metrics from one or more HMC's and push those to an InfluxDB time-series database.
+Small utility to fetch metrics from one or more HMC's and push those to an InfluxDB time-series database.
 
-## TODO Liste
-
-- Use TOML for configuration file, to support multiple HMC's - https://github.com/tomlj/tomlj
-
 ## Usage Instructions
 
+...
+
+### Create Configuration
+
+Create a configuration file with setup for HMC's and InfluxDB.
+
+### Run HMCi Tool
+
+Requires Java 8+ runtime.
+
 ## Development Information
 
 ### Build & Test
 
 Use the gradle build tool
@@ -37,4 +40,4 @@ Start the Grafana container, linking it to the InfluxDB container
 
     docker run --name grafana --link influxdb:influxdb --rm -d -p 3000:3000 grafana/grafana:7.1.3
 
-Configure a new InfluxDB datasource on **http://influxdb:8086** to talk to the InfluxDB container. The database must be created beforehand, this can be done by running the hmci tool.
+Configure a new InfluxDB datasource on **http://influxdb:8086** named **hmci** to connect to the InfluxDB container. The database must be created beforehand, this can be done by running the hmci tool first. Grafana dashboards can be imported from the **doc/** folder.
bitbucket-pipelines.yml | 22 (new file)

@@ -0,0 +1,22 @@
+image: openjdk:8
+
+pipelines:
+  branches:
+    master:
+      - step:
+          caches:
+            - gradle
+          name: Build and Test
+          script:
+            - ./gradlew clean build
+  tags:                          # add the 'tags' section
+    v*:                          # specify the tag
+      - step:                    # define the build pipeline for the tag
+          caches:
+            - gradle
+          name: Build and Release
+          script:
+            - ./gradlew clean build shadowJar startShadowScripts buildDeb buildRpm
+            - for file in ${BITBUCKET_CLONE_DIR}/build/libs/*-all.jar ; do curl -X POST --user "${BB_AUTH_STRING}" "https://api.bitbucket.org/2.0/repositories/${BITBUCKET_REPO_OWNER}/${BITBUCKET_REPO_SLUG}/downloads" --form files=@"${file}" ; done
+            - for file in ${BITBUCKET_CLONE_DIR}/build/distribution/*_all.deb ; do curl -X POST --user "${BB_AUTH_STRING}" "https://api.bitbucket.org/2.0/repositories/${BITBUCKET_REPO_OWNER}/${BITBUCKET_REPO_SLUG}/downloads" --form files=@"${file}" ; done
+            - for file in ${BITBUCKET_CLONE_DIR}/build/distribution/*.noarch.rpm ; do curl -X POST --user "${BB_AUTH_STRING}" "https://api.bitbucket.org/2.0/repositories/${BITBUCKET_REPO_OWNER}/${BITBUCKET_REPO_SLUG}/downloads" --form files=@"${file}" ; done
build.gradle | 34

@@ -12,6 +12,9 @@ plugins {
     // Apply the application plugin to add support for building a CLI application.
     id 'application'
+
+    id "com.github.johnrengelman.shadow" version "6.0.0"
+    id "nebula.ospackage" version "8.4.1"
 }
 
 repositories {
@@ -25,7 +28,6 @@ dependencies {
     implementation 'org.codehaus.groovy:groovy-all:3.0.5'
     implementation 'com.squareup.okhttp3:okhttp:4.8.0'
     implementation 'org.influxdb:influxdb-java:2.19'
-    // implementation 'org.tomlj:tomlj:1.0.0'
     implementation 'org.slf4j:slf4j-api:1.7.+'
     runtimeOnly 'ch.qos.logback:logback-classic:1.+'
 
@@ -45,3 +47,33 @@ application {
 test {
     useJUnitPlatform()
 }
+
+apply plugin: 'nebula.ospackage'
+ospackage {
+    packageName = 'hmci'
+    release = '1'
+
+    into '/opt/hmci'
+
+    from(shadowJar.outputs.files) {
+        into 'lib'
+    }
+
+    from('build/scriptsShadow') {
+        into 'bin'
+    }
+
+    from('conf/') {
+        into 'conf'
+    }
+}
+
+buildRpm {
+    dependsOn startShadowScripts
+    requires('java-1.8.0-openjdk-headless')
+}
+
+buildDeb {
+    dependsOn startShadowScripts
+    requires('default-jre-headless')
+}
conf/hmci.groovy | 37 (new file)

@@ -0,0 +1,37 @@
+/*
+    Configuration for HMCi
+*/
+
+hmci.refresh = 30
+hmci.rescan = 15
+
+// InfluxDB to save metrics
+influx {
+    url = "http://10.32.64.29:8086"
+    username = "root"
+    password = ""
+    database = "hmci"
+}
+
+// One or more HMC to query for data and metrics
+hmc {
+
+    // HMC on our primary site
+    site1 {
+        url = "https://10.32.64.39:12443"
+        username = "hmci"
+        password = "hmcihmci"
+        unsafe = true
+    }
+
+    /*
+    site2 {
+        url = "https://10.32.64.39:12443"
+        username = "viewer"
+        password = "someSecret"
+        unsafe = false
+    }
+    */
+
+}
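As an aside, a minimal sketch (not part of the commit) of how a ConfigSlurper-based reader, like the one App.groovy gains in this commit, sees the file above: dotted assignments such as hmci.refresh become nested ConfigObject entries, each block under hmc { } is one HMC keyed by its block name, and flatten() exposes the same values under dotted string keys.

    // Sketch, assuming the file above is saved as conf/hmci.groovy.
    def config = new ConfigSlurper().parse(new File('conf/hmci.groovy').toURI().toURL())

    assert config.hmci.refresh == 30                      // nested access
    assert config.flatten()['hmci.refresh'] == 30         // dotted keys after flatten()
    assert config.influx.url == "http://10.32.64.29:8086"

    // Each block under hmc { } is one HMC definition, keyed by its name.
    config.hmc.each { name, settings ->
        println "HMC ${name}: ${settings.url} (unsafe: ${settings.unsafe})"
    }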
gradle.properties | 2 (new file)

@@ -0,0 +1,2 @@
+group = biz.nellemann.hmci
+version = 1.0.1
hmci.service | 13 (new file)

@@ -0,0 +1,13 @@
+[Unit]
+Description=HMC Insights Daemon
+
+[Install]
+WantedBy=multi-user.target
+
+[Service]
+User=nobody
+Group=nogroup
+TimeoutSec=20
+Restart=always
+WorkingDirectory=/opt/hmci
+ExecStart=/usr/bin/java -jar lib/hmci-all.jar
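The unit assumes the filesystem layout produced by the ospackage block in build.gradle above: package contents install under /opt/hmci, with the shadow jar in lib/ and the generated start scripts in bin/. That is why WorkingDirectory is /opt/hmci and ExecStart launches the jar from lib/.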
src/main/groovy/biz/nellemann/hmci/App.groovy

@@ -11,41 +11,67 @@ class App implements Runnable {
     HmcClient hmc
     InfluxClient influx
 
+    final ConfigObject configuration
+    final Integer refreshEverySec
+    final Integer rescanHmcEvery
+
+    Map<String, HmcClient> discoveredHmc = new HashMap<>()
     Map<String,ManagedSystem> systems = new HashMap<String, ManagedSystem>()
     Map<String, LogicalPartition> partitions = new HashMap<String, LogicalPartition>()
 
-    App(String... args) {
-    }
+    App(ConfigObject configuration) {
+        log.debug configuration.toString()
+        this.configuration = configuration
 
-    void scanHmc() {
+        refreshEverySec = (Integer)configuration.get('hmci.refresh') ?: 60
+        rescanHmcEvery = (Integer)configuration.get('hmci.rescan') ?: 15
 
         try {
-            if(hmc == null) {
-                hmc = new HmcClient("https://10.32.64.39:12443", "hmci", "hmcihmci")
-                hmc.login()
-            }
+            influx = new InfluxClient((String) configuration.get('influx')['url'], (String) configuration.get('influx')['username'], (String) configuration.get('influx')['password'], (String) configuration.get('influx')['database'])
+            influx.login()
+        } catch(Exception e) {
+            System.exit(1)
+        }
 
-            hmc.getManagedSystems().each { systemId, system ->
+        // Initial scan
+        discover()
 
-                // Add to list of known systems
-                systems.putIfAbsent(systemId, system)
+        run()
+    }
 
-                // Get LPAR's for this system
-                hmc.getLogicalPartitionsForManagedSystem(system).each { partitionId, partition ->
 
-                    // Add to list of known partitions
-                    partitions.putIfAbsent(partitionId, partition)
-                }
-            }
+    void discover() {
 
-        } catch(Exception e) {
-            log.error(e.message)
-            hmc = null
-        }
+        configuration.get('hmc').each { Object key, Object hmc ->
+            if(!discoveredHmc?.containsKey(key)) {
+                log.info("Adding HMC: " + hmc.toString())
+                HmcClient hmcClient = new HmcClient(key as String, hmc['url'] as String, hmc['username'] as String, hmc['password'] as String, hmc['unsafe'] as Boolean)
+                discoveredHmc.put(key as String, hmcClient)
+            }
+        }
+
+        discoveredHmc.each {id, hmcClient ->
+
+            try {
+                hmcClient.login()
+                hmcClient.getManagedSystems().each { systemId, system ->
+
+                    // Add to list of known systems
+                    systems.putIfAbsent(systemId, system)
+
+                    // Get LPAR's for this system
+                    hmcClient.getLogicalPartitionsForManagedSystem(system).each { partitionId, partition ->
+
+                        // Add to list of known partitions
+                        partitions.putIfAbsent(partitionId, partition)
+                    }
+                }
+            } catch(Exception e) {
+                log.error("discover() - " + id + " error: " + e.message)
+                discoveredHmc.remove(id)
+            }
+        }
     }
 
 }
@@ -55,15 +81,12 @@ class App implements Runnable {
 
         try {
 
-            if(hmc == null) {
-                hmc = new HmcClient("https://10.32.64.39:12443", "hmci", "hmcihmci")
-                hmc.login()
-            }
-
             systems.each {systemId, system ->
 
+                HmcClient hmcClient = discoveredHmc.get(system.hmcId)
+
                 // Get and process metrics for this system
-                String tmpJsonString = hmc.getPcmDataForManagedSystem(system)
+                String tmpJsonString = hmcClient.getPcmDataForManagedSystem(system)
                 if(tmpJsonString && !tmpJsonString.empty) {
                     system.processMetrics(tmpJsonString)
                 }
@@ -82,17 +105,13 @@ class App implements Runnable {
 
         try {
 
-            if(hmc == null) {
-                hmc = new HmcClient("https://10.32.64.39:12443", "hmci", "hmcihmci")
-                hmc.login()
-            }
-
             // Get LPAR's for this system
             partitions.each { partitionId, partition ->
 
+                HmcClient hmcClient = discoveredHmc.get(partition.system.hmcId)
+
                 // Get and process metrics for this partition
-                String tmpJsonString2 = hmc.getPcmDataForLogicalPartition(partition)
+                String tmpJsonString2 = hmcClient.getPcmDataForLogicalPartition(partition)
                 if(tmpJsonString2 && !tmpJsonString2.empty) {
                     partition.processMetrics(tmpJsonString2)
                 }
@@ -105,25 +124,15 @@ class App implements Runnable {
         }
     }
 
     void writeMetricsForManagedSystems() {
-        if(!influx) {
-            influx = new InfluxClient("http://127.0.0.1:8086", "root", "", "hmci")
-            influx.login()
-        }
-
         systems.each {systemId, system ->
             influx.writeManagedSystem(system)
         }
     }
 
     void writeMetricsForLogicalPartitions() {
-        if(!influx) {
-            influx = new InfluxClient("http://127.0.0.1:8086", "root", "", "hmci")
-            influx.login()
-        }
-
         partitions.each {partitionId, partition ->
             influx.writeLogicalPartition(partition)
         }
@@ -135,24 +144,28 @@ class App implements Runnable {
         def cli = new CliBuilder()
         cli.h(longOpt: 'help', 'display usage')
         cli.v(longOpt: 'version', 'display version')
-        cli.c(longOpt: 'config', args: 1, required: true, defaultValue: '~/.config/hmci.toml', 'configuration file')
+        cli.c(longOpt: 'config', args: 1, required: true, defaultValue: '/opt/hmci/conf/hmci.groovy', 'configuration file')
 
         OptionAccessor options = cli.parse(args)
         if (options.h) cli.usage()
 
+        ConfigObject configuration
         if(options.c) {
 
             File configurationFile = new File((String)options.config)
-            if(configurationFile.exists()) {
-                log.info("Configuration file found at: " + configurationFile.toString())
-            } else {
-                log.warn("No configuration file found at: " + configurationFile.toString())
+            if(!configurationFile.exists()) {
+                println("No configuration file found at: " + configurationFile.toString())
+                System.exit(1)
             }
-        }
 
-        // TODO: Read configuration file or create new empty file,
-        // pass the properties or configuration bean to App.
+            // Read in 'config.groovy' for the development environment.
+            configuration = new ConfigSlurper("development").parse(configurationFile.toURI().toURL());
 
-        new App().run()
+            // Flatten configuration for easy access keys with dotted notation.
+            //configuration = conf.flatten();
+        }
+
+        new App(configuration)
         System.exit(0);
     }
@@ -163,10 +176,7 @@ class App implements Runnable {
         log.info("In RUN ")
 
         boolean keepRunning = true
-        int numberOfRuns = 0
+        int executions = 0
 
-        // Do initial scan - TODO: should do this once in a while..
-        scanHmc()
-
         while(keepRunning) {
@@ -176,22 +186,15 @@ class App implements Runnable {
             getMetricsForPartitions()
             writeMetricsForLogicalPartitions()
 
-            // Refresh HMC
-            if(numberOfRuns % 5) {
-                scanHmc()
-            }
+            // Refresh HMC's
+            if(executions % rescanHmcEvery) {
+                discover()
+            }
 
-            // Stop after some time
-            if(numberOfRuns > 15) {
-                keepRunning = false
-            }
-
-            numberOfRuns++
-            Thread.sleep(60 * 1000)
+            executions++
+            Thread.sleep(refreshEverySec * 1000)
         }
 
-        hmc?.logoff()
-        influx?.logoff()
     }
 
 }
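One behavior worth noting in the loop above (illustration only, not part of the diff): Groovy coerces integers to booleans, so executions % rescanHmcEvery is false exactly when executions is a multiple of rescanHmcEvery. With rescan = 15 from the sample configuration, discover() therefore runs on every pass except each 15th one:

    // Standalone sketch of the guard, with a hypothetical rescan value.
    int rescanHmcEvery = 15
    (0..3).each { int executions ->
        boolean rescan = executions % rescanHmcEvery   // Groovy truth: 0 -> false, non-zero -> true
        println "execution ${executions}: discover() ${rescan ? 'runs' : 'is skipped'}"
    }
    // execution 0: discover() is skipped
    // execution 1: discover() runs
    // execution 2: discover() runs
    // execution 3: discover() runs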
src/main/groovy/biz/nellemann/hmci/HmcClient.groovy

@@ -7,6 +7,7 @@ import okhttp3.OkHttpClient
 import okhttp3.Request
 import okhttp3.RequestBody
 import okhttp3.Response
+import org.influxdb.InfluxDBFactory
 
 import javax.net.ssl.HostnameVerifier
 import javax.net.ssl.SSLContext
@@ -23,22 +24,29 @@ class HmcClient {
 
     private final MediaType MEDIA_TYPE_IBM_XML_LOGIN = MediaType.parse("application/vnd.ibm.powervm.web+xml; type=LogonRequest");
 
+    private final String hmcId
     private final String baseUrl
     private final String username
     private final String password
+    private final Boolean unsafe
 
     //protected Map<String,ManagedSystem> managedSystems = new HashMap<String, ManagedSystem>()
     protected String authToken
     private final OkHttpClient client
 
-    HmcClient(String baseUrl, String username, String password) {
+    HmcClient(String hmcId, String baseUrl, String username, String password, Boolean unsafe = false) {
+        this.hmcId = hmcId
         this.baseUrl = baseUrl
         this.username = username
        this.password = password
+        this.unsafe = unsafe
 
-        //this.client = new OkHttpClient() // OR Unsafe (ignore SSL errors) below
+        if(unsafe) {
             this.client = getUnsafeOkHttpClient()
+        } else {
+            this.client = new OkHttpClient()
+        }
     }
@@ -50,6 +58,10 @@ class HmcClient {
      */
     void login() throws IOException {
 
+        if(authToken) {
+            return
+        }
+
         String payload = """\
 <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
 <LogonRequest xmlns="http://www.ibm.com/xmlns/systems/power/firmware/web/mc/2012_10/" schemaVersion="V1_0">
@@ -66,6 +78,7 @@ class HmcClient {
             .put(RequestBody.create(payload, MEDIA_TYPE_IBM_XML_LOGIN))
             .build();
 
+        try {
             Response response = client.newCall(request).execute();
             if (!response.isSuccessful()) throw new IOException("Unexpected code " + response);
@@ -76,6 +89,11 @@ class HmcClient {
             authToken = xml.toString()
 
             log.debug("login() - Auth Token: " + authToken)
+        } catch(Exception e) {
+            log.error(e.message)
+            throw new Exception(e)
+        }
 
     }
@@ -119,6 +137,7 @@ class HmcClient {
         entry.content.each { content ->
             content.ManagedSystem.each { system ->
                 ManagedSystem managedSystem = new ManagedSystem(
+                    hmcId,
                     entry.id as String,
                     system.SystemName as String,
                     system.MachineTypeModelAndSerialNumber?.MachineType as String,
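getUnsafeOkHttpClient() is referenced but not changed by this commit, so it does not appear in the diff. For context, a sketch of the usual shape of such a trust-all client (an assumption about the implementation, matching the HostnameVerifier/SSLContext imports above; it disables certificate and hostname checks, hence the 'unsafe' flag):

    import javax.net.ssl.HostnameVerifier
    import javax.net.ssl.SSLContext
    import javax.net.ssl.TrustManager
    import javax.net.ssl.X509TrustManager
    import java.security.SecureRandom
    import java.security.cert.X509Certificate
    import okhttp3.OkHttpClient

    // Trust-all client for HMCs with self-signed certificates (unsafe = true).
    static OkHttpClient getUnsafeOkHttpClient() {
        X509TrustManager trustAll = [
            checkClientTrusted: { X509Certificate[] chain, String authType -> },
            checkServerTrusted: { X509Certificate[] chain, String authType -> },
            getAcceptedIssuers: { -> new X509Certificate[0] }
        ] as X509TrustManager

        SSLContext sslContext = SSLContext.getInstance("TLS")
        sslContext.init(null, [trustAll] as TrustManager[], new SecureRandom())

        new OkHttpClient.Builder()
                .sslSocketFactory(sslContext.socketFactory, trustAll)
                .hostnameVerifier({ hostname, session -> true } as HostnameVerifier)
                .build()
    }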
src/main/groovy/biz/nellemann/hmci/InfluxClient.groovy

@@ -33,8 +33,13 @@ class InfluxClient {
 
     void login() {
         if(!influxDB) {
+            try {
                 influxDB = InfluxDBFactory.connect(url, username, password);
                 createDatabase()
+            } catch(Exception e) {
+                log.error(e.message)
+                throw new Exception(e)
+            }
         }
     }
@@ -42,26 +47,21 @@ class InfluxClient {
         influxDB?.close();
     }
 
     void createDatabase() {
-        try {
         // Create a database...
-        // https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/
         influxDB.query(new Query("CREATE DATABASE " + database));
         influxDB.setDatabase(database);
 
         // ... and a retention policy, if necessary.
-        // https://docs.influxdata.com/influxdb/v1.7/query_language/database_management/
-        /*String retentionPolicyName = "one_day_only";
+        /*
+        String retentionPolicyName = "HMCI_ONE_YEAR";
         influxDB.query(new Query("CREATE RETENTION POLICY " + retentionPolicyName
-            + " ON " + database + " DURATION 1d REPLICATION 1 DEFAULT"));
+            + " ON " + database + " DURATION 365d REPLICATION 1 DEFAULT"));
         influxDB.setRetentionPolicy(retentionPolicyName);*/
 
         // Enable batch writes to get better performance.
         influxDB.enableBatch(BatchOptions.DEFAULTS);
-        } catch(Exception e) {
-            log.error("createDatabase()", e)
-        }
 
     }
@@ -84,17 +84,6 @@ class InfluxClient {
     }
 
-    void read() {
-        // Query your data using InfluxQL.
-        // https://docs.influxdata.com/influxdb/v1.7/query_language/data_exploration/#the-basic-select-statement
-        QueryResult queryResult = influxDB.query(new Query("SELECT * FROM h2o_feet"));
-        println(queryResult);
-    }
-
-
-
-
 
     /*
         Managed System
@@ -114,10 +103,7 @@ class InfluxClient {
             return
         }
 
-        BatchPoints batchPoints = BatchPoints
-            .database(database)
-            //.retentionPolicy("defaultPolicy")
-            .build();
+        BatchPoints batchPoints = BatchPoints.database(database).build();
 
         getSystemMemory(system, timestamp).each {
             batchPoints.point(it)
@@ -185,9 +171,7 @@ class InfluxClient {
             return
         }
 
-        BatchPoints batchPoints = BatchPoints
-            .database(database)
-            .build();
+        BatchPoints batchPoints = BatchPoints.database(database).build();
 
         getPartitionMemory(partition, timestamp).each {
             batchPoints.point(it)
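The getXxxMetrics() methods shown below in ManagedSystem and LogicalPartition each yield a map holding a "tags" map and a "fields" map, and the write methods above feed those into BatchPoints. A sketch of how one such map plausibly becomes an influxdb-java Point (the helper and measurement name are hypothetical):

    import org.influxdb.dto.Point
    import java.util.concurrent.TimeUnit

    // Hypothetical helper: convert one tags/fields map into a Point.
    static Point toPoint(String measurement, Map m, long timestampMillis) {
        Point.measurement(measurement)
             .time(timestampMillis, TimeUnit.MILLISECONDS)
             .tag(m.get("tags") as Map<String, String>)
             .fields(m.get("fields") as Map<String, Object>)
             .build()
    }

    // e.g. batchPoints.point(toPoint("SystemMemory", map, timestamp))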
src/main/groovy/biz/nellemann/hmci/LogicalPartition.groovy

@@ -32,14 +32,14 @@ class LogicalPartition extends MetaSystem {
             partition: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getMemoryMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             logicalMem: metrics.systemUtil.utilSamples.first().lparsUtil.first().memory.logicalMem.first(),
             backedPhysicalMem: metrics.systemUtil.utilSamples.first().lparsUtil.first().memory.backedPhysicalMem.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getMemoryMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -56,7 +56,7 @@ class LogicalPartition extends MetaSystem {
             partition: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getProcessorMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             utilizedProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.utilizedProcUnits.first(),
@@ -72,7 +72,7 @@ class LogicalPartition extends MetaSystem {
             timeSpentWaitingForDispatch: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.timePerInstructionExecution.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getProcessorMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -94,7 +94,7 @@ class LogicalPartition extends MetaSystem {
                 vswitchId: it.vswitchId as String,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getVirtualEthernetAdapterMetrics() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 receivedPhysicalBytes: it.receivedPhysicalBytes.first(),
@@ -103,7 +103,7 @@ class LogicalPartition extends MetaSystem {
                 sentBytes: it.sentBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getVirtualEthernetAdapterMetrics() - fields: " + fieldsMap.toString())
 
             list.add(map)
         }
@@ -126,14 +126,14 @@ class LogicalPartition extends MetaSystem {
                 wwpn: it.wwpn,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getVirtualFiberChannelAdaptersMetrics() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 transmittedBytes: it.transmittedBytes.first(),
                 writeBytes: it.writeBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getVirtualFiberChannelAdaptersMetrics() - fields: " + fieldsMap.toString())
 
             list.add(map)
         }
src/main/groovy/biz/nellemann/hmci/ManagedSystem.groovy

@@ -6,16 +6,16 @@ import groovy.util.logging.Slf4j
 @Slf4j
 class ManagedSystem extends MetaSystem {
 
-    public String id
-    public String name
-    public String type
-    public String model
-    public String serialNumber
+    public final String hmcId
+    public final String id
+    public final String name
+    public final String type
+    public final String model
+    public final String serialNumber
 
-    // From PCM Data
 
-    ManagedSystem(String id, String name, String type, String model, String serialNumber) {
+    ManagedSystem(String hmcId, String id, String name, String type, String model, String serialNumber) {
+        this.hmcId = hmcId
         this.id = id
         this.name = name
         this.type = type
@@ -37,7 +37,7 @@ class ManagedSystem extends MetaSystem {
             system: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getMemoryMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             totalMem: metrics.systemUtil.utilSamples.first().serverUtil.memory.totalMem.first(),
@@ -46,7 +46,7 @@ class ManagedSystem extends MetaSystem {
             assignedMemToLpars: metrics.systemUtil.utilSamples.first().serverUtil.memory.assignedMemToLpars.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getMemoryMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -62,7 +62,7 @@ class ManagedSystem extends MetaSystem {
             system: name,
         ]
         map.put("tags", tagsMap)
-        log.debug(tagsMap.toString())
+        log.debug("getProcessorMetrics() - tags: " + tagsMap.toString())
 
         HashMap<String, BigDecimal> fieldsMap = [
             availableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.totalProcUnits.first(),
@@ -71,7 +71,7 @@ class ManagedSystem extends MetaSystem {
             configurableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.configurableProcUnits.first(),
         ]
         map.put("fields", fieldsMap)
-        log.debug(fieldsMap.toString())
+        log.debug("getProcessorMetrics() - fields: " + fieldsMap.toString())
 
         list.add(map)
         return list
@@ -89,23 +89,23 @@ class ManagedSystem extends MetaSystem {
                 pool: it.name,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getSharedProcessorPools() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 assignedProcUnits: it.assignedProcUnits.first(),
                 availableProcUnits: it.availableProcUnits.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getSharedProcessorPools() - fields: " + fieldsMap.toString())
 
             list.add(map)
 
         }
 
         return list
 
     }
 
     List<Map> getSystemSharedAdapters() {
 
         List<Map> list = new ArrayList<>()
@@ -120,7 +120,7 @@ class ManagedSystem extends MetaSystem {
                 vios: vios.name,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getSystemSharedAdapters() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 sentBytes: it.sentBytes.first(),
@@ -128,7 +128,7 @@ class ManagedSystem extends MetaSystem {
                 transferredBytes: it.transferredBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getSystemSharedAdapters() - fields: " + fieldsMap.toString())
 
             list.add(map)
         }
@@ -150,9 +150,10 @@ class ManagedSystem extends MetaSystem {
                 system: name,
                 wwpn: it.wwpn,
                 vios: vios.name,
+                device: it.physicalLocation,
             ]
             map.put("tags", tagsMap)
-            log.debug(tagsMap.toString())
+            log.debug("getSystemFiberChannelAdapters() - tags: " + tagsMap.toString())
 
             HashMap<String, BigDecimal> fieldsMap = [
                 writeBytes: it.writeBytes.first(),
@@ -160,7 +161,7 @@ class ManagedSystem extends MetaSystem {
                 transmittedBytes: it.transmittedBytes.first(),
             ]
             map.put("fields", fieldsMap)
-            log.debug(fieldsMap.toString())
+            log.debug("getSystemFiberChannelAdapters() - fields: " + fieldsMap.toString())
 
             list.add(map)
src/main/groovy/biz/nellemann/hmci/MetaSystem.groovy

@@ -9,7 +9,7 @@ import java.time.format.DateTimeFormatter
 import java.time.format.DateTimeParseException
 
 @Slf4j
-class MetaSystem {
+abstract class MetaSystem {
 
     protected PcmData metrics
 
src/test/groovy/biz/nellemann/hmci/HmcClientTest.groovy

@@ -12,7 +12,7 @@ class HmcClientTest extends Specification {
 
     def setup() {
         mockServer.start();
-        hmc = new HmcClient(mockServer.url("/").toString(), "testUser", "testPassword")
+        hmc = new HmcClient("site", mockServer.url("/").toString(), "testUser", "testPassword")
         hmc.authToken = "blaBla"
     }
 
@@ -44,7 +44,7 @@ class HmcClientTest extends Specification {
         mockServer.enqueue(new MockResponse().setBody(testXml));
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         Map<String, LogicalPartition> partitions = hmc.getLogicalPartitionsForManagedSystem(system)
 
         then:
src/test/groovy/biz/nellemann/hmci/InfluxClientTest.groovy

@@ -1,7 +1,9 @@
 package biz.nellemann.hmci
 
+import spock.lang.Ignore
 import spock.lang.Specification
 
+@Ignore
 class InfluxClientTest extends Specification {
 
     InfluxClient influxClient
@@ -23,7 +25,7 @@ class InfluxClientTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
         system.processMetrics(testJson)
         influxClient.writeManagedSystem(system)
 
@@ -40,7 +42,7 @@ class InfluxClientTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "TestSystem", "TestType", "TestModel", "Test s/n")
         LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
 
         lpar.processMetrics(testJson)
src/test/groovy/biz/nellemann/hmci/LogicalPartitionTest.groovy

@@ -12,7 +12,7 @@ class LogicalPartitionTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
         lpar.processMetrics(testJson)
 
@@ -28,7 +28,7 @@ class LogicalPartitionTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-logical-partition.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
 
         when:
src/test/groovy/biz/nellemann/hmci/ManagedSystemTest.groovy

@@ -11,7 +11,7 @@ class ManagedSystemTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
         system.processMetrics(testJson)
 
         then:
@@ -29,7 +29,7 @@ class ManagedSystemTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-managed-system.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
 
         when:
         system.processMetrics(testJson)
@@ -45,7 +45,7 @@ class ManagedSystemTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-managed-system.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
 
         when:
         system.processMetrics(testJson)
@@ -61,7 +61,7 @@ class ManagedSystemTest extends Specification {
         setup:
         def testFile = new File(getClass().getResource('/pcm-data-managed-system.json').toURI())
         def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        ManagedSystem system = new ManagedSystem("site1", "e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
 
         when:
         system.processMetrics(testJson)
@@ -72,24 +72,4 @@ class ManagedSystemTest extends Specification {
         listOfMaps.first().get("fields")['assignedProcUnits'] == 23.767
     }
 
-
-    /*
-    void "test getSharedAdapterMetrics"() {
-
-        setup:
-        def testFile = new File(getClass().getResource('/pcm-data-logical-partition.json').toURI())
-        def testJson = testFile.getText('UTF-8')
-        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
-        LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
-
-        when:
-        lpar.processMetrics(testJson)
-        List<Map> listOfMaps = lpar.getSharedAdapterMetrics()
-
-        then:
-        listOfMaps.size() == 1
-        listOfMaps.first().get("fields")['receivedBytes'] == 276.467
-        listOfMaps.first().get("tags")['sea'] == 'ent5'
-    }*/
-
 }