From decac80ac66ae4b7ba7ab80edd699087a6f541e9 Mon Sep 17 00:00:00 2001
From: Mark Nellemann
Date: Wed, 12 Aug 2020 17:00:31 +0200
Subject: [PATCH] More work on getting metrics into influx.

---
 README.md                                     |  22 ++-
 src/main/groovy/biz/nellemann/hmci/App.groovy | 115 +++++++----
 .../biz/nellemann/hmci/HmcClient.groovy       |  20 +-
 .../biz/nellemann/hmci/InfluxClient.groovy    | 183 ++++++++++++++++--
 .../nellemann/hmci/LogicalPartition.groovy    |  74 ++++++-
 .../biz/nellemann/hmci/ManagedSystem.groovy   |  58 +++++-
 .../biz/nellemann/hmci/MetaSystem.groovy      |   1 -
 .../biz/nellemann/hmci/pojo/ServerUtil.groovy |   2 +-
 .../biz/nellemann/hmci/HmcClientTest.groovy   |   2 +-
 .../hmci/LogicalPartitionTest.groovy          |   3 +-
 10 files changed, 396 insertions(+), 84 deletions(-)

diff --git a/README.md b/README.md
index 5971e39..2e7861a 100644
--- a/README.md
+++ b/README.md
@@ -13,14 +13,28 @@ Small Java-based utility to fetch metrics from one or more HMC's and push those
 
 ## Development Information
 
+### Build & Test
 
-### InfluxDB for test and development
+Use the Gradle build tool
 
-Start the influxdb container
+    ./gradlew clean build
+
+
+### InfluxDB for local testing
+
+Start the InfluxDB container
 
     docker run --name=influxdb -d -p 8086:8086 influxdb
-
-Use the influx client from the container
+
+To use the Influx client from the same container
 
     docker exec -it influxdb influx
+
+
+### Grafana for local testing
+
+Start the Grafana container, linking it to the InfluxDB container
+
+    docker run --name grafana --link influxdb:influxdb -d -p 3000:3000 grafana/grafana:7.1.3
+
+Configure a new InfluxDB datasource on **http://influxdb:8086** to talk to the InfluxDB container. The database must be created beforehand; this can be done by running the hmci tool.
\ No newline at end of file
diff --git a/src/main/groovy/biz/nellemann/hmci/App.groovy b/src/main/groovy/biz/nellemann/hmci/App.groovy
index 5d257a1..4ef2821 100644
--- a/src/main/groovy/biz/nellemann/hmci/App.groovy
+++ b/src/main/groovy/biz/nellemann/hmci/App.groovy
@@ -6,7 +6,7 @@ import groovy.cli.picocli.OptionAccessor
 import groovy.util.logging.Slf4j
 
 @Slf4j
-class App {
+class App implements Runnable {
 
     HmcClient hmc
     InfluxClient influx
@@ -17,39 +17,6 @@ class App {
 
     App(String... args) {
 
-        def cli = new CliBuilder()
-        cli.h(longOpt: 'help', 'display usage')
-        cli.v(longOpt: 'version', 'display version')
-        cli.c(longOpt: 'config', args: 1, required: true, defaultValue: '~/.config/hmci.toml', 'configuration file')
-
-
-        OptionAccessor options = cli.parse(args)
-        if (options.h) cli.usage()
-
-        if(options.c) {
-            File configurationFile = new File((String)options.config)
-            if(configurationFile.exists()) {
-                log.info("Configuration file found at: " + configurationFile.toString())
-            } else {
-                log.warn("No configuration file found at: " + configurationFile.toString())
-            }
-        }
-
-        // TODO: Read configuration file or create new empty file,
-        //       pass the properties or configuration bean to App.
-
-
-        hmc = new HmcClient("https://10.32.64.39:12443", "hmci", "hmcihmci")
-        hmc.login()
-        scanHmc()
-        getMetricsForSystems()
-        //getMetricsForPartitions()
-
-        writeMetricsForManagedSystems()
-
-        hmc?.logoff()
-        influx?.logoff()
-
     }
 
@@ -68,7 +35,7 @@ class App {
             systems.putIfAbsent(systemId, system)
 
             // Get LPAR's for this system
-            hmc.getLogicalPartitionsForManagedSystemWithId(systemId).each { partitionId, partition ->
+            hmc.getLogicalPartitionsForManagedSystem(system).each { partitionId, partition ->
 
                 // Add to list of known partitions
                 partitions.putIfAbsent(partitionId, partition)
@@ -96,7 +63,7 @@ class App {
         systems.each {systemId, system ->
 
             // Get and process metrics for this system
-            String tmpJsonString = hmc.getPcmDataForManagedSystem(systemId)
+            String tmpJsonString = hmc.getPcmDataForManagedSystem(system)
             if(tmpJsonString && !tmpJsonString.empty) {
                 system.processMetrics(tmpJsonString)
             }
@@ -110,6 +77,7 @@ class App {
 
     }
 
+
     void getMetricsForPartitions() {
 
         try {
@@ -124,7 +92,7 @@ class App {
             partitions.each { partitionId, partition ->
 
                 // Get and process metrics for this partition
-                String tmpJsonString2 = hmc.getPcmDataForLogicalPartition(partition.systemId, partitionId)
+                String tmpJsonString2 = hmc.getPcmDataForLogicalPartition(partition)
                 if(tmpJsonString2 && !tmpJsonString2.empty) {
                     partition.processMetrics(tmpJsonString2)
                 }
@@ -149,10 +117,81 @@ class App {
         }
     }
 
+    void writeMetricsForLogicalPartitions() {
+
+        if(!influx) {
+            influx = new InfluxClient("http://127.0.0.1:8086", "root", "", "hmci")
+            influx.login()
+        }
+
+        partitions.each {partitionId, partition ->
+            influx.writeLogicalPartition(partition)
+        }
+    }
+
     static void main(String... args) {
-        new App(args)
+
+        def cli = new CliBuilder()
+        cli.h(longOpt: 'help', 'display usage')
+        cli.v(longOpt: 'version', 'display version')
+        cli.c(longOpt: 'config', args: 1, required: true, defaultValue: '~/.config/hmci.toml', 'configuration file')
+
+        OptionAccessor options = cli.parse(args)
+        if (options.h) cli.usage()
+
+        if(options.c) {
+            File configurationFile = new File((String)options.config)
+            if(configurationFile.exists()) {
+                log.info("Configuration file found at: " + configurationFile.toString())
+            } else {
+                log.warn("No configuration file found at: " + configurationFile.toString())
+            }
+        }
+
+        // TODO: Read configuration file or create new empty file,
+        //       pass the properties or configuration bean to App.
+
+        new App().run()
         System.exit(0);
     }
+
+    @Override
+    void run() {
+
+        log.info("In RUN ")
+
+        boolean keepRunning = true
+        int numberOfRuns = 0
+
+        // Do initial scan - TODO: should do this once in a while..
+        scanHmc()
+
+        while(keepRunning) {
+
+            getMetricsForSystems()
+            writeMetricsForManagedSystems()
+
+            getMetricsForPartitions()
+            writeMetricsForLogicalPartitions()
+
+            // Refresh HMC every 5th run
+            if(numberOfRuns % 5 == 0) {
+                scanHmc()
+            }
+
+            // Stop after some time
+            if(numberOfRuns > 15) {
+                keepRunning = false
+            }
+
+            numberOfRuns++
+            Thread.sleep(60 * 1000)
+        }
+
+        hmc?.logoff()
+        influx?.logoff()
+    }
+
 }
\ No newline at end of file
diff --git a/src/main/groovy/biz/nellemann/hmci/HmcClient.groovy b/src/main/groovy/biz/nellemann/hmci/HmcClient.groovy
index 6b365fe..0551592 100644
--- a/src/main/groovy/biz/nellemann/hmci/HmcClient.groovy
+++ b/src/main/groovy/biz/nellemann/hmci/HmcClient.groovy
@@ -142,8 +142,8 @@ class HmcClient {
      * @param UUID of managed system
      * @return
      */
-    Map getLogicalPartitionsForManagedSystemWithId(String systemId) {
-        URL url = new URL(String.format("%s/rest/api/uom/ManagedSystem/%s/LogicalPartition", baseUrl, systemId))
+    Map getLogicalPartitionsForManagedSystem(ManagedSystem system) {
+        URL url = new URL(String.format("%s/rest/api/uom/ManagedSystem/%s/LogicalPartition", baseUrl, system.id))
         Response response = getResponse(url)
         String responseBody = response.body.string()
 
@@ -156,9 +156,9 @@ class HmcClient {
         content.LogicalPartition.each { partition ->
             LogicalPartition logicalPartition = new LogicalPartition(
                     partition.PartitionUUID as String,
-                    systemId as String,
                     partition.PartitionName as String,
-                    partition.PartitionType as String
+                    partition.PartitionType as String,
+                    system
             )
             partitionMap.put(logicalPartition.id, logicalPartition)
             log.debug("getLogicalPartitionsForManagedSystem() - Found partition: " + logicalPartition.toString())
@@ -176,9 +176,9 @@ class HmcClient {
      * @param systemId
      * @return
      */
-    String getPcmDataForManagedSystem(String systemId) {
-        log.debug("getPcmDataForManagedSystem() - " + systemId)
-        URL url = new URL(String.format("%s/rest/api/pcm/ManagedSystem/%s/ProcessedMetrics?NoOfSamples=0", baseUrl, systemId))
+    String getPcmDataForManagedSystem(ManagedSystem system) {
+        log.debug("getPcmDataForManagedSystem() - " + system.id)
+        URL url = new URL(String.format("%s/rest/api/pcm/ManagedSystem/%s/ProcessedMetrics?NoOfSamples=1", baseUrl, system.id))
         Response response = getResponse(url)
         String responseBody = response.body.string()
 
@@ -203,10 +203,10 @@ class HmcClient {
      * @param partitionId
      * @return
      */
-    String getPcmDataForLogicalPartition(String systemId, String partitionId) {
-
-        log.debug(String.format("getPcmDataForLogicalPartition() - %s @ %s", partitionId, systemId))
-        URL url = new URL(String.format("%s/rest/api/pcm/ManagedSystem/%s/LogicalPartition/%s/ProcessedMetrics?NoOfSamples=0", baseUrl, systemId, partitionId))
+    String getPcmDataForLogicalPartition(LogicalPartition partition) {
+
+        log.debug(String.format("getPcmDataForLogicalPartition() - %s @ %s", partition.id, partition.system.id))
+        URL url = new URL(String.format("%s/rest/api/pcm/ManagedSystem/%s/LogicalPartition/%s/ProcessedMetrics?NoOfSamples=1", baseUrl, partition.system.id, partition.id))
         Response response = getResponse(url)
         String responseBody = response.body.string()
diff --git a/src/main/groovy/biz/nellemann/hmci/InfluxClient.groovy b/src/main/groovy/biz/nellemann/hmci/InfluxClient.groovy
index cf470fb..4a291d8 100644
--- a/src/main/groovy/biz/nellemann/hmci/InfluxClient.groovy
+++ b/src/main/groovy/biz/nellemann/hmci/InfluxClient.groovy
@@ -1,5 +1,6 @@
 package biz.nellemann.hmci
 
+
 import groovy.util.logging.Slf4j
 import org.influxdb.dto.BatchPoints
 
@@ -118,42 +119,184 @@ class InfluxClient {
+            VIOS
          */
 
+        getSystemMemory(system, timestamp).each {
+            batchPoints.point(it)
+        }
 
-        batchPoints.point(getSystemMemory(system, timestamp));
-        batchPoints.point(getSystemProcessor(system, timestamp));
+        getSystemProcessor(system, timestamp).each {
+            batchPoints.point(it)
+        }
+
+        getSystemSharedProcessorPools(system, timestamp).each {
+            batchPoints.point(it)
+        }
 
         influxDB.write(batchPoints);
     }
 
+    void writeLogicalPartition(LogicalPartition partition) {
 
-    private static Point getSystemMemory(ManagedSystem system, Instant timestamp) {
-
-        Point.Builder point1Builder = Point.measurement("SystemMemory")
-                .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
-                .tag("name", system.name)
-
-        Map memoryMap = system.getMemoryMetrics()
-        memoryMap.each {fieldName, fieldValue ->
-            point1Builder.addField(fieldName, fieldValue)
+        if(partition.metrics == null) {
+            log.warn("writeLogicalPartition() - null metrics, skipping")
+            return
         }
 
-        return point1Builder.build();
+        Instant timestamp = partition.getTimestamp()
+        if(!timestamp) {
+            log.warn("writeLogicalPartition() - no timestamp, skipping")
+            return
+        }
+
+        BatchPoints batchPoints = BatchPoints
+                .database(database)
+                .build();
+
+        getPartitionMemory(partition, timestamp).each {
+            batchPoints.point(it)
+        }
+
+        getPartitionProcessor(partition, timestamp).each {
+            batchPoints.point(it)
+        }
+
+        getPartitionVirtualEthernetAdapter(partition, timestamp).each {
+            batchPoints.point(it)
+        }
+
+        influxDB.write(batchPoints);
     }
 
-    private static Point getSystemProcessor(ManagedSystem system, Instant timestamp) {
+    private static List getSystemMemory(ManagedSystem system, Instant timestamp) {
 
-        Point.Builder point1Builder = Point.measurement("SystemProcessor")
-                .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
-                .tag("name", system.name)
+        Map map = system.getMemoryMetrics()
+        List pointList = map.collect {fieldName, fieldValue ->
 
-        Map memoryMap = system.getProcessorMetrics()
-        memoryMap.each {fieldName, fieldValue ->
-            point1Builder.addField(fieldName, fieldValue)
+            return Point.measurement("SystemMemory")
+                    .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
+                    .tag("system", system.name)
+                    .tag("name", fieldName.capitalize())    // The dashboard expects it
+                    .addField("value", fieldValue)
+                    .build()
         }
 
-        return point1Builder.build();
+        return pointList;
     }
+
+
+    private static List getSystemProcessor(ManagedSystem system, Instant timestamp) {
+
+        Map map = system.getProcessorMetrics()
+        List pointList = map.collect {fieldName, fieldValue ->
+
+            return Point.measurement("SystemProcessor")
+                    .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
+                    .tag("system", system.name)
+                    .tag("name", fieldName.capitalize())    // The dashboard expects it
+                    .addField("value", fieldValue)
+                    .build()
+        }
+
+        return pointList;
+    }
+
+
+    private static List getSystemSharedProcessorPools(ManagedSystem system, Instant timestamp) {
+
+        List pointList = new ArrayList<>()
+        system.getSharedProcessorPools().each {name, map ->
+            //log.debug(name)  // Pool name
+
+            // Accumulate points across pools; plain assignment would keep only the last pool
+            pointList.addAll(map.collect { fieldName, fieldValue ->
+
+                return Point.measurement("SystemSharedProcessorPool")
+                        .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
+                        .tag("system", system.name)
+                        .tag("pool", name)
+                        .tag("name", fieldName)
+                        .addField("value", fieldValue)
+                        .build()
+            })
+
+        }
+
+        return pointList;
+    }
+
+
+    private static List getPartitionMemory(LogicalPartition partition, Instant timestamp) {
+
+        Map map = partition.getMemoryMetrics()
+        List pointList = map.collect {fieldName, fieldValue ->
+
+            return Point.measurement("PartitionMemory")
+                    .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
+                    .tag("partition", partition.name)
+                    .tag("system", partition.system.name)
+                    .tag("name", fieldName.capitalize())    // The dashboard expects it
+                    .addField("value", fieldValue)
+                    .build()
+        }
+
+        return pointList;
+    }
+
+
+    private static List getPartitionProcessor(LogicalPartition partition, Instant timestamp) {
+
+        Map map = partition.getProcessorMetrics()
+        List pointList = map.collect {fieldName, fieldValue ->
+
+            return Point.measurement("PartitionProcessor")
+                    .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
+                    .tag("partition", partition.name)
+                    .tag("system", partition.system.name)
+                    .tag("name", fieldName.capitalize())    // The dashboard expects it
+                    .addField("value", fieldValue)
+                    .build()
+        }
+
+        return pointList;
+    }
+
+
+    private static List getPartitionVirtualEthernetAdapter(LogicalPartition partition, Instant timestamp) {
+        List metrics = partition.getVirtualEthernetAdapterMetrics()
+        return processMeasurementMap(metrics, timestamp, "PartitionVirtualEthernetAdapters")
+    }
+
+
+    private static List processMeasurementMap(List listOfMaps, Instant timestamp, String measurement) {
+
+        List list = new ArrayList<>()
+
+        listOfMaps.each { map ->
+
+            // Iterate fields
+            map.get("fields").each { String fieldName, BigDecimal fieldValue ->
+
+                Point.Builder builder = Point.measurement(measurement)
+                        .time(timestamp.toEpochMilli(), TimeUnit.MILLISECONDS)
+                        .tag("name", fieldName)
+                        .addField("value", fieldValue)
+
+                // For each field, we add all tags
+                map.get("tags").each { String tagName, String tagValue ->
+                    builder.tag(tagName, tagValue)
+                }
+
+                list.add(builder.build())
+            }
+
+        }
+
+        return list
+    }
+
 }
diff --git a/src/main/groovy/biz/nellemann/hmci/LogicalPartition.groovy b/src/main/groovy/biz/nellemann/hmci/LogicalPartition.groovy
index c9f6805..bf154e8 100644
--- a/src/main/groovy/biz/nellemann/hmci/LogicalPartition.groovy
+++ b/src/main/groovy/biz/nellemann/hmci/LogicalPartition.groovy
@@ -8,17 +8,85 @@ class LogicalPartition extends MetaSystem {
     public String id
     public String name
     public String type
-    public String systemId
+    ManagedSystem system
 
-    LogicalPartition(String id, String systemId, String name, String type) {
+    LogicalPartition(String id, String name, String type, ManagedSystem system) {
         this.id = id
-        this.systemId = systemId
         this.name = name
         this.type = type
+        this.system = system
     }
 
     String toString() {
         return "[${id}] ${name} (${type})"
     }
+
+
+    Map getMemoryMetrics() {
+
+        HashMap map = [
+            logicalMem: metrics.systemUtil.utilSamples.first().lparsUtil.first().memory.logicalMem.first(),
+            backedPhysicalMem: metrics.systemUtil.utilSamples.first().lparsUtil.first().memory.backedPhysicalMem.first(),
+        ]
+
+        return map
+    }
+
+
+    Map getProcessorMetrics() {
+
+        HashMap map = [
+            utilizedProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.utilizedProcUnits.first(),
+            maxVirtualProcessors: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.maxVirtualProcessors.first(),
+            currentVirtualProcessors: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.currentVirtualProcessors.first(),
+            donatedProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.donatedProcUnits.first(),
+            entitledProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.entitledProcUnits.first(),
+            idleProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.idleProcUnits.first(),
+            maxProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.maxProcUnits.first(),
+            utilizedCappedProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.utilizedCappedProcUnits.first(),
+            utilizedUncappedProcUnits: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.utilizedUncappedProcUnits.first(),
+            timePerInstructionExecution: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.timePerInstructionExecution.first(),
+            timeSpentWaitingForDispatch: metrics.systemUtil.utilSamples.first().lparsUtil.first().processor.timeSpentWaitingForDispatch.first(),
+        ]
+
+        return map
+    }
+
+
+    // PartitionVSCSIAdapters - VIOS?
+    // PartitionVirtualEthernetAdapters
+    // PartitionVirtualFiberChannelAdapters
+
+
+    List getVirtualEthernetAdapterMetrics() {
+
+        List list = new ArrayList<>()
+        metrics.systemUtil.utilSamples.first().lparsUtil.first().network?.virtualEthernetAdapters?.each {
+
+            Map map = new HashMap()    // one map per adapter, so entries are not overwritten
+
+            HashMap tagsMap = [
+                system: system.name,
+                partition: name,
+                sea: it.sharedEthernetAdapterId,
+                viosId: it.viosId,
+                vlanId: it.vlanId,
+                vswitchId: it.vswitchId,
+            ]
+            map.put("tags", tagsMap)
+
+            HashMap fieldsMap = [
+                receivedPhysicalBytes: it.receivedPhysicalBytes.first(),
+                sentPhysicalBytes: it.sentPhysicalBytes.first(),
+                receivedBytes: it.receivedBytes.first(),
+                sentBytes: it.sentBytes.first(),
+            ]
+            map.put("fields", fieldsMap)    // processMeasurementMap() looks up the "fields" key
+
+            list.add(map)
+        }
+
+        return list
+    }
+
 }
diff --git a/src/main/groovy/biz/nellemann/hmci/ManagedSystem.groovy b/src/main/groovy/biz/nellemann/hmci/ManagedSystem.groovy
index c44ce74..9461922 100644
--- a/src/main/groovy/biz/nellemann/hmci/ManagedSystem.groovy
+++ b/src/main/groovy/biz/nellemann/hmci/ManagedSystem.groovy
@@ -12,6 +12,8 @@ class ManagedSystem extends MetaSystem {
     public String model
     public String serialNumber
 
+    // From PCM Data
+
     ManagedSystem(String id, String name, String type, String model, String serialNumber) {
         this.id = id
@@ -25,14 +27,23 @@ class ManagedSystem extends MetaSystem {
         return "[${id}] ${name} (${type}-${model} ${serialNumber})"
     }
 
+
+    Object getMetrics(String metric) {
+        switch (metric) {
+            case "SystemSharedProcessorPool":
+                return getSharedProcessorPools()
+                break
+        }
+    }
 
     Map getMemoryMetrics() {
 
         HashMap map = [
-            totalMem: metrics.systemUtil.utilSamples.first().serverUtil.memory.totalMem.first(),
-            availableMem: metrics.systemUtil.utilSamples.first().serverUtil.memory.availableMem.first(),
-            configurableMem: metrics.systemUtil.utilSamples.first().serverUtil.memory.configurableMem.first(),
-            assignedMemToLpars: metrics.systemUtil.utilSamples.first().serverUtil.memory.assignedMemToLpars.first()
+            totalMem:           metrics.systemUtil.utilSamples.first().serverUtil.memory.totalMem.first(),
+            availableMem:       metrics.systemUtil.utilSamples.first().serverUtil.memory.availableMem.first(),
+            configurableMem:    metrics.systemUtil.utilSamples.first().serverUtil.memory.configurableMem.first(),
+            assignedMemToLpars: metrics.systemUtil.utilSamples.first().serverUtil.memory.assignedMemToLpars.first(),
         ]
 
         return map
@@ -42,10 +53,47 @@ class ManagedSystem extends MetaSystem {
     Map getProcessorMetrics() {
 
        HashMap map = [
-            availableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.availableProcUnits.first(),
+            totalProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.totalProcUnits.first(),
+            utilizedProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.utilizedProcUnits.first(),
+            availableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.availableProcUnits.first(),
+            configurableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.processor.configurableProcUnits.first(),
         ]
 
         return map
     }
 
+
+    Map getPhysicalProcessorPool() {
+
+        HashMap map = [
+            assignedProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.physicalProcessorPool.assignedProcUnits.first(),
+            availableProcUnits: metrics.systemUtil.utilSamples.first().serverUtil.physicalProcessorPool.availableProcUnits.first(),
+        ]
+
+        return map
+    }
+
+
+    Map<String, Map> getSharedProcessorPools() {
+
+        Map map = new HashMap()
+        metrics.systemUtil.utilSamples.first().serverUtil.sharedProcessorPool.each {
+
+            HashMap innerMap = [
+                assignedProcUnits: it.assignedProcUnits.first(),
+                availableProcUnits: it.availableProcUnits.first(),
+            ]
+            map.put(it.name, innerMap)
+        }
+
+        return map
+    }
+
+
+    // SystemSharedAdapters
+    // SystemGenericPhysicalAdapters
+    // SystemGenericVirtualAdapters
+    // SystemGenericAdapters
+    // SystemFiberChannelAdapters
+
 }
diff --git a/src/main/groovy/biz/nellemann/hmci/MetaSystem.groovy b/src/main/groovy/biz/nellemann/hmci/MetaSystem.groovy
index 3f54a39..0a81f4e 100644
--- a/src/main/groovy/biz/nellemann/hmci/MetaSystem.groovy
+++ b/src/main/groovy/biz/nellemann/hmci/MetaSystem.groovy
@@ -32,5 +32,4 @@ class MetaSystem {
         return instant
     }
 
-
 }
diff --git a/src/main/groovy/biz/nellemann/hmci/pojo/ServerUtil.groovy b/src/main/groovy/biz/nellemann/hmci/pojo/ServerUtil.groovy
index 3ce480b..6c4f29a 100644
--- a/src/main/groovy/biz/nellemann/hmci/pojo/ServerUtil.groovy
+++ b/src/main/groovy/biz/nellemann/hmci/pojo/ServerUtil.groovy
@@ -8,6 +8,6 @@ class ServerUtil {
     ServerProcessor processor
     ServerMemory memory
     PhysicalProcessorPool physicalProcessorPool
-    SharedProcessorPool sharedProcessorPool
+    List<SharedProcessorPool> sharedProcessorPool
 
 }
diff --git a/src/test/groovy/biz/nellemann/hmci/HmcClientTest.groovy b/src/test/groovy/biz/nellemann/hmci/HmcClientTest.groovy
index 2c60afe..0d6fd90 100644
--- a/src/test/groovy/biz/nellemann/hmci/HmcClientTest.groovy
+++ b/src/test/groovy/biz/nellemann/hmci/HmcClientTest.groovy
@@ -45,7 +45,7 @@ class HmcClientTest extends Specification {
 
         when:
         ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
-        Map partitions = hmc.getLogicalPartitionsForManagedSystemWithId(system.id)
+        Map partitions = hmc.getLogicalPartitionsForManagedSystem(system)
 
         then:
         partitions.size() == 12
diff --git a/src/test/groovy/biz/nellemann/hmci/LogicalPartitionTest.groovy b/src/test/groovy/biz/nellemann/hmci/LogicalPartitionTest.groovy
index 2e89c23..cf3c966 100644
--- a/src/test/groovy/biz/nellemann/hmci/LogicalPartitionTest.groovy
+++ b/src/test/groovy/biz/nellemann/hmci/LogicalPartitionTest.groovy
@@ -12,7 +12,8 @@ class LogicalPartitionTest extends Specification {
         def testJson = testFile.getText('UTF-8')
 
         when:
-        LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "e09834d1-c930-3883-bdad-405d8e26e166", "9Flash01", "OS400")
+        ManagedSystem system = new ManagedSystem("e09834d1-c930-3883-bdad-405d8e26e166", "Test Name","Test Type", "Test Model", "Test S/N")
+        LogicalPartition lpar = new LogicalPartition("2DE05DB6-8AD5-448F-8327-0F488D287E82", "9Flash01", "OS400", system)
         lpar.processMetrics(testJson)
 
        then:
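
A rough illustration of the schema this patch writes (the system name, values and timestamp below are made up for the example; only the measurement, tag and field names come from the code above): each metric field becomes its own InfluxDB point, tagged with the system (and partition or pool where relevant), with the reading stored in a single field named "value" and timestamps written in milliseconds by the Java client:

    SystemMemory,name=TotalMem,system=Server-One value=4194304.0 1597244431000
    SystemMemory,name=AvailableMem,system=Server-One value=131072.0 1597244431000
    PartitionProcessor,name=UtilizedProcUnits,partition=9Flash01,system=Server-One value=0.25 1597244431000

With this layout there is one series per system/partition/metric name, so a Grafana panel can query, for example:

    SELECT "value" FROM "SystemMemory" WHERE "name" = 'TotalMem' GROUP BY "system"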