Merge pull request 'Support for InfluxDB 2.x (now requires 1.8 or later)' (#2) from influxdb2 into main
continuous-integration/drone/push Build is passing

Reviewed-on: #2
This commit is contained in:
Mark Nellemann 2023-05-25 13:23:01 +00:00
commit 9468c1b695
9 changed files with 132 additions and 61 deletions

View File

@ -1,3 +1,7 @@
# Changelog
All notable changes to this project will be documented in this file.
## 0.1.1 - 2023-05-20
- Support for InfluxDB v2, now requires InfluxDB 1.8 or later

View File

@ -35,7 +35,7 @@ There are a few steps in the installation.
### 2 - InfluxDB and Grafana Installation
Install InfluxDB (v. **1.8.x** or **1.9.x** for best compatibility with Grafana) on a host which is network accessible by the SVCi utility (the default InfluxDB port is 8086). You can install Grafana on the same server or any server which are able to connect to the InfluxDB database. The Grafana installation needs to be accessible from your browser (default on port 3000). The default settings for both InfluxDB and Grafana will work fine as a start.
Install InfluxDB (v. **1.8** or later) on a host which is network accessible by the SVCi utility (the default InfluxDB port is 8086). You can install Grafana on the same server or on any server that is able to connect to the InfluxDB database. The Grafana installation needs to be accessible from your browser (default on port 3000). The default settings for both InfluxDB and Grafana will work fine as a start.
- Create the empty *svci* database by running the **influx** CLI command and typing:
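The statement itself falls outside this diff hunk; as a minimal sketch, assuming a local InfluxDB 1.x on the default port and the same database name used in the Local Testing section further down:
```shell
influx -execute "CREATE DATABASE svci"
```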
@ -98,6 +98,19 @@ systemctl restart svci
journalctl -f -u svci # to check log output
```
### AIX Notes
To install (or upgrade) on AIX, you need to pass the *--ignoreos* flag to the *rpm* command:
```shell
rpm -Uvh --ignoreos svci-x.y.z-n.noarch.rpm
```
## Screenshots
Screenshots of the provided Grafana dashboard can be found in the [doc/screenshots/](doc/screenshots) folder.
## Development Information
@ -114,9 +127,9 @@ Use the Gradle build tool, which will download all required dependencies:
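The build invocation itself is not visible in this hunk; a minimal sketch, assuming the standard Gradle wrapper is present in the repository:
```shell
# Compile, run tests and assemble the application
./gradlew clean build

# The shadow and nebula.ospackage plugins in build.gradle are expected to
# add shadowJar, buildDeb and buildRpm tasks for packaging
```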
### Local Testing
#### InfluxDB
#### InfluxDB v1.x
Start the InfluxDB container:
Start an InfluxDB container:
```shell
docker run --name=influxdb --rm -d -p 8086:8086 influxdb:1.8
@ -128,10 +141,21 @@ Create the *svci* database:
docker exec -i influxdb influx -execute "CREATE DATABASE svci"
```
#### InfluxDB v2.x
Start an InfluxDB container:
```shell
docker run --name=influxdb --rm -d -p 8086:8086 influxdb:latest
```
- Then use the Web UI at http://localhost:8086/ to create an initial user (used for logging in to the Web UI), an organization and a bucket.
- Then create an API token with read/write (RW) access to your bucket. A scripted alternative using the *influx* CLI is sketched below.
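As a rough sketch of that scripted alternative, run the *influx* CLI inside the container; the user, password, organization and bucket names are examples only, and the exact flags can differ between 2.x releases:
```shell
# One-time initial setup: admin user, organization and bucket (example values)
docker exec influxdb influx setup \
  --username admin --password changeme123 \
  --org myOrg --bucket svci --retention 0 --force

# Create an API token; --all-access is broader than strictly required,
# per-bucket read/write scoping is also possible
docker exec influxdb influx auth create --org myOrg --all-access
```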
#### Grafana
Start the Grafana container, linking it to the InfluxDB container:
Start a Grafana container, linking it to the InfluxDB container:
```shell
docker run --name grafana --link influxdb:influxdb --rm -d -p 3000:3000 grafana/grafana
@ -139,4 +163,6 @@ docker run --name grafana --link influxdb:influxdb --rm -d -p 3000:3000 grafana/
Set up Grafana to connect to the InfluxDB container by defining a new datasource named *svci* that points at the URL *http://influxdb:8086*.
Import dashboards from the [doc/dashboards/](doc/dashboards/) folder.
If you are [connecting](https://docs.influxdata.com/influxdb/v2.7/tools/grafana/) to InfluxDB v2.x, then add a custom HTTP header, enter the bucket as the database, and disable authorization (the token is passed via the header instead). A scripted sketch of this data source setup follows below.
- Authorization = Token abcdef_random_token_from_influxdb==
- Import dashboards from the [doc/dashboards/](doc/dashboards/) folder.
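As a rough sketch of that scripted setup, using Grafana's data source HTTP API; the admin credentials, the example token (reused from the sample *svci.toml*) and the *httpHeaderName1*/*httpHeaderValue1* field names are assumptions that should be verified against your Grafana version:
```shell
curl -X POST http://admin:admin@localhost:3000/api/datasources \
  -H "Content-Type: application/json" \
  -d '{
        "name": "svci",
        "type": "influxdb",
        "access": "proxy",
        "url": "http://influxdb:8086",
        "database": "svci",
        "jsonData": { "httpHeaderName1": "Authorization" },
        "secureJsonData": { "httpHeaderValue1": "Token rAnd0mT0k3nG3neRaT3dByInF1uxDb==" }
      }'
```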

View File

@ -2,13 +2,11 @@ plugins {
id 'java'
id 'groovy'
id 'application'
// Code coverage of tests
id 'jacoco'
id "net.nemerosa.versioning" version "2.15.1"
id "com.github.johnrengelman.shadow" version "7.1.2"
id "com.netflix.nebula.ospackage" version "10.0.0"
id "com.netflix.nebula.ospackage" version "11.2.0"
}
repositories {
@ -22,18 +20,16 @@ version = projectVersion
dependencies {
annotationProcessor 'info.picocli:picocli-codegen:4.7.3'
implementation 'info.picocli:picocli:4.7.3'
implementation 'org.influxdb:influxdb-java:2.23'
//implementation 'com.influxdb:influxdb-client-java:6.7.0'
implementation 'com.influxdb:influxdb-client-java:6.8.0'
implementation 'org.slf4j:slf4j-api:2.0.7'
implementation 'org.slf4j:slf4j-simple:2.0.7'
implementation 'com.squareup.okhttp3:okhttp:4.10.0' // Also used by InfluxDB Client
//implementation "org.eclipse.jetty:jetty-client:9.4.49.v20220914"
implementation 'com.fasterxml.jackson.core:jackson-databind:2.14.2'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-xml:2.14.2'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-toml:2.14.2'
implementation 'com.fasterxml.jackson.core:jackson-databind:2.14.3'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-xml:2.14.3'
implementation 'com.fasterxml.jackson.dataformat:jackson-dataformat-toml:2.14.3'
testImplementation 'junit:junit:4.13.2'
testImplementation 'org.spockframework:spock-core:2.3-groovy-3.0'
testImplementation 'org.spockframework:spock-core:2.3-groovy-4.0'
testImplementation "org.mock-server:mockserver-netty-no-dependencies:5.14.0"
}

View File

@ -3,13 +3,28 @@
###
### Define one InfluxDB to save metrics into
### There must be only one and it should be named [influx]
###
# InfluxDB v1.x example
#[influx]
#url = "http://localhost:8086"
#username = "root"
#password = ""
#database = "svci"
# InfluxDB v2.x example
[influx]
url = "http://localhost:8086"
username = "root"
password = ""
database = "svci"
org = "myOrg"
token = "rAnd0mT0k3nG3neRaT3dByInF1uxDb=="
bucket = "svci"
###
### Define one or more SVC's to query for metrics
### Each entry must be named [svc.<something-unique>]
###
###

View File

@ -1,3 +1,3 @@
projectId = svci
projectGroup = biz.nellemann.svci
projectVersion = 0.0.3
projectVersion = 0.1.1

View File

@ -16,10 +16,12 @@
package biz.nellemann.svci;
import biz.nellemann.svci.dto.toml.InfluxConfiguration;
import org.influxdb.BatchOptions;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.Point;
import com.influxdb.client.InfluxDBClient;
import com.influxdb.client.InfluxDBClientFactory;
import com.influxdb.client.WriteApi;
import com.influxdb.client.WriteOptions;
import com.influxdb.client.domain.WritePrecision;
import com.influxdb.client.write.Point;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -35,23 +37,35 @@ public final class InfluxClient {
private final static Logger log = LoggerFactory.getLogger(InfluxClient.class);
final private String url;
final private String username;
final private String password;
final private String database;
final private String org; // v2 only
final private String token;
final private String bucket; // Bucket in v2, Database in v1
private InfluxDB influxDB;
private InfluxDBClient influxDBClient;
private WriteApi writeApi;
InfluxClient(InfluxConfiguration config) {
this.url = config.url;
this.username = config.username;
this.password = config.password;
this.database = config.database;
if(config.org != null) {
this.org = config.org;
} else {
this.org = "svci"; // In InfluxDB 1.x, there is no concept of organization.
}
if(config.token != null) {
this.token = config.token;
} else {
this.token = config.username + ":" + config.password;
}
if(config.bucket != null) {
this.bucket = config.bucket;
} else {
this.bucket = config.database;
}
}
synchronized void login() throws RuntimeException, InterruptedException {
if(influxDB != null) {
if(influxDBClient != null) {
return;
}
@ -61,20 +75,20 @@ public final class InfluxClient {
do {
try {
log.debug("Connecting to InfluxDB - {}", url);
influxDB = InfluxDBFactory.connect(url, username, password).setDatabase(database);
influxDB.version(); // This ensures that we actually try to connect to the db
influxDBClient = InfluxDBClientFactory.create(url, token.toCharArray(), org, bucket);
influxDBClient.version(); // This ensures that we actually try to connect to the db
Runtime.getRuntime().addShutdownHook(new Thread(influxDBClient::close));
influxDB.enableBatch(
BatchOptions.DEFAULTS
.threadFactory(runnable -> {
Thread thread = new Thread(runnable);
thread.setDaemon(true);
return thread;
})
);
Runtime.getRuntime().addShutdownHook(new Thread(influxDB::close));
// Todo: Handle events - https://github.com/influxdata/influxdb-client-java/tree/master/client#handle-the-events
//writeApi = influxDBClient.makeWriteApi();
writeApi = influxDBClient.makeWriteApi(
WriteOptions.builder()
.bufferLimit(20_000)
.flushInterval(5_000)
.build());
connected = true;
} catch(Exception e) {
sleep(15 * 1000);
if(loginErrors++ > 3) {
@ -90,29 +104,32 @@ public final class InfluxClient {
synchronized void logoff() {
if(influxDB != null) {
influxDB.close();
if(influxDBClient != null) {
influxDBClient.close();
}
influxDB = null;
influxDBClient = null;
}
public void write(List<Measurement> measurements, Instant timestamp, String measurement) {
log.debug("write() - measurement: {} {}", measurement, measurements.size());
processMeasurementMap(measurements, timestamp, measurement).forEach( (point) -> { influxDB.write(point); });
public void write(List<Measurement> measurements, String name) {
log.debug("write() - measurement: {} {}", name, measurements.size());
if(!measurements.isEmpty()) {
processMeasurementMap(measurements, name).forEach((point) -> {
writeApi.writePoint(point);
});
}
}
private List<Point> processMeasurementMap(List<Measurement> measurements, Instant timestamp, String measurement) {
private List<Point> processMeasurementMap(List<Measurement> measurements, String name) {
List<Point> listOfPoints = new ArrayList<>();
measurements.forEach( (m) -> {
Point.Builder builder = Point.measurement(measurement)
.time(timestamp.getEpochSecond(), TimeUnit.SECONDS)
.tag(m.tags)
.fields(m.fields);
listOfPoints.add(builder.build());
log.trace("processMeasurementMap() - timestamp: {}, tags: {}, fields: {}", m.timestamp, m.tags, m.fields);
Point point = new Point(name)
.time(m.timestamp.getEpochSecond(), WritePrecision.S)
.addTags(m.tags)
.addFields(m.fields);
listOfPoints.add(point);
});
return listOfPoints;
}

View File

@ -15,14 +15,23 @@
*/
package biz.nellemann.svci;
import java.time.Instant;
import java.util.Map;
public class Measurement {
final Instant timestamp;
final Map<String, String> tags;
final Map<String, Object> fields;
Measurement(Map<String, String> tags, Map<String, Object> fields) {
this.timestamp = Instant.now();
this.tags = tags;
this.fields = fields;
}
Measurement(Instant timestamp, Map<String, String> tags, Map<String, Object> fields) {
this.timestamp = timestamp;
this.tags = tags;
this.fields = fields;
}

View File

@ -94,10 +94,10 @@ class VolumeController implements Runnable {
void refresh() {
log.debug("refresh()");
influxClient.write(getSystem(), Instant.now(),"system");
influxClient.write(getNodeStats(), Instant.now(),"node_stats");
influxClient.write(getEnclosureStats(), Instant.now(),"enclosure_stats");
influxClient.write(getMDiskGroups(), Instant.now(),"m_disk_groups");
influxClient.write(getSystem(),"system");
influxClient.write(getNodeStats(),"node_stats");
influxClient.write(getEnclosureStats(),"enclosure_stats");
influxClient.write(getMDiskGroups(), "m_disk_groups");
}

View File

@ -3,6 +3,10 @@ package biz.nellemann.svci.dto.toml;
public class InfluxConfiguration {
public String url;
public String org;
public String token;
public String bucket;
public String username;
public String password;
public String database;