InfluxDB入门指南五-InfluxDB基本操作方法封装
一、前言
上面几篇文章中,介绍了Influx在Linux和Windows上的使用之后,本节开始介绍Influx在Java中的使用,先提供一个InfluxDB Java API 封装的工具类,方便大家直接上手使用。
二、InfluxDB工具类
2.1 导入依赖包
使用maven工具导入如下依赖的jar包:
<dependency>
<groupId>org.influxdb</groupId>
<artifactId>influxdb-java</artifactId>
<version>2.10</version>
</dependency>
2.2 编写工具类代码
package com.common.utils.influxdb;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.influxdb.InfluxDB;
import org.influxdb.InfluxDB.ConsistencyLevel;
import org.influxdb.InfluxDBFactory;
import org.influxdb.dto.BatchPoints;
import org.influxdb.dto.Point;
import org.influxdb.dto.Point.Builder;
import org.influxdb.dto.Pong;
import org.influxdb.dto.Query;
import org.influxdb.dto.QueryResult;
// Service wrapper around the influxdb-java client: connection setup,
// database/retention-policy/continuous-query management, and read/write helpers.
// NOTE(review): @Service/@Value/@PostConstruct, Logger/LoggerFactory,
// List/ArrayList, Executors and BatchOptions are referenced but not imported in
// this snippet — presumably the full file imports Spring, SLF4J and the
// java.util/java.util.concurrent types; confirm against the original source.
@Service("tsdbService")
public class TsdbServiceImpl implements TsdbService{
private static final Logger logger = LoggerFactory.getLogger(TsdbServiceImpl.class);
// Write consistency used by buildBatchPoints().
private static final InfluxDB.ConsistencyLevel CONSISTENCY_LEVEL = InfluxDB.ConsistencyLevel.ANY;
// Timestamp precision for batch points (constant name misspells "PRECISION").
private static final TimeUnit PRECESION = TimeUnit.SECONDS;
// Comma-separated host list; each host is combined with 'port' in init().
@Value("${tsdb.server.hosts}")
private String hosts;
@Value("${tsdb.server.port}")
private String port;
/**
* User name
*/
@Value("${tsdb.server.username}")
private String username;
/**
* Password
*/
@Value("${tsdb.server.password}")
private String password;
/**
* Database name
*/
@Value("${tsdb.server.database}")
private String database;
/**
* Retention policy
*/
@Value("${tsdb.server.retentionpolicy}")
private String retentionPolicy;
// Shared client instance, created once in init().
private InfluxDB influxDB;
/**
 * Initializes the InfluxDB client after dependency injection:
 * builds the server address list, connects, creates the database and a
 * default retention policy when absent, and enables asynchronous batching.
 */
@PostConstruct
public void init() {
    List<String> serverAddressList = new ArrayList<>();
    for (String host : hosts.split(",")) {
        serverAddressList.add(String.format("%s:%s", host, port));
    }
    influxDB = InfluxDBFactory.connect(serverAddressList, username, password);
    try {
        // If the configured database does not exist, create it together with
        // a default retention policy.
        if (!this.databaseExist(database)) {
            createDatabase(database);
            createRetentionPolicy();
        }
    } catch (Exception e) {
        // The endpoint may be a proxy that forbids database creation; proceed
        // anyway. Pass the throwable as the last argument (no placeholder) so
        // SLF4J logs the full stack trace.
        logger.error("[TsdbService] occur error when init tsdb", e);
    } finally {
        influxDB.setRetentionPolicy(retentionPolicy);
    }
    influxDB.setLogLevel(InfluxDB.LogLevel.NONE);
    // Flush every 1000 points, at least every 100 ms.
    // bufferLimit: maximum number of points kept in the retry buffer.
    // exceptionHandler: consumer invoked on asynchronous write errors.
    // threadFactory: ThreadFactory used by the batch scheduler.
    influxDB.enableBatch(BatchOptions.DEFAULTS
            .actions(1000)
            .flushDuration(100)
            .bufferLimit(10)
            .exceptionHandler((points, e) -> {
                List<Point> target = new ArrayList<>();
                points.forEach(target::add);
                // Truncate to at most 10000 chars. The original unconditionally
                // called substring(0, 10000), which threw
                // StringIndexOutOfBoundsException for shorter dumps.
                String rendered = target.toString();
                String msg = String.format("failed to write points:%s\n",
                        rendered.substring(0, Math.min(rendered.length(), 10000)));
                logger.error(msg, e);
            })
            .threadFactory(Executors.defaultThreadFactory()));
}
/**
 * Checks connectivity to the InfluxDB server.
 *
 * @return true when the server answers the ping, false otherwise
 */
public boolean ping() {
    try {
        // A non-null Pong response means the server is reachable.
        return influxDB.ping() != null;
    } catch (Exception e) {
        // Best-effort probe: log through the class logger instead of the
        // original printStackTrace(), and report "not connected".
        logger.error("[TsdbService] ping failed", e);
        return false;
    }
}
/**
 * Creates a database with the given name.
 */
@Override
public void createDatabase(String database) {
    // Administrative statements run without a bound database ("").
    String command = String.format("CREATE DATABASE %s", database);
    influxDB.query(new Query(command, ""));
}
/**
 * Drops the database with the given name.
 */
@Override
public void dropDatabase(String database) {
    // Administrative statements run without a bound database ("").
    String command = String.format("DROP DATABASE %s", database);
    influxDB.query(new Query(command, ""));
}
@Override
public boolean databaseExist(String database) {
// Delegates to the driver's SHOW DATABASES lookup.
// NOTE(review): databaseExists(...) is deprecated in newer influxdb-java
// releases — confirm against the pinned driver version (2.10 per the article).
return influxDB.databaseExists(database);
}
/**
 * Creates the default retention policy on the configured database:
 * name "default_policy", 90-day duration, replication factor 3, marked DEFAULT.
 */
@Override
public void createRetentionPolicy() {
    // Fixed defaults used for a freshly created database.
    String policyName = "default_policy";
    String duration = "90d";
    int replication = 3;
    String command = String.format(
            "CREATE RETENTION POLICY \"%s\" ON \"%s\" DURATION %s REPLICATION %s DEFAULT",
            policyName, database, duration, replication);
    this.query(command);
}
/**
 * Creates a retention policy on the given database.
 *
 * @param database    target database
 * @param policyName  policy name
 * @param duration    retention duration, e.g. "90d"
 * @param replication replication factor
 * @param isDefault   when TRUE the policy becomes the database default;
 *                    null is treated as false (the original unboxed the
 *                    Boolean directly and threw NullPointerException on null)
 */
@Override
public void createRetentionPolicy(String database, String policyName, String duration, int replication, Boolean isDefault) {
    String sql = String.format("CREATE RETENTION POLICY \"%s\" ON \"%s\" DURATION %s REPLICATION %s ", policyName,
            database, duration, replication);
    if (Boolean.TRUE.equals(isDefault)) {
        sql = sql.concat(" DEFAULT");
    }
    this.query(sql);
}
@Override
public void dropRetentionPolicy() {
// Drops the configured retention policy on the configured database.
this.dropRetentionPolicy(database, retentionPolicy);
}
/**
 * Drops a retention policy.
 * Identifiers are double-quoted for consistency with createRetentionPolicy,
 * so policy/database names containing special characters keep working.
 *
 * @param database        database the policy belongs to
 * @param retentionPolicy policy name to drop
 */
@Override
public void dropRetentionPolicy(String database, String retentionPolicy) {
    String sql = String.format("DROP RETENTION POLICY \"%s\" ON \"%s\"", retentionPolicy, database);
    this.query(sql);
}
@Override
public void createContinuousQuery(String measurement) {
// Creates a continuous query "cq_<measurement>" that downsamples the raw
// series into hourly means ("<measurement>_hour"), recomputing the last 2h
// every hour (RESAMPLE EVERY 1h FOR 2h), grouped by all tags, skipping gaps.
String cqName = String.format("cq_%s", measurement);
String originMeasurement = String.format("%s.%s.%s", database, retentionPolicy, measurement);
// NOTE(review): 'extendPolicy' is not declared anywhere in this snippet —
// presumably a field naming the longer-retention policy the hourly rollup is
// written into. As shown this does not compile; confirm against the full file.
String cqMeasurement = String.format("%s.%s.%s_hour", database, extendPolicy, measurement);
String sql = String.format("CREATE CONTINUOUS QUERY \"%s\" ON %s RESAMPLE EVERY 1h FOR 2h BEGIN SELECT MEAN(*) INTO %s FROM %s GROUP BY time(1h),* FILL(none) END",
cqName, database, cqMeasurement, originMeasurement);
this.query(sql);
}
@Override
public boolean continuousQueryExists(String measurement) {
// CQ names follow the "cq_<measurement>" convention used by createContinuousQuery.
String cqName = String.format("cq_%s", measurement);
return continuousQueryExists(database, cqName);
}
/**
 * Checks whether a continuous query with the given name exists on a database.
 * SHOW CONTINUOUS QUERIES returns one series per database; each row's first
 * column is a CQ name.
 *
 * @return true when the CQ is listed for {@code database}, false otherwise
 */
@Override
public boolean continuousQueryExists(String database, String cqName) {
    QueryResult result = query("SHOW CONTINUOUS QUERIES");
    List<QueryResult.Series> seriesList = result.getResults().get(0).getSeries();
    if (seriesList == null) {
        return false;
    }
    for (QueryResult.Series series : seriesList) {
        // Only the series named after the target database is relevant.
        if (!database.equals(series.getName())) {
            continue;
        }
        List<List<Object>> rows = series.getValues();
        if (rows == null) {
            // Database listed but holds no continuous queries.
            return false;
        }
        for (List<Object> row : rows) {
            if (cqName.equals(row.get(0))) {
                return true;
            }
        }
    }
    return false;
}
/**
 * Drops the continuous query {@code cqName} on {@code databaseName}.
 */
@Override
public void dropContinuousQuery(String databaseName, String cqName) {
    String sql = String.format("DROP CONTINUOUS QUERY %s ON %s", cqName, databaseName);
    // The original stored the QueryResult in an unused local; it is not needed.
    query(sql);
}
@Override
public boolean measurementsExists(String measurement) {
// Checks in the configured database.
return measurementsExists(database, measurement);
}
/**
 * Checks whether a measurement exists in the given database.
 *
 * @param database    database to inspect
 * @param measurement measurement (table) name to look for
 * @return true when SHOW MEASUREMENTS lists the name
 */
@Override
public boolean measurementsExists(String database, String measurement) {
    String sql = String.format("SHOW MEASUREMENTS ON %s", database);
    QueryResult result = query(sql);
    if (result != null) {
        List<QueryResult.Series> seriesList = result.getResults().get(0).getSeries();
        if (seriesList != null) {
            // SHOW MEASUREMENTS returns a single series whose rows hold the names.
            QueryResult.Series series = seriesList.get(0);
            List<List<Object>> valueList = series.getValues();
            // Guard: the driver may return a series with a null value list
            // (the original dereferenced it unconditionally and could NPE).
            if (valueList != null) {
                for (List<Object> value : valueList) {
                    if (measurement.equals(value.get(0))) {
                        return true;
                    }
                }
            }
        }
    }
    return false;
}
@Override
public QueryResult query(String command) {
// Runs an InfluxQL statement against the configured database.
return influxDB.query(new Query(command, database));
}
@Override
public QueryResult dataQuery(String command) {
// Like query(), but asks the driver to render timestamps in milliseconds.
return influxDB.query(new Query(command, database), TimeUnit.MILLISECONDS);
}
@Override
public void insert(Point point1) {
// Writes a single pre-built point using the database and retention policy
// bound on the client in init().
influxDB.write(point1);
}
/**
 * Builds a point from the unified metric record and writes it to the
 * configured database/retention policy.
 *
 * @param measurement target measurement name
 * @param timeUnit    timestamp precision; null defaults to milliseconds
 * @param data        tags, fields and timestamp to write
 */
@Override
public void insert(String measurement, TimeUnit timeUnit, UniteMetricData data) {
    TimeUnit unit = (timeUnit == null) ? TimeUnit.MILLISECONDS : timeUnit;
    Point point = pointBuilder(measurement, data.getTags(), data.getFields(), data.getTimestamp(), unit);
    influxDB.write(database, retentionPolicy, point);
}
@Override
public void batchInsert(BatchPoints batchPoints) {
// Writes a pre-assembled batch of points in one request.
influxDB.write(batchPoints);
}
/**
 * Builds an InfluxDB Point from a measurement name, tag map, field map and
 * a timestamp in the given unit.
 */
@Override
public Point pointBuilder(String measurement,
        Map<String, String> tags,
        Map<String, Object> fields,
        long time,
        TimeUnit timeunit) {
    return Point.measurement(measurement)
            .time(time, timeunit)
            .tag(tags)
            .fields(fields)
            .build();
}
@Override
public BatchPoints buildBatchPoints() {
// Batch container bound to the configured database with the class-level
// consistency (ANY) and precision (SECONDS); retention policy left null.
return this.batchPointsBuilder(database, CONSISTENCY_LEVEL, PRECESION);
}
@Override
public BatchPoints batchPointsBuilder(String database, InfluxDB.ConsistencyLevel level, TimeUnit precision) {
// Convenience overload: no explicit retention policy.
return batchPointsBuilder(database, level, precision, null);
}
/**
 * Builds an empty BatchPoints container with the given database,
 * consistency level, precision and retention policy.
 * NOTE(review): retentionPolicy may be null here — presumably the driver
 * then applies its own default; confirm against the pinned driver version.
 */
@Override
public BatchPoints batchPointsBuilder(String database, InfluxDB.ConsistencyLevel level, TimeUnit precision, String retentionPolicy) {
    BatchPoints.Builder builder = BatchPoints.database(database)
            .consistency(level)
            .precision(precision)
            .retentionPolicy(retentionPolicy);
    return builder.build();
}
三、使用示例
3.0 连通性测试
/**
* TSDB connectivity check; runs before all other tests (@Before).
*/
@Before
public void connectionTest() {
boolean connected = tsdbService.ping();
assertTrue(connected);
}
3.1 查询数据
InfluxDB支持一次查询多条SQL,SQL之间用分号隔开即可。下面仅演示只有一条SQL的情况下,如何解析查询返回的结果集。
@Resource
private TsdbService tsdbService;
// TSDB address in the self-test environment
private static final String MASTER_URL = "10.185.3.150:8091";
private static final String USERNAME = "root";
private static final String PASSWORD = "root";
private static final String DATABASE_NAME = "ncm_test_temp";
private static final String RP_NAME = "default_policy";
// Test measurement (table) name, conventionally named after the namespace
private static final String MEASUREMENT_NAME = "NVS";
private static final Integer QUERY_LIMIT = 10;
/**
* Batch query unit test: runs a time-ranged SELECT, prints the raw result,
* then repackages each row as a column-name -> value map.
*/
@Test
public void batchQueryTest() {
long beginTime = 1559613845000L;
long endTime = 1559621045000L;
Map<String, String> dimensionMap = new HashMap<>();
dimensionMap.put("tag1", "tag1");
// Build a WHERE condition including the time range
String condition = TSDBUtil.getQueryCondition(dimensionMap,beginTime,endTime);
Object[] args = new Object[]{MEASUREMENT_NAME, condition, QUERY_LIMIT};
String command = String.format("SELECT * FROM %s WHERE %s ORDER BY time ASC LIMIT %d", args);
// Execute the query
QueryResult results = tsdbService.dataQuery(command);
if (results.getResults() == null) {
System.out.println("Data is empty");
return;
}
// results.getResults() holds one entry per SQL statement in the query
for (QueryResult.Result result : results.getResults()) {
List<QueryResult.Series> series = result.getSeries();
for (QueryResult.Series serie : series) {
List<List<Object>> values = serie.getValues();// row value lists
List<String> colums = serie.getColumns();// column names
// Print the raw query result
System.out.println("colums:" + colums);
for (List<Object> value : values) {
System.out.println("value:" + value);
}
// Repackage the rows into maps keyed by column name
List<Map<String, Object>> dataList = new LinkedList<>();
for (int i=0;i<values.size();++i){
Map<String, Object> dataMap=new HashMap<>(colums.size());
for (int j=0;j<colums.size();++j){
dataMap.put(colums.get(j),values.get(i).get(j));
}
dataList.add(dataMap);
}
// dataList is the base format returned to users
System.out.println(dataList);
}
}
/**
* Before normalization:
*
* colums:[time, field1, field2, tag1, tag2]
* value:[1.550599292E12, efs, 444444.0, tag1, tag2]
* value:[1.550595692E12, bcd, 333333.0, tag1, tag2]
* value:[1.550592092E12, abc, 123456.0, tag1, tag2]
*
* After normalization:
* [
* {
* tag1=tag1,
* field1=efs,
* time=1.550599292E12,
* field2=444444.0,
* tag2=tag2
* },
* {
* tag1=tag1,
* field1=bcd,
* time=1.550595692E12,
* field2=333333.0,
* tag2=tag2
* },
* {
* tag1=tag1,
* field1=abc,
* time=1.550592092E12,
* field2=123456.0,
* tag2=tag2
* }
* ]
*/
}
取数据的时候,注意空值判断,本例将返回数据先进行判空oneResult.getSeries() != null,然后调用oneResult.getSeries().getValues().get(0)获取到该条SQL的返回结果集,然后遍历valueList,取出每条记录中的目标字段值。
InfluxDB封装的结果集有点深,主要是由于支持多条SQL一次性查询,可以提高查询速度,这个地方有别于关系型数据库的使用。
3.2 插入单条数据
InfluxDB的字段类型,由首条插入的值的类型决定;tags的类型只能是String型,可以作为索引,提高检索速度。
// Example: insert a single row. Tag values are always strings (and indexed);
// a field's type is fixed by the first value written to it.
public static void main(String[] args) {
InfluxDBConnection influxDBConnection = new InfluxDBConnection("admin", "admin", "1.1.1.1", "db-test", "hour");
Map<String, String> tags = new HashMap<String, String>();
tags.put("tag1", "标签值");
Map<String, Object> fields = new HashMap<String, Object>();
fields.put("field1", "String类型");
// Numeric field: InfluxDB infers the field type from the first inserted value
fields.put("field2", 3.141592657);
// Timestamps are in milliseconds here
influxDBConnection.insert("表名", tags, fields, System.currentTimeMillis(), TimeUnit.MILLISECONDS);
}
另外一个测试用例:
/**
 * Unified metric record: a set of tag values, a set of field values and a
 * timestamp, as accepted by TsdbService.insert(...).
 * Plain mutable holder — the maps are stored by reference, not copied.
 */
public class UniteMetricData implements Serializable {

    private static final long serialVersionUID = 8968059029015805484L;

    private Map<String, String> tags;
    private Map<String, Object> fields;
    private long timestamp;

    /**
     * @param tags      tag name -> tag value (strings only)
     * @param fields    field name -> field value
     * @param timestamp point timestamp (unit decided by the caller)
     */
    public UniteMetricData(Map<String, String> tags, Map<String, Object> fields, long timestamp) {
        this.tags = tags;
        this.fields = fields;
        this.timestamp = timestamp;
    }

    public Map<String, String> getTags() {
        return tags;
    }

    public Map<String, Object> getFields() {
        return fields;
    }

    public long getTimestamp() {
        return timestamp;
    }

    public void setTags(Map<String, String> tags) {
        this.tags = tags;
    }

    public void setFields(Map<String, Object> fields) {
        this.fields = fields;
    }

    public void setTimestamp(long timestamp) {
        this.timestamp = timestamp;
    }
}
/**
* Single-point write test: insert one cpuUsage sample, wait for the client's
* async batch buffer to flush (enableBatch uses flushDuration=100ms in
* init()), then read it back and print the series.
*/
@Test
public void writeTest() throws InterruptedException {
Map<String, String> tags = new HashMap<>();
tags.put("host", "ncm-test-01");
tags.put("projectId", "c57212bdec1345cd95107ef3109777");
Map<String, Object> fields = new HashMap<>();
fields.put("cpuUsage", 2.17);
UniteMetricData data = new UniteMetricData(tags, fields, System.currentTimeMillis());
tsdbService.insert(MEASUREMENT_NAME, TimeUnit.MILLISECONDS, data);
// Give the asynchronous batch write time to reach the server before querying.
Thread.sleep(1000);
QueryResult queryResult = tsdbService.query(String.format("SELECT cpuUsage FROM %s", MEASUREMENT_NAME));
for (QueryResult.Result result : queryResult.getResults()) {
for (QueryResult.Series series : result.getSeries()) {
System.out.println(series.toString());
}
}
}
3.3 批量写入数据的几种方式
注:使用这两种方式,要求这两条数据都写入到同一数据库下且tag相同,若tag不相同,需将它们放到不同的BatchPoints对象中,否则会出现数据写入错乱问题。
3.3.1 方式一:通过BatchPoints组装数据后,循环插入数据库
// Example (way 1): assemble points into separate BatchPoints containers and
// insert each batch with its own write call.
public static void main(String[] args) {
InfluxDBConnection influxDBConnection = new InfluxDBConnection("admin", "admin", "1.1.1.1", "db-test", "hour");
Map<String, String> tags = new HashMap<String, String>();
tags.put("tag1", "标签值");
Map<String, Object> fields1 = new HashMap<String, Object>();
fields1.put("field1", "abc");
// Numeric field: InfluxDB infers the field type from the first inserted value
fields1.put("field2", 123456);
Map<String, Object> fields2 = new HashMap<String, Object>();
fields2.put("field1", "String类型");
fields2.put("field2", 3.141592657);
// One point per record
Point point1 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), tags, fields1);
Point point2 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), tags, fields2);
// Build the two batch containers the records will be added to
BatchPoints batchPoints1 = BatchPoints.database("db-test").tag("tag1", "标签值1").retentionPolicy("hour")
.consistency(ConsistencyLevel.ALL).build();
BatchPoints batchPoints2 = BatchPoints.database("db-test").tag("tag2", "标签值2").retentionPolicy("hour")
.consistency(ConsistencyLevel.ALL).build();
batchPoints1.point(point1);
batchPoints2.point(point2);
// Insert both batches into the database
influxDBConnection.batchInsert(batchPoints1);
influxDBConnection.batchInsert(batchPoints2);
}
3.3.2 方式二:通过BatchPoints组装数据,序列化后,一次性插入数据库
// Example (way 2): assemble BatchPoints, serialize them to line protocol and
// write all records in a single request for better throughput.
public static void main(String[] args) {
InfluxDBConnection influxDBConnection = new InfluxDBConnection("admin", "admin", "1.1.1.1", "db-test", "hour");
Map<String, String> tags1 = new HashMap<String, String>();
tags1.put("tag1", "标签值");
Map<String, String> tags2 = new HashMap<String, String>();
tags2.put("tag2", "标签值");
Map<String, Object> fields1 = new HashMap<String, Object>();
fields1.put("field1", "abc");
// Numeric field: InfluxDB infers the field type from the first inserted value
fields1.put("field2", 123456);
Map<String, Object> fields2 = new HashMap<String, Object>();
fields2.put("field1", "String类型");
fields2.put("field2", 3.141592657);
// One point per record
Point point1 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), tags1, fields1);
Point point2 = influxDBConnection.pointBuilder("表名", System.currentTimeMillis(), tags2, fields2);
BatchPoints batchPoints1 = BatchPoints.database("db-test").tag("tag1", "标签值1")
.retentionPolicy("hour").consistency(ConsistencyLevel.ALL).build();
// Add the first record to its batch
batchPoints1.point(point1);
BatchPoints batchPoints2 = BatchPoints.database("db-test").tag("tag2", "标签值2")
.retentionPolicy("hour").consistency(ConsistencyLevel.ALL).build();
// Add the second record to its batch
batchPoints2.point(point2);
// Serialize each batch to line protocol so one request carries them all
List<String> records = new ArrayList<String>();
records.add(batchPoints1.lineProtocol());
records.add(batchPoints2.lineProtocol());
// Insert both records into the database in a single call
influxDBConnection.batchInsert("db-test", "hour", ConsistencyLevel.ALL, records);
}
方式三:直接调用write方法写入
/**
 * Batch-write benchmark (way 3): inserts 10k points one by one and relies on
 * the client's asynchronous batch buffer (enableBatch in init()) for the
 * actual batching, then prints the elapsed time.
 */
@Test
public void batchWriteTest() {
    Map<String, String> tags = new HashMap<>();
    tags.put("tag1", "tag1");
    tags.put("tag2", "tag2");
    Map<String, Object> fields1 = new HashMap<>();
    fields1.put("field1", "abc");
    fields1.put("field2", 123456);
    // NOTE(review): fields2/fields3 are built but never written below —
    // presumably intended to vary the generated rows (the article's sample
    // output shows bcd/efs values); confirm the original intent.
    Map<String, Object> fields2 = new HashMap<>();
    fields2.put("field1", "bcd");
    fields2.put("field2", 333333);
    Map<String, Object> fields3 = new HashMap<>();
    fields3.put("field1", "efs");
    fields3.put("field2", 444444);
    // Fabricate 10k records with slightly jittered timestamps.
    List<UniteMetricData> recordList = new ArrayList<>(10000);
    for (int i = 0; i < 10000; ++i) {
        // The original referenced an undeclared 'random' field (does not
        // compile); use the shared, contention-free ThreadLocalRandom.
        long jitter = java.util.concurrent.ThreadLocalRandom.current().nextLong() % 1000;
        recordList.add(new UniteMetricData(tags, fields1, 1559617445000L + jitter));
    }
    long start = System.currentTimeMillis();
    for (UniteMetricData data : recordList) {
        tsdbService.insert(MEASUREMENT_NAME, TimeUnit.MILLISECONDS, data);
    }
    long end = System.currentTimeMillis();
    System.out.println(String.format("Time used %d ms", end - start));
}
由于在连接创建时开启了本地缓冲区,influxdb会执行异步写入,经过测试,批量写入性能不低于上面两种方式,且无需手动构建BatchPoints结构,更加简单易用,生产环境中,我们就是使用的这种方式,比较推荐。
3.4 总结
本小节主要介绍了如何使用Java API封装InfluxDB基本的读写操作,希望上述service类能够在大家接入InfluxDB时提供一些便利。
相关文章