目錄
效果圖
先看下最終的監控效果圖
環境準備
JDK:1.8
jmxtrans 安裝包
# 我們系統是CentOS,這裏選擇rpm
https://github.com/downloads/jmxtrans/jmxtrans/jmxtrans-20121016.145842.6a28c97fbb-0.noarch.rpm
# 其他系統在下面鏈接中可以找到
https://github.com/jmxtrans/jmxtrans/downloads
# 下載源碼,後面編譯使用
https://github.com/jmxtrans/jmxtrans/releases
# 我這裏使用的271版本
https://github.com/jmxtrans/jmxtrans/archive/jmxtrans-parent-271.tar.gz
influxdb 安裝包
# 下載安裝包
https://dl.influxdata.com/influxdb/releases/influxdb-1.8.0.x86_64.rpm
# 官網下載頁面
https://portal.influxdata.com/downloads/
Grafana 安裝包
https://dl.grafana.com/oss/release/grafana-6.7.3-1.x86_64.rpm
安裝 influxdb
安裝我們剛剛下載 influxdb rpm文件
rpm -ivh influxdb-1.8.0.x86_64.rpm
查看默認配置
> influxd config
Merging with configuration at: /etc/influxdb/influxdb.conf
reporting-disabled = false
bind-address = "127.0.0.1:8088"
[meta]
dir = "/var/lib/influxdb/meta"
retention-autocreate = true
logging-enabled = true
[data]
dir = "/var/lib/influxdb/data"
index-version = "inmem"
wal-dir = "/var/lib/influxdb/wal"
wal-fsync-delay = "0s"
validate-keys = false
query-log-enabled = true
cache-max-memory-size = 1073741824
cache-snapshot-memory-size = 26214400
cache-snapshot-write-cold-duration = "10m0s"
compact-full-write-cold-duration = "4h0m0s"
compact-throughput = 50331648
compact-throughput-burst = 50331648
max-series-per-database = 1000000
max-values-per-tag = 100000
max-concurrent-compactions = 0
max-index-log-file-size = 1048576
series-id-set-cache-size = 100
series-file-max-concurrent-snapshot-compactions = 0
trace-logging-enabled = false
tsm-use-madv-willneed = false
[coordinator]
write-timeout = "10s"
max-concurrent-queries = 0
query-timeout = "0s"
log-queries-after = "0s"
max-select-point = 0
max-select-series = 0
max-select-buckets = 0
[retention]
enabled = true
check-interval = "30m0s"
[shard-precreation]
enabled = true
check-interval = "10m0s"
advance-period = "30m0s"
[monitor]
store-enabled = true
store-database = "_internal"
store-interval = "10s"
[subscriber]
enabled = true
http-timeout = "30s"
insecure-skip-verify = false
ca-certs = ""
write-concurrency = 40
write-buffer-size = 1000
[http]
enabled = true
bind-address = ":8086"
auth-enabled = false
log-enabled = true
suppress-write-log = false
write-tracing = false
flux-enabled = false
flux-log-enabled = false
pprof-enabled = true
pprof-auth-enabled = false
debug-pprof-enabled = false
ping-auth-enabled = false
https-enabled = false
https-certificate = "/etc/ssl/influxdb.pem"
https-private-key = ""
max-row-limit = 0
max-connection-limit = 0
shared-secret = ""
realm = "InfluxDB"
unix-socket-enabled = false
unix-socket-permissions = "0777"
bind-socket = "/var/run/influxdb.sock"
max-body-size = 25000000
access-log-path = ""
max-concurrent-write-limit = 0
max-enqueued-write-limit = 0
enqueued-write-timeout = 30000000000
[logging]
format = "auto"
level = "info"
suppress-logo = false
[[graphite]]
enabled = false
bind-address = ":2003"
database = "graphite"
retention-policy = ""
protocol = "tcp"
batch-size = 5000
batch-pending = 10
batch-timeout = "1s"
consistency-level = "one"
separator = "."
udp-read-buffer = 0
[[collectd]]
enabled = false
bind-address = ":25826"
database = "collectd"
retention-policy = ""
batch-size = 5000
batch-pending = 10
batch-timeout = "10s"
read-buffer = 0
typesdb = "/usr/share/collectd/types.db"
security-level = "none"
auth-file = "/etc/collectd/auth_file"
parse-multivalue-plugin = "split"
[[opentsdb]]
enabled = false
bind-address = ":4242"
database = "opentsdb"
retention-policy = ""
consistency-level = "one"
tls-enabled = false
certificate = "/etc/ssl/influxdb.pem"
batch-size = 1000
batch-pending = 5
batch-timeout = "1s"
log-point-errors = true
[[udp]]
enabled = false
bind-address = ":8089"
database = "udp"
retention-policy = ""
batch-size = 5000
batch-pending = 10
read-buffer = 0
batch-timeout = "1s"
precision = ""
[continuous_queries]
log-enabled = true
enabled = true
query-stats-enabled = false
run-interval = "1s"
[tls]
min-version = ""
max-version = ""
修改參數
默認influxDB使用以下端口
- 8086: 用於客戶端和服務端交互的HTTP API
- 8088: 用於提供備份和恢復的RPC服務
我這裏修改配置文件,使用8087端口,我這裏8088和其他服務衝突了
同時修改了數據保存的路徑
> vim /etc/influxdb/influxdb.conf
bind-address = "127.0.0.1:8087"
# metadata 保存路徑
dir = "/root/jast/influxdb/meta"
#數據保存路徑
dir = "/root/jast/influxdb/data"
#`write-ahead-log(WAL)保存路徑
wal-dir = "/root/jast/influxdb/wal"
注意:選擇的路徑要有權限,否則會啓動失敗
啓動 influxdb
systemctl start influxdb
查看啓動狀態
systemctl status influxdb
此時influxdb已經啓動成功
我們也可以指定配置文件啓動;/etc/influxdb/influxdb.conf 是默認的配置文件路徑,使用默認路徑時也可以不指定
influxd -config /etc/influxdb/influxdb.conf
設置基本配置
在服務器 輸入 influx 進入交互頁面
[root@ecs-t-001-0001 influx]# influx
Connected to http://localhost:8086 version 1.8.0
InfluxDB shell version: 1.8.0
>
創建用戶
CREATE USER "admin" WITH PASSWORD '123456' WITH ALL PRIVILEGES
創建數據庫(後面保存數據用)
create database "jmxDB"
查看是否創建成功
[root@ecs-t-001-0001 jmxtrans]# influx
Connected to http://localhost:8086 version 1.8.0
InfluxDB shell version: 1.8.0
> show databases
name: databases
name
----
_internal
jmxDB
>
influxdb 其他命令擴展
#創建數據庫
create database "db_name"
#顯示所有的數據庫
show databases
#刪除數據庫
drop database "db_name"
#使用數據庫
use db_name
#顯示該數據庫中所有的表
show measurements
#創建表,直接在插入數據的時候指定表名
insert test,host=127.0.0.1,monitor_name=test count=1
#刪除表
drop measurement "measurement_name"
#退出
quit
安裝 jmxtrans
安裝我們剛剛下載jmxtrans rpm文件
rpm -ivh jmxtrans-20121016.145842.6a28c97fbb-0.noarch.rpm
安裝完成後默認安裝目錄在
[root@ecs-t-001-0001 jmxtrans]# whereis jmxtrans
jmxtrans: /usr/share/jmxtrans
這裏我們先簡單配置Kafka 的Memory 監控,其他配置在文末統一整理
我們創建json文件供jmxtrans讀取,json文件名稱自己根據業務取名即可
{
"servers" : [ {
"port" : "9393",
"host" : "172.11.0.1",
"queries" : [ {
"obj" : "java.lang:type=Memory",
"attr" : [ "HeapMemoryUsage", "NonHeapMemoryUsage" ],
"resultAlias":"jvmMemory",
"outputWriters" : [ {
"@class" : "com.googlecode.jmxtrans.model.output.InfluxDbWriterFactory",
"url" : "http://172.11.0.1:8086/",
"username" : "admin",
"password" : "123456",
"database" : "jmxDB",
"tags" : {"application" : "kafka_server"}
} ]
} ]
} ]
}
簡單解釋一下上面的說明
port: 我們要監控的Kafka JMX端口
host:我們要監控的Kafka host
resultAlias:自定義表名,收集到的數據會存入influxdb的定義的表中,自動創建
outputWriters爲連接influxdb的配置
@class不需要修改
url:influxdb的機器+端口,默認端口8086
username和password:influxdb的用戶和密碼
database:influxdb數據庫(我們剛剛創建的)
啓動之前我們把 /usr/share/jmxtrans 目錄下的所有 .json 文件換個名,因爲它默認會讀取 /usr/share/jmxtrans 目錄下的所有json文件
在 /usr/share/jmxtrans 目錄下啓動 jmxtrans.sh
jmxtrans.sh start
到這裏正常來說就是要啓動成功了,我們先說下可能遇到的異常
可能遇到的異常
異常1
[root@ecs-t-001-0001 jmxtrans]# Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=384m; support was removed in 8.0
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=384m; support was removed in 8.0
MaxTenuringThreshold of 16 is invalid; must be between 0 and 15
Error: Could not create the Java Virtual Machine.
Error: A fatal exception has occurred. Program will exit.
MaxTenuringThreshold 這個參數用於控制對象能經歷多少次Minor GC才晉升到舊生代
提示設置的是16,但是範圍在0-15,我們直接修改一下啓動腳本 jmxtrans.sh
> vim jmxtrans.sh
GC_OPTS=${GC_OPTS:-"-Xms${HEAP_SIZE}M -Xmx${HEAP_SIZE}M -XX:+UseConcMarkSweepGC -XX:NewRatio=${NEW_RATIO} -XX:NewSize=${NEW_SIZE}m -XX:MaxNewSize=${NEW_SIZE}m -XX:MaxTenuringThreshold=15 -XX:GCTimeRatio=9 -XX:PermSize=${PERM_SIZE}m -XX:MaxPermSize=${MAX_PERM_SIZE}m -XX:+UseTLAB -XX:CMSInitiatingOccupancyFraction=${IO_FRACTION} -XX:+CMSIncrementalMode -XX:+CMSIncrementalPacing -XX:ParallelGCThreads=${CPU_CORES} -Dsun.rmi.dgc.server.gcInterval=28800000 -Dsun.rmi.dgc.client.gcInterval=28800000"}
異常2
[root@ecs-t-001-0001 jmxtrans]# Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=384m; support was removed in 8.0
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=384m; support was removed in 8.0
Java HotSpot(TM) 64-Bit Server VM warning: Using incremental CMS is deprecated and will likely be removed in a future release
Exception in thread "main" com.googlecode.jmxtrans.util.LifecycleException: com.googlecode.jmxtrans.util.LifecycleException: Error parsing json: /var/lib/jmxtrans/kafka.json
at com.googlecode.jmxtrans.JmxTransformer.start(JmxTransformer.java:146)
at com.googlecode.jmxtrans.JmxTransformer.doMain(JmxTransformer.java:107)
at com.googlecode.jmxtrans.JmxTransformer.main(JmxTransformer.java:92)
Caused by: com.googlecode.jmxtrans.util.LifecycleException: Error parsing json: /var/lib/jmxtrans/kafka.json
at com.googlecode.jmxtrans.JmxTransformer.processFilesIntoServers(JmxTransformer.java:358)
at com.googlecode.jmxtrans.JmxTransformer.startupSystem(JmxTransformer.java:301)
at com.googlecode.jmxtrans.JmxTransformer.start(JmxTransformer.java:142)
... 2 more
Caused by: java.lang.IllegalArgumentException: Invalid type id 'com.googlecode.jmxtrans.model.output.InfluxDbWriterFactory' (for id type 'Id.class'): no such class found
at org.codehaus.jackson.map.jsontype.impl.ClassNameIdResolver.typeFromId(ClassNameIdResolver.java:89)
at org.codehaus.jackson.map.jsontype.impl.TypeDeserializerBase._findDeserializer(TypeDeserializerBase.java:73)
at org.codehaus.jackson.map.jsontype.impl.AsPropertyTypeDeserializer.deserializeTypedFromObject(AsPropertyTypeDeserializer.java:65)
at org.codehaus.jackson.map.deser.AbstractDeserializer.deserializeWithType(AbstractDeserializer.java:81)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:118)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:93)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:25)
at org.codehaus.jackson.map.deser.SettableBeanProperty.deserialize(SettableBeanProperty.java:149)
at org.codehaus.jackson.map.deser.SettableBeanProperty$MethodProperty.deserializeAndSet(SettableBeanProperty.java:237)
at org.codehaus.jackson.map.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:496)
at org.codehaus.jackson.map.deser.BeanDeserializer.deserialize(BeanDeserializer.java:350)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:116)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:93)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:25)
at org.codehaus.jackson.map.deser.SettableBeanProperty.deserialize(SettableBeanProperty.java:149)
at org.codehaus.jackson.map.deser.SettableBeanProperty$MethodProperty.deserializeAndSet(SettableBeanProperty.java:237)
at org.codehaus.jackson.map.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:496)
at org.codehaus.jackson.map.deser.BeanDeserializer.deserialize(BeanDeserializer.java:350)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:116)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:93)
at org.codehaus.jackson.map.deser.CollectionDeserializer.deserialize(CollectionDeserializer.java:25)
at org.codehaus.jackson.map.deser.SettableBeanProperty.deserialize(SettableBeanProperty.java:149)
at org.codehaus.jackson.map.deser.SettableBeanProperty$MethodProperty.deserializeAndSet(SettableBeanProperty.java:237)
at org.codehaus.jackson.map.deser.BeanDeserializer.deserializeFromObject(BeanDeserializer.java:496)
at org.codehaus.jackson.map.deser.BeanDeserializer.deserialize(BeanDeserializer.java:350)
at org.codehaus.jackson.map.ObjectMapper._readMapAndClose(ObjectMapper.java:1980)
at org.codehaus.jackson.map.ObjectMapper.readValue(ObjectMapper.java:1225)
at com.googlecode.jmxtrans.util.JmxUtils.getJmxProcess(JmxUtils.java:494)
at com.googlecode.jmxtrans.JmxTransformer.processFilesIntoServers(JmxTransformer.java:352)
... 4 more
根據異常信息,實際原因是找不到 com.googlecode.jmxtrans.model.output.InfluxDbWriterFactory 這個類(Invalid type id ... no such class found),這裏需要我們自己編譯一下jar包,在文章開頭我們下載過jmxtrans源碼
在 項目目錄下進行編譯,文末有我編譯好的jar包
mvn clean package -Dmaven.test.skip=true -DskipTests=true;
編譯完成我們需要的jar包在 jmxtrans-jmxtrans-parent-271\jmxtrans\target 目錄下
jmxtrans-271-all.jar 就是我們需要用的jar包
將jar包傳到jmxtrans目錄下
我們對比一下發現我們編譯的包是有這個類的,而他自帶的那個沒有
[root@ecs-t-001-0001 jmxtrans]# grep 'com.googlecode.jmxtrans.model.output.InfluxDbWriterFactory' ./jmxtrans-271-all.jar
Binary file ./jmxtrans-271-all.jar matches
[root@ecs-t-001-0001 jmxtrans]# grep 'com.googlecode.jmxtrans.model.output.InfluxDbWriterFactory' ./jmxtrans-all.jar
[root@ecs-t-001-0001 jmxtrans]#
替換 jmxtrans.sh 中應用的 jmxtrans jar包名稱
#JAR_FILE=${JAR_FILE:-"jmxtrans-all.jar"}
JAR_FILE=${JAR_FILE:-"jmxtrans-271-all.jar"}
再次啓動即可
驗證jmxtrans是否成功運行
進入 influx,使用我們之前創建的 jmxDB 數據庫,執行 show MEASUREMENTS 查看表,我們發現自動創建了 jvmMemory
[root@ecs-t-001-0001 jmxtrans]# influx
Connected to http://localhost:8086 version 1.8.0
InfluxDB shell version: 1.8.0
> show databases
name: databases
name
----
_internal
jmxDB
> use jmxDB
Using database jmxDB
> show
ERR: error parsing query: found EOF, expected CONTINUOUS, DATABASES, DIAGNOSTICS, FIELD, GRANTS, MEASUREMENT, MEASUREMENTS, QUERIES, RETENTION, SERIES, SHARD, SHARDS, STATS, SUBSCRIPTIONS, TAG, USERS at line 1, char 6
> show MEASUREMENTS
name: measurements
name
----
jvmMemory
具體查看數據,發現有數據寫入
至此 jmxtrans 已成功監控 Kafka JMX端口,離成功更近了
安裝 Grafana
安裝
yum install grafana-6.7.3-1.x86_64.rpm
配置文件默認路徑 /etc/grafana/grafana.ini
修改下web端口
> vim /etc/grafana/grafana.ini
# web頁面端口默認3000
http_port = 9099
啓動服務並設置爲開機啓動
systemctl start grafana-server
systemctl enable grafana-server
查看啓動狀態
systemctl status grafana-server
訪問 web頁面,第一次登錄的賬號密碼是 admin/admin,登錄完成後會提示你設置新密碼
開始配置Grafana顯示模板
點擊DataSource
選擇添加 InfluxDB ,並填寫基本信息
填寫 influxDB 數據庫信息
點擊保存
此時InfluxDB配置完成,我們繼續創建儀表盤
選擇 添加一個查詢
選擇我們上面設置的KafkaMonitor(因爲我們只有一個默認的也是這個)
簡單修改一下sql,因爲我們上面只監控了JMX中的內存信息
注意:如果上面使用tag進行劃分,這裏就不要設置了,否則都叫Jvm內存使用了
簡單配置完成,我們對比一下監控與jconsole監控對比
influxDB 與 Grafana 監控模板
模板下載
鏈接:https://pan.baidu.com/s/1ld-Yhv7wVutRxslV084GoQ
提取碼:0pzr
複製這段內容後打開百度網盤手機App,操作更方便哦