Installing ELK (Elasticsearch, Logstash, Kibana) with Docker, and Installing the IK Analyzer in Elasticsearch
Preparation
- Create the Logstash configuration directory
mkdir -p /data/docker/logstash/config
- Start a temporary Logstash container first, in order to copy out the default /usr/share/logstash/pipeline/logstash.conf file
# Start a temporary container
docker run --name logstash -d docker.elastic.co/logstash/logstash:7.10.1
# Copy the default config to the host
docker container cp logstash:/usr/share/logstash/pipeline/logstash.conf /data/docker/logstash/config
# Edit the config as needed
vi /data/docker/logstash/config/logstash.conf
# Stop and remove the temporary container
docker rm -f logstash
# Set permissions on the config directory
chmod -R 777 /data/docker/logstash
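- Alternatively, the default config can be copied out without running Logstash at all. This is just a sketch using docker create; the container name logstash-tmp is an arbitrary throwaway name, and the image tag and host path are the same as above.
# Create (but do not start) a throwaway container, copy the file out, then remove it
docker create --name logstash-tmp docker.elastic.co/logstash/logstash:7.10.1
docker cp logstash-tmp:/usr/share/logstash/pipeline/logstash.conf /data/docker/logstash/config/
docker rm logstash-tmp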
- logstash.conf configuration (reference: https://cloud.tencent.com/developer/article/2355726)
input {
  tcp {
    mode => "server"
    host => "0.0.0.0"
    port => 4560
    codec => json
  }
}
output {
  elasticsearch {
    hosts => "elasticsearch:9200"
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
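With this pipeline, Logstash listens for JSON events on TCP port 4560 and writes them to a daily Elasticsearch index. Once the containers below are running, a rough sanity check is to push a hand-written event into that port, for example with nc (behaviour varies slightly between netcat versions, and the field names in the sample event are arbitrary):
# Send a test JSON event to the Logstash TCP input
echo '{"message":"hello from nc","level":"INFO"}' | nc localhost 4560
# The event should land in a daily index matching logstash-YYYY.MM.dd
curl "http://localhost:9200/_cat/indices/logstash-*?v"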
Running with docker commands
# Create a bridge network named elk
docker network create elk
# Start Elasticsearch
docker run -d \
--name elasticsearch \
--restart always \
-e TZ=Asia/Shanghai \
-e node.name=elasticsearch \
-e cluster.name=es-docker-cluster \
-e discovery.type=single-node \
-e bootstrap.memory_lock=true \
-e "ES_JAVA_OPTS=-Xms512m -Xmx512m" \
--ulimit memlock=-1:-1 \
-v esdata:/usr/share/elasticsearch/data \
-p 9200:9200 \
-p 9300:9300 \
--network elk \
docker.elastic.co/elasticsearch/elasticsearch:7.10.1
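The first start can take a minute or two. Once the container is up, Elasticsearch should answer on port 9200; a quick check from the host:
# Basic reachability and cluster health
curl http://localhost:9200
curl "http://localhost:9200/_cluster/health?pretty"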
# Start Logstash
docker run -d \
--name logstash \
--restart always \
-v /data/docker/logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf \
-p 4560:4560 \
-p 5044:5044 \
-p 5000:5000/tcp \
-p 5000:5000/udp \
-e TZ=Asia/Shanghai \
-e LS_JAVA_OPTS="-Xmx256m -Xms256m" \
--network elk \
docker.elastic.co/logstash/logstash:7.10.1
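Logstash needs some time to start its pipeline. The container logs should eventually report the pipeline as running and the TCP input listening on 4560, after which the nc test from the logstash.conf section above can be used:
# Follow the Logstash startup logs
docker logs -f logstash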
# Start Kibana
docker run -d \
--name kibana \
--restart always \
-e TZ=Asia/Shanghai \
-e ELASTICSEARCH_URL=http://elasticsearch:9200 \
-e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 \
-p 5601:5601 \
--network elk \
docker.elastic.co/kibana/kibana:7.10.1
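Kibana becomes reachable at http://localhost:5601 once it has connected to Elasticsearch; its status API offers a quick readiness check:
# Returns JSON describing Kibana's overall status
curl http://localhost:5601/api/status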
Running with docker-compose
- docker-compose.yml file
version: '3.1'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.10.1
    container_name: elasticsearch
    restart: always
    environment:
      - TZ=Asia/Shanghai
      - node.name=elasticsearch
      - cluster.name=es-docker-cluster
      - discovery.type=single-node
      - bootstrap.memory_lock=true
      - ES_JAVA_OPTS=-Xms512m -Xmx512m
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - esdata:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"
    networks:
      - elk
  logstash:
    image: docker.elastic.co/logstash/logstash:7.10.1
    container_name: logstash
    restart: always
    volumes:
      - /data/docker/logstash/config/logstash.conf:/usr/share/logstash/pipeline/logstash.conf
    ports:
      - "4560:4560"
      - "5044:5044"
      - "5000:5000/tcp"
      - "5000:5000/udp"
    environment:
      TZ: Asia/Shanghai
      LS_JAVA_OPTS: "-Xmx256m -Xms256m"
    depends_on:
      - elasticsearch
    networks:
      - elk
  kibana:
    image: docker.elastic.co/kibana/kibana:7.10.1
    container_name: kibana
    restart: always
    environment:
      TZ: Asia/Shanghai
      ELASTICSEARCH_URL: http://elasticsearch:9200
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch
    networks:
      - elk
volumes:
  esdata:
    driver: local
networks:
  elk:
    driver: bridge
- Run the docker-compose command (in the directory containing docker-compose.yml)
docker-compose up -d
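After the stack comes up, the container states and startup logs can be inspected with docker-compose as well, run from the same directory:
# Show the status of the three services
docker-compose ps
# Follow the logs of a single service, e.g. elasticsearch
docker-compose logs -f elasticsearch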
Installing the IK analyzer
IK analyzer repository: https://github.com/infinilabs/analysis-ik
Use the official installation method: bin/elasticsearch-plugin install https://get.infini.cloud/elasticsearch/analysis-ik/7.10.1
# Enter the container
docker exec -it elasticsearch bash
# Install the IK analyzer plugin (the plugin version must match the Elasticsearch version)
bin/elasticsearch-plugin install https://get.infini.cloud/elasticsearch/analysis-ik/7.10.1
# Exit the container, then restart Elasticsearch from the host
exit
docker restart elasticsearch
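As a quick command-line check that the plugin was picked up after the restart (both commands are run from the host):
# List plugins installed in the container
docker exec elasticsearch bin/elasticsearch-plugin list
# Or ask the cluster which plugins each node has loaded
curl "http://localhost:9200/_cat/plugins?v"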
Verify that the IK analyzer was installed successfully
Open the Dev Tools page in Kibana and run the following query:
POST /_analyze
{
  "analyzer": "ik_smart",
  "text": ["中国人最牛"]
}
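The same request can also be issued with curl from the host instead of Kibana Dev Tools; if the plugin is working, the response contains a tokens array with the ik_smart segmentation of the text:
curl -X POST "http://localhost:9200/_analyze" \
  -H 'Content-Type: application/json' \
  -d '{"analyzer": "ik_smart", "text": ["中国人最牛"]}'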
Original article: https://blog.csdn.net/ayunnuo/article/details/145255886