1-创建命名空间elk
kubectl create ns elk
2-安装NFS
mkdir -p /nfs_data/es
chmod -R 777 /nfs_data/es
vim /etc/exports
#添加如下内容
/nfs_data/es 192.168.0.0/16(rw,sync,no_subtree_check,no_root_squash)
#重启nfs
systemctl restart nfs
#查看共享目录
showmount -e 192.168.1.2
3-创建StorageClass
4-创建es有状态服务es-cluster.yaml
会自动创建PV和PVC
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: elk
spec:
  # Required: governing (headless) Service that gives each pod a stable DNS name
  serviceName: es-cluster-svc
  replicas: 3
  selector:
    matchLabels:
      app: es-cluster
  template:
    metadata:
      # Must match spec.selector.matchLabels above
      labels:
        app: es-cluster
    spec:
      # Init containers run to completion, in order, before the app container starts
      initContainers:
        # Elasticsearch requires vm.max_map_count >= 262144 on the host kernel
        - name: increase-vm-max-map
          image: busybox:1.32
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
        # NOTE(review): this ulimit applies only inside this init container's own
        # shell; it does NOT raise the fd limit for es-container. Raise nofile at
        # the container runtime / kubelet level instead — verify before relying on it.
        - name: increase-fd-ulimit
          image: busybox:1.32
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
      containers:
        - name: es-container
          image: elasticsearch:7.9.1
          ports:
            # REST API port inside the container
            - name: rest
              containerPort: 9200
              protocol: TCP
          resources:
            limits:
              memory: 1536Mi
              cpu: 1000m
            requests:
              cpu: 100m
              memory: 1536Mi
          volumeMounts:
            - name: es-data
              mountPath: /usr/share/elasticsearch/data
            - name: es-plugins
              mountPath: /usr/share/elasticsearch/plugins
          env:
            # Custom cluster name
            - name: cluster.name
              value: k8s-es
            # Node name = pod name (es-cluster-0, es-cluster-1, ...)
            - name: node.name
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # Master-eligible nodes used to bootstrap the cluster; pod names
            # are <statefulset-name>-<ordinal>, ordinals starting at 0
            - name: cluster.initial_master_nodes
              value: "es-cluster-0,es-cluster-1,es-cluster-2"
            # NOTE(review): deprecated in Elasticsearch 7.x and ignored
            # (voting configuration is managed automatically); kept for parity
            - name: discovery.zen.minimum_master_nodes
              value: "2"
            # Seed hosts for discovery: per-pod stable DNS names via the
            # governing service (<pod>.<serviceName>); should list all
            # master-eligible nodes
            - name: discovery.seed_hosts
              value: "es-cluster-0.es-cluster-svc,es-cluster-1.es-cluster-svc,es-cluster-2.es-cluster-svc"
            # JVM heap size (keep well below the container memory limit)
            - name: ES_JAVA_OPTS
              value: "-Xms512m -Xmx512m"
            - name: network.host
              value: "0.0.0.0"
  # PVCs are created automatically from these templates, one set per replica
  volumeClaimTemplates:
    - metadata:
        # Must match volumeMounts.name above
        name: es-data
        labels:
          app: es-volume
      spec:
        # Volume is read-write for a single node
        accessModes: ["ReadWriteOnce"]
        # Must match metadata.name in es-nfs-storage-class.yaml
        storageClassName: managed-nfs-storage
        resources:
          requests:
            storage: 10Gi
    - metadata:
        # Must match volumeMounts.name above
        name: es-plugins
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: managed-nfs-storage
        resources:
          requests:
            storage: 2Gi
5-创建es服务es-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: es-cluster-svc
  namespace: elk
spec:
  # Must match spec.selector.matchLabels in es-cluster.yaml
  selector:
    app: es-cluster
  type: NodePort
  ports:
    - name: rest
      # Service port
      port: 9200
      # Pod (container) port
      targetPort: 9200
      # Host port; must be within the NodePort range 30000-32767
      # (original value 29200 was out of range and would be rejected)
      nodePort: 30920
6-检查es集群
查看集群节点:http://192.168.1.2:30920/_cat/nodes?v
查看集群健康状态:http://192.168.1.2:30920/_cat/health?v
7-部署kibana
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: elk
spec:
  selector:
    matchLabels:
      app: kibana
  replicas: 1
  template:
    metadata:
      labels:
        app: kibana
    spec:
      restartPolicy: Always
      containers:
        - name: kibana
          image: kibana:7.9.1
          imagePullPolicy: Always
          ports:
            - containerPort: 5601
          env:
            # Elasticsearch endpoint via the in-cluster service DNS name
            - name: ELASTICSEARCH_HOSTS
              value: http://es-cluster-svc:9200
            # Fix: the Kibana Docker image maps env vars to settings using
            # underscores (I18N_LOCALE -> i18n.locale); the original
            # "I18N.LOCALE" would not be recognized. zh-CN = Chinese UI.
            - name: I18N_LOCALE
              value: zh-CN
          resources:
            requests:
              memory: 1024Mi
              cpu: 50m
            limits:
              memory: 1024Mi
              cpu: 1000m
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: elk
spec:
  type: NodePort
  ports:
    - name: kibana
      port: 5601
      targetPort: 5601
      # Host port; must be within the NodePort range 30000-32767
      # (original value 25601 was out of range and would be rejected)
      nodePort: 31601
  selector:
    app: kibana
评论区