
A failed voting (quorum) disk in the PCS cluster caused the host to reboot

Recommendation: rebuild the fence (SBD) device on the replacement LUN and reconfigure it. The session below does exactly that on the two-node cluster jycdb_cluster (jycdb01/jycdb02, which hosts an Oracle 19c resource group), replacing the failed voting disk /dev/mapper/quorum with the new LUN /dev/mapper/quochk.
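The recovery, condensed from the transcript that follows (device and node names are specific to this cluster):

# 1. Remove the stonith resources that still point at the failed voting disk
pcs stonith delete fence_jycdb02
pcs stonith delete fence_jycdb01
# 2. On every node, point SBD at the replacement LUN in /etc/sysconfig/sbd:
#      SBD_DEVICE="/dev/mapper/quochk"
#      SBD_OPTS="-n <local node name>"
# 3. Write a fresh SBD header onto the new device (this wipes it)
pcs stonith sbd device setup --device=/dev/mapper/quochk
# 4. Recreate the fence resource
pcs stonith create sbd_fencing fence_sbd devices=/dev/mapper/quochk
# 5. Restart the cluster stack on each node in turn (pcs cluster stop / start, or reboot)
#    so that sbd re-reads its configuration
# 6. Verify
pcs stonith sbd status --full
sbd -d /dev/mapper/quochk dump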

[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:19:42 2024
Last change: Sat Sep 28 19:10:44 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
12 resources configured

Online: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 fence_jycdb01        (stonith:fence_sbd):    Started jycdb01-prv
 fence_jycdb02        (stonith:fence_sbd):    Started jycdb02-prv
 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb01-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb01-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb01-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb01-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# pcs stonith delete fence_jycdb02
Attempting to stop: fence_jycdb02... Stopped
[root@jycdb01 ~]# 
[root@jycdb01 ~]# 
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:20:38 2024
Last change: Sun Sep 29 12:20:30 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 fence_jycdb01        (stonith:fence_sbd):    Started jycdb01-prv
 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb02-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb02-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb02-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb02-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# pcs stonith delete fence_jycdb01
Attempting to stop: fence_jycdb01... Stopped
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:20:46 2024
Last change: Sun Sep 29 12:20:44 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
10 resources configured

Online: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb02-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb02-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb02-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb02-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# multipath -ll|grep qur
[root@jycdb01 ~]# multipath -ll|grep qu
size=1.0T features='1 queue_if_no_path' hwhandler='0' wp=rw
size=1.5T features='1 queue_if_no_path' hwhandler='0' wp=rw
size=2.0T features='1 queue_if_no_path' hwhandler='0' wp=rw
quorum (360060e80225748005041574800000029) dm-5 HITACHI ,OPEN-V          
size=2.0G features='1 queue_if_no_path' hwhandler='0' wp=rw
quochk (360060e80225748005041574800000100) dm-6 HITACHI ,OPEN-V          
size=5.0G features='1 queue_if_no_path' hwhandler='0' wp=rw
[root@jycdb01 ~]# ll /dev/mapper/quo*
lrwxrwxrwx 1 root root 7 Sep 29 11:26 /dev/mapper/quochk -> ../dm-6
lrwxrwxrwx 1 root root 7 Sep 29 12:19 /dev/mapper/quorum -> ../dm-5
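
Two mapper devices match: the old 2 GB voting disk quorum (dm-5), which the running sbd daemon is still watching (see the systemctl status output further down), and the 5 GB replacement LUN quochk (dm-6) that the fence setup will be rebuilt on. Before reusing quochk it is worth confirming its multipath paths are healthy; a minimal check (not part of the original session):

multipath -ll quochk    # every path should show active/ready
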
[root@jycdb01 ~]# vi /etc/sysconfig/sbd
[root@jycdb01 ~]# cat /etc/sysconfig/sbd|head
## Type: string
## Default: ""
#
# SBD_DEVICE specifies the devices to use for exchanging sbd messages
# and to monitor. If specifying more than one path, use ";" as
# separator.
#
#SBD_DEVICE=""
SBD_DEVICE="/dev/mapper/quochk"
SBD_OPTS="-n jycdb01"
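
SBD_DEVICE must name the same shared LUN on every node, and the node name passed with SBD_OPTS="-n ..." must match the slot that node owns on the device (jycdb01 here, jycdb02 on the peer). A quick consistency check that could be run on both nodes (a sketch, not from the original session):

grep -E '^SBD_(DEVICE|OPTS)=' /etc/sysconfig/sbd
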
[root@jycdb01 ~]# ssh 192.168.52.75
root@192.168.52.75's password: 
Last login: Sun Sep 29 11:50:41 2024 from 10.143.84.102
[root@jycdb02 ~]# vi /etc/sysconfig/sbd
[root@jycdb02 ~]# head /etc/sysconfig/sbd
## Type: string
## Default: ""
#
# SBD_DEVICE specifies the devices to use for exchanging sbd messages
# and to monitor. If specifying more than one path, use ";" as
# separator.
#
#SBD_DEVICE=""
SBD_DEVICE="/dev/mapper/quochk"
SBD_OPTS="-n jycdb02"
[root@jycdb02 ~]# ll /dev/mapper/quochk 
lrwxrwxrwx 1 root root 7 Sep 29 11:31 /dev/mapper/quochk -> ../dm-6
[root@jycdb02 ~]# pcs stonith show --full 
[root@jycdb02 ~]# exit
logout
Connection to 192.168.52.75 closed.
[root@jycdb01 ~]# cs stonith sbd device setup --device=/dev/mapper/quochk
bash: cs: command not found...
Similar command is:
'ss'
'cc'
[root@jycdb01 ~]# 
[root@jycdb01 ~]# 
[root@jycdb01 ~]# pcs stonith sbd device setup --device=/dev/mapper/quochk
WARNING: All current content on device(s) '/dev/mapper/quochk' will be overwritten. Are you sure you want to continue? [y/N] y
Initializing device(s) /dev/mapper/quochk...
Device(s) initialized successfuly
[root@jycdb01 ~]# ssh 192.168.52.75
root@192.168.52.75's password: 
Last login: Sun Sep 29 12:22:17 2024 from jycdb01
[root@jycdb02 ~]# pcs stonith sbd device setup --device=/dev/mapper/quochk
WARNING: All current content on device(s) '/dev/mapper/quochk' will be overwritten. Are you sure you want to continue? [y/N] y
Initializing device(s) /dev/mapper/quochk...
Device(s) initialized successfuly
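
pcs stonith sbd device setup writes a fresh SBD header with 255 node slots onto the LUN, destroying whatever was on it. Because the device is shared, a single run would have been sufficient; the second run on jycdb02 simply re-initializes the same header. The result can be inspected with the dump subcommand, as is done later in the session:

sbd -d /dev/mapper/quochk dump    # header version, UUID, slot count, timeouts
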
[root@jycdb02 ~]# systemctl enable --now sbd
Failed to start sbd.service: Operation refused, unit sbd.service may be requested by dependency only (it is configured to refuse manual start/stop).
See system logs and 'systemctl status sbd.service' for details.
[root@jycdb02 ~]# systemctl status sbd
● sbd.service - Shared-storage based fencing daemon
   Loaded: loaded (/usr/lib/systemd/system/sbd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2024-09-29 11:31:16 CST; 54min ago
 Main PID: 20231 (sbd)
   CGroup: /system.slice/sbd.service
           ├─20231 sbd: inquisitor
           ├─20234 sbd: watcher: /dev/mapper/quorum - slot: 1 - uuid: 015ae81e-f47d-493b-a741-d431ea3d919a
           ├─20235 sbd: watcher: Pacemaker
           └─20236 sbd: watcher: Cluster

Sep 29 11:31:15 jycdb02 systemd[1]: Starting Shared-storage based fencing daemon...
Sep 29 11:31:16 jycdb02 systemd[1]: Started Shared-storage based fencing daemon.
[root@jycdb02 ~]# systemctl stop sbd
Failed to stop sbd.service: Operation refused, unit sbd.service may be requested by dependency only (it is configured to refuse manual start/stop).
See system logs and 'systemctl status sbd.service' for details.
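
sbd.service is configured to refuse manual start/stop; it is pulled in through the corosync/pacemaker .requires directories (the find output further down shows them), so the daemon that is currently running keeps watching the old /dev/mapper/quorum until the cluster stack itself is restarted. Below, that restart is done either by rebooting the node or, one node at a time:

pcs cluster stop     # stops pacemaker and corosync, which also takes sbd down
pcs cluster start    # brings the stack back up; sbd restarts and re-reads /etc/sysconfig/sbd
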
[root@jycdb02 ~]# systemctl status sbd.service
● sbd.service - Shared-storage based fencing daemon
   Loaded: loaded (/usr/lib/systemd/system/sbd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2024-09-29 11:31:16 CST; 54min ago
 Main PID: 20231 (sbd)
   CGroup: /system.slice/sbd.service
           ├─20231 sbd: inquisitor
           ├─20234 sbd: watcher: /dev/mapper/quorum - slot: 1 - uuid: 015ae81e-f47d-493b-a741-d431ea3d919a
           ├─20235 sbd: watcher: Pacemaker
           └─20236 sbd: watcher: Cluster

Sep 29 11:31:15 jycdb02 systemd[1]: Starting Shared-storage based fencing daemon...
Sep 29 11:31:16 jycdb02 systemd[1]: Started Shared-storage based fencing daemon.
[root@jycdb02 ~]# vi /usr/lib/systemd/system/sbd.service
[root@jycdb02 ~]# exit
logout
Connection to 192.168.52.75 closed.
[root@jycdb01 ~]# pcs stonith create sbd_fencing fence_sbd devices=/dev/mapper/quochk
[root@jycdb01 ~]# ssh 192.168.52.75
root@192.168.52.75's password: 
Last login: Sun Sep 29 12:24:43 2024 from jycdb01
[root@jycdb02 ~]# pcs stonith create sbd_fencing fence_sbd devices=/dev/mapper/quochk
Error: 'sbd_fencing' already exists
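
The stonith resource lives in the cluster CIB, so creating it once on jycdb01 is enough; the "already exists" error on jycdb02 merely confirms that the resource is already cluster-wide. Its definition can be checked from any node:

pcs stonith show --full    # Resource: sbd_fencing (class=stonith type=fence_sbd), devices=/dev/mapper/quochk
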
[root@jycdb02 ~]# more /system.slice/sbd.service
/system.slice/sbd.service: No such file or directory
[root@jycdb02 ~]# find / -name sbd.service
/sys/fs/cgroup/pids/system.slice/sbd.service
/sys/fs/cgroup/devices/system.slice/sbd.service
/sys/fs/cgroup/systemd/system.slice/sbd.service
/etc/systemd/system/corosync.service.requires/sbd.service
/etc/systemd/system/pacemaker.service.requires/sbd.service
/etc/systemd/system/dlm.service.requires/sbd.service
/usr/lib/systemd/system/sbd.service
more ^C
[root@jycdb02 ~]# grep quo /sys/fs/cgroup/pids/system.slice/sbd.service
grep: /sys/fs/cgroup/pids/system.slice/sbd.service: Is a directory
[root@jycdb02 ~]# grep quo /etc/systemd/system/corosync.service.requires/sbd.service
[root@jycdb02 ~]# grep quo /etc/systemd/system/pacemaker.service.requires/sbd.service
[root@jycdb02 ~]# grep quo /usr/lib/systemd/system/sbd.service
[root@jycdb02 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:30:03 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb02-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb02-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb02-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb02-prv
 sbd_fencing    (stonith:fence_sbd):    Started jycdb01-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb02 ~]# pcs cluster stop
Stopping Cluster (pacemaker)...
Stopping Cluster (corosync)...
[root@jycdb02 ~]# systemctl status sbd.service
● sbd.service - Shared-storage based fencing daemon
   Loaded: loaded (/usr/lib/systemd/system/sbd.service; enabled; vendor preset: disabled)
   Active: inactive (dead) since Sun 2024-09-29 12:31:01 CST; 6s ago
  Process: 53141 ExecStop=/usr/bin/kill -TERM $MAINPID (code=exited, status=0/SUCCESS)
 Main PID: 20231 (code=exited, status=0/SUCCESS)

Sep 29 11:31:15 jycdb02 systemd[1]: Starting Shared-storage based fencing daemon...
Sep 29 11:31:16 jycdb02 systemd[1]: Started Shared-storage based fencing daemon.
Sep 29 12:31:01 jycdb02 systemd[1]: Stopping Shared-storage based fencing daemon...
Sep 29 12:31:01 jycdb02 sbd[20231]:  warning: cleanup_servant_by_pid: Servant for pcmk (pid: 20235) has terminated
Sep 29 12:31:01 jycdb02 sbd[20231]:  warning: cleanup_servant_by_pid: Servant for cluster (pid: 20236) has terminated
Sep 29 12:31:01 jycdb02 sbd[20231]:  warning: cleanup_servant_by_pid: Servant for /dev/mapper/quorum (pid: 20234) has terminated
Sep 29 12:31:01 jycdb02 systemd[1]: Stopped Shared-storage based fencing daemon.
[root@jycdb02 ~]# systemctl stop sbd.service
Failed to stop sbd.service: Operation refused, unit sbd.service may be requested by dependency only (it is configured to refuse manual start/stop).
See system logs and 'systemctl status sbd.service' for details.
[root@jycdb02 ~]# systemctl start sbd.service
Failed to start sbd.service: Operation refused, unit sbd.service may be requested by dependency only (it is configured to refuse manual start/stop).
See system logs and 'systemctl status sbd.service' for details.
[root@jycdb02 ~]# reboot
Connection to 192.168.52.75 closed by remote host.
Connection to 192.168.52.75 closed.
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:31:41 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv ]
OFFLINE: [ jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv ]
     Stopped: [ jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb01-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb01-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb01-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb01-prv
 sbd_fencing    (stonith:fence_sbd):    Started jycdb01-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# systemctl status sbd
● sbd.service - Shared-storage based fencing daemon
   Loaded: loaded (/usr/lib/systemd/system/sbd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2024-09-29 11:26:36 CST; 1h 5min ago
 Main PID: 20218 (sbd)
   CGroup: /system.slice/sbd.service
           ├─20218 sbd: inquisitor
           ├─20223 sbd: watcher: /dev/mapper/quorum - slot: 0 - uuid: 015ae81e-f47d-493b-a741-d431ea3d919a
           ├─20224 sbd: watcher: Pacemaker
           └─20225 sbd: watcher: Cluster

Sep 29 11:26:35 jycdb01 systemd[1]: Starting Shared-storage based fencing daemon...
Sep 29 11:26:36 jycdb01 systemd[1]: Started Shared-storage based fencing daemon.
Sep 29 11:29:17 jycdb01 sbd[20225]:    cluster:  warning: set_servant_health: Connected to corosync but requires both nodes present
Sep 29 11:29:17 jycdb01 sbd[20218]:  warning: inquisitor_child: cluster health check: UNHEALTHY
Sep 29 11:29:17 jycdb01 sbd[20218]:  warning: inquisitor_child: Servant cluster is outdated (age: 181)
Sep 29 12:31:01 jycdb01 sbd[20225]:    cluster:  warning: set_servant_health: Connected to corosync but requires both nodes present
Sep 29 12:31:01 jycdb01 sbd[20218]:  warning: inquisitor_child: cluster health check: UNHEALTHY
Sep 29 12:31:01 jycdb01 sbd[20218]:  warning: inquisitor_child: Servant cluster is outdated (age: 3885)
[root@jycdb01 ~]# pcs cluster stop
Error: Stopping the node will cause a loss of the quorum, use --force to override
[root@jycdb01 ~]# pcs cluster stop --force
Stopping Cluster (pacemaker)...
Stopping Cluster (corosync)...
[root@jycdb01 ~]# systemctl status sbd
● sbd.service - Shared-storage based fencing daemon
   Loaded: loaded (/usr/lib/systemd/system/sbd.service; enabled; vendor preset: disabled)
   Active: inactive (dead) since Sun 2024-09-29 12:32:34 CST; 17s ago
  Process: 104705 ExecStop=/usr/bin/kill -TERM $MAINPID (code=exited, status=0/SUCCESS)
 Main PID: 20218 (code=exited, status=0/SUCCESS)

Sep 29 11:29:17 jycdb01 sbd[20218]:  warning: inquisitor_child: cluster health check: UNHEALTHY
Sep 29 11:29:17 jycdb01 sbd[20218]:  warning: inquisitor_child: Servant cluster is outdated (age: 181)
Sep 29 12:31:01 jycdb01 sbd[20225]:    cluster:  warning: set_servant_health: Connected to corosync but requires both nodes present
Sep 29 12:31:01 jycdb01 sbd[20218]:  warning: inquisitor_child: cluster health check: UNHEALTHY
Sep 29 12:31:01 jycdb01 sbd[20218]:  warning: inquisitor_child: Servant cluster is outdated (age: 3885)
Sep 29 12:32:34 jycdb01 systemd[1]: Stopping Shared-storage based fencing daemon...
Sep 29 12:32:34 jycdb01 sbd[20218]:  warning: cleanup_servant_by_pid: Servant for pcmk (pid: 20224) has terminated
Sep 29 12:32:34 jycdb01 sbd[20218]:  warning: cleanup_servant_by_pid: Servant for cluster (pid: 20225) has terminated
Sep 29 12:32:34 jycdb01 sbd[20218]:  warning: cleanup_servant_by_pid: Servant for /dev/mapper/quorum (pid: 20223) has terminated
Sep 29 12:32:34 jycdb01 systemd[1]: Stopped Shared-storage based fencing daemon.
[root@jycdb01 ~]# systemctl start sbd
Failed to start sbd.service: Operation refused, unit sbd.service may be requested by dependency only (it is configured to refuse manual start/stop).
See system logs and 'systemctl status sbd.service' for details.
[root@jycdb01 ~]# pcs cluster start
Starting Cluster (corosync)...
Starting Cluster (pacemaker)...
[root@jycdb01 ~]# systemctl status sbd
● sbd.service - Shared-storage based fencing daemon
   Loaded: loaded (/usr/lib/systemd/system/sbd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sun 2024-09-29 12:33:05 CST; 3s ago
  Process: 104705 ExecStop=/usr/bin/kill -TERM $MAINPID (code=exited, status=0/SUCCESS)
  Process: 104815 ExecStart=/usr/sbin/sbd $SBD_OPTS -p /var/run/sbd.pid watch (code=exited, status=0/SUCCESS)
 Main PID: 104818 (sbd)
    Tasks: 4
   CGroup: /system.slice/sbd.service
           ├─104818 sbd: inquisitor
           ├─104823 sbd: watcher: /dev/mapper/quochk - slot: 0 - uuid: d65476d5-160e-47d9-8033-95948c0cc2dc
           ├─104825 sbd: watcher: Pacemaker
           └─104826 sbd: watcher: Cluster

Sep 29 12:33:02 jycdb01 systemd[1]: Starting Shared-storage based fencing daemon...
Sep 29 12:33:05 jycdb01 systemd[1]: Started Shared-storage based fencing daemon.
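
After the cluster stack restart, the sbd watcher on jycdb01 now sits on /dev/mapper/quochk (slot 0) with a new UUID, instead of the failed /dev/mapper/quorum it was watching before. A quick way to confirm which device a node's sbd is watching (a sketch, not part of the original session):

ps -ef | grep '[s]bd: watcher'    # should list the new device path
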
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: NONE
Last updated: Sun Sep 29 12:33:18 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

OFFLINE: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Stopped: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Stopped
     db_lsnr    (ocf::heartbeat:oralsnr):       Stopped
     db_datavg  (ocf::heartbeat:LVM):   Stopped
     db_datafs  (ocf::heartbeat:Filesystem):    Stopped
     db_bakvg   (ocf::heartbeat:LVM):   Stopped
     db_bakfs   (ocf::heartbeat:Filesystem):    Stopped
     db_orac    (ocf::heartbeat:oracle):        Stopped
     alert_mail (ocf::heartbeat:MailTo):        Stopped
 sbd_fencing    (stonith:fence_sbd):    Stopped

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition WITHOUT quorum
Last updated: Sun Sep 29 12:33:30 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv ]
OFFLINE: [ jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Stopped: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Stopped
     db_lsnr    (ocf::heartbeat:oralsnr):       Stopped
     db_datavg  (ocf::heartbeat:LVM):   Stopped
     db_datafs  (ocf::heartbeat:Filesystem):    Stopped
     db_bakvg   (ocf::heartbeat:LVM):   Stopped
     db_bakfs   (ocf::heartbeat:Filesystem):    Stopped
     db_orac    (ocf::heartbeat:oracle):        Stopped
     alert_mail (ocf::heartbeat:MailTo):        Stopped
 sbd_fencing    (stonith:fence_sbd):    Stopped

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# pcs stonith show --full 
 Resource: sbd_fencing (class=stonith type=fence_sbd)
  Attributes: devices=/dev/mapper/quochk
  Operations: monitor interval=60s (sbd_fencing-monitor-interval-60s)
[root@jycdb01 ~]# pcs stonith show  
 sbd_fencing    (stonith:fence_sbd):    Started jycdb02-prv
[root@jycdb01 ~]# pcs stonith sbd status --full  
SBD STATUS
<node name>: <installed> | <enabled> | <running>
jycdb01-prv: YES | YES | YES
jycdb02-prv: YES | YES | YES

Messages list on device '/dev/mapper/quochk':
0       jycdb01       clear
1       jycdb02       clear


SBD header on device '/dev/mapper/quochk':
==Dumping header on disk /dev/mapper/quochk
Header version     : 2.1
UUID               : d65476d5-160e-47d9-8033-95948c0cc2dc
Number of slots    : 255
Sector size        : 512
Timeout (watchdog) : 5
Timeout (allocate) : 2
Timeout (loop)     : 1
Timeout (msgwait)  : 10
==Header on disk /dev/mapper/quochk is dumped

[root@jycdb01 ~]# fence_sbd --devices=/dev/mapper/quochk -n jycdb01 -o status
Status: ON
[root@jycdb01 ~]# fence_sbd --devices=/dev/mapper/quochk -n jycdb02 -o status
Status: ON
[root@jycdb01 ~]# sbd -d /dev/mapper/quochk dump|grep msgwait
Timeout (msgwait)  : 10
[root@jycdb01 ~]# fence_sbd -o metadata|grep -A 2 power_timeout
        <parameter name="power_timeout" unique="0" required="0">
                <getopt mixed="--power-timeout=[seconds]" />
                <content type="second" default="20"  />
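
The two checks above compare the SBD msgwait timeout (10 s) with the fence_sbd agent's default power_timeout (20 s): power_timeout is how long the agent waits for the poison-pill fencing to be confirmed, so it needs to stay above msgwait. With these values the defaults are fine; if a device were ever created with a larger msgwait, the resource could be adjusted, for example (hypothetical value):

pcs stonith update sbd_fencing power_timeout=40
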
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:35:50 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb01-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb01-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb01-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb01-prv
 sbd_fencing    (stonith:fence_sbd):    Started jycdb02-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# reboot
Connection to 192.168.52.74 closed by remote host.
Connection to 192.168.52.74 closed.
[root@rsb-apswb01 ~]# 


[root@rsb-apswb01 ~]# 
[root@rsb-apswb01 ~]# ssh 192.168.52.74
^C
[root@rsb-apswb01 ~]# ssh 192.168.52.75
root@192.168.52.75's password: 
Last login: Sun Sep 29 12:28:08 2024 from jycdb01
[root@jycdb02 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb02-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:37:22 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb02-prv ]
OFFLINE: [ jycdb01-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb02-prv ]
     Stopped: [ jycdb01-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb02-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb02-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb02-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb02-prv
 sbd_fencing    (stonith:fence_sbd):    Started jycdb02-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb02 ~]# df -hT
Filesystem                 Type      Size  Used Avail Use% Mounted on
/dev/mapper/rhel-root      xfs       109G  8.9G  100G    9% /
devtmpfs                   devtmpfs  126G     0  126G    0% /dev
tmpfs                      tmpfs     126G   61M  126G    1% /dev/shm
tmpfs                      tmpfs     126G   11M  126G    1% /run
tmpfs                      tmpfs     126G     0  126G    0% /sys/fs/cgroup
/dev/mapper/rhel-u01       xfs       150G   32G  119G   21% /u01
/dev/loop0                 iso9660   4.2G  4.2G     0  100% /media
/dev/mapper/rhel-home      xfs       570G  205G  366G   36% /home
/dev/sda2                  xfs      1014M  207M  808M   21% /boot
/dev/sda1                  vfat      200M  9.8M  191M    5% /boot/efi
tmpfs                      tmpfs      26G  8.0K   26G    1% /run/user/42
tmpfs                      tmpfs      26G     0   26G    0% /run/user/1101
/dev/mapper/datavg-oradata xfs       2.9T  1.8T  1.2T   62% /oradata
/dev/mapper/bkupvg-backup  xfs       1.3T  438G  894G   33% /backup
tmpfs                      tmpfs      26G     0   26G    0% /run/user/0
[root@jycdb02 ~]# 
[root@jycdb02 ~]# 
[root@jycdb02 ~]# fence_sbd --devices=/dev/mapper/quochk -n jycdb02 -o status
Status: ON
[root@jycdb02 ~]# fence_sbd --devices=/dev/mapper/quochk -n jycdb01 -o status
Status: ON
[root@jycdb02 ~]# pcs stonith sbd status --full 
Warning: Unable to connect to jycdb01-prv (Failed connect to jycdb01-prv:2224; No route to host)
Warning: Unable to get status of SBD from node 'jycdb01-prv'
SBD STATUS
<node name>: <installed> | <enabled> | <running>
jycdb02-prv: YES | YES | YES
jycdb01-prv: N/A | N/A | N/A

Messages list on device '/dev/mapper/quochk':
0       jycdb01       clear
1       jycdb02       clear


SBD header on device '/dev/mapper/quochk':
==Dumping header on disk /dev/mapper/quochk
Header version     : 2.1
UUID               : d65476d5-160e-47d9-8033-95948c0cc2dc
Number of slots    : 255
Sector size        : 512
Timeout (watchdog) : 5
Timeout (allocate) : 2
Timeout (loop)     : 1
Timeout (msgwait)  : 10
==Header on disk /dev/mapper/quochk is dumped

[root@jycdb02 ~]# pcs stonith show --full 
 Resource: sbd_fencing (class=stonith type=fence_sbd)
  Attributes: devices=/dev/mapper/quochk
  Operations: monitor interval=60s (sbd_fencing-monitor-interval-60s)
[root@jycdb02 ~]# pcs stonith sbd status --full 
SBD STATUS
<node name>: <installed> | <enabled> | <running>
jycdb02-prv: YES | YES | YES
jycdb01-prv: YES | YES | YES

Messages list on device '/dev/mapper/quochk':
0       jycdb01       clear
1       jycdb02       clear


SBD header on device '/dev/mapper/quochk':
==Dumping header on disk /dev/mapper/quochk
Header version     : 2.1
UUID               : d65476d5-160e-47d9-8033-95948c0cc2dc
Number of slots    : 255
Sector size        : 512
Timeout (watchdog) : 5
Timeout (allocate) : 2
Timeout (loop)     : 1
Timeout (msgwait)  : 10
==Header on disk /dev/mapper/quochk is dumped

[root@jycdb02 ~]# 
[root@jycdb02 ~]# 
[root@jycdb02 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb02-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:39:30 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb02-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb02-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb02-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb02-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb02-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb02-prv
 sbd_fencing    (stonith:fence_sbd):    Started jycdb01-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb02 ~]# reboot
Connection to 192.168.52.75 closed by remote host.
Connection to 192.168.52.75 closed.
[root@rsb-apswb01 ~]# 
[root@rsb-apswb01 ~]# 
[root@rsb-apswb01 ~]# ssh 192.168.52.74
root@192.168.52.74's password: 
Last login: Sun Sep 29 12:19:40 2024 from 192.168.52.56
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:40:36 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv ]
OFFLINE: [ jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv ]
     Stopped: [ jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb01-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb01-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb01-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb01-prv
 sbd_fencing    (stonith:fence_sbd):    Started jycdb01-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# 
[root@jycdb01 ~]# fence_sbd -o metadata|grep -A 2 power_timeout
        <parameter name="power_timeout" unique="0" required="0">
                <getopt mixed="--power-timeout=[seconds]" />
                <content type="second" default="20"  />
[root@jycdb01 ~]# sbd -d /dev/mapper/quochk dump|grep msgwait
Timeout (msgwait)  : 10
[root@jycdb01 ~]# fence_sbd --devices=/dev/mapper/quochk -n jycdb01 -o status
Status: ON
[root@jycdb01 ~]# pcs stonith show --full 
 Resource: sbd_fencing (class=stonith type=fence_sbd)
  Attributes: devices=/dev/mapper/quochk
  Operations: monitor interval=60s (sbd_fencing-monitor-interval-60s)
[root@jycdb01 ~]# 
[root@jycdb01 ~]# 
[root@jycdb01 ~]# pcs config
Cluster Name: jycdb_cluster
Corosync Nodes:
 jycdb01-prv jycdb02-prv
Pacemaker Nodes:
 jycdb01-prv jycdb02-prv

Resources:
 Clone: db_ping-clone
  Resource: db_ping (class=ocf provider=pacemaker type=ping)
   Attributes: dampen=5s host_list=10.143.84.254 multiplier=1000
   Operations: monitor interval=10 timeout=60 (db_ping-monitor-interval-10)
               start interval=0s timeout=60 (db_ping-start-interval-0s)
               stop interval=0s timeout=20 (db_ping-stop-interval-0s)
 Group: db_grp
  Resource: db_vip (class=ocf provider=heartbeat type=IPaddr2)
   Attributes: cidr_netmask=24 ip=192.168.52.95 nic=bond4
   Operations: monitor interval=10s timeout=20s (db_vip-monitor-interval-10s)
               start interval=0s timeout=20s (db_vip-start-interval-0s)
               stop interval=0s timeout=20s (db_vip-stop-interval-0s)
  Resource: db_lsnr (class=ocf provider=heartbeat type=oralsnr)
   Attributes: home=/u01/19c/oracle/product/19c/db_1 sid=orac tns_admin=/u01/19c/oracle/product/19c/db_1/network/admin user=oracle
   Operations: methods interval=0s timeout=5s (db_lsnr-methods-interval-0s)
               monitor interval=10s timeout=30s (db_lsnr-monitor-interval-10s)
               start interval=0s timeout=120s (db_lsnr-start-interval-0s)
               stop interval=0s timeout=120s (db_lsnr-stop-interval-0s)
  Resource: db_datavg (class=ocf provider=heartbeat type=LVM)
   Attributes: exclusive=true volgrpname=datavg
   Operations: methods interval=0s timeout=5s (db_datavg-methods-interval-0s)
               monitor interval=10s timeout=30s (db_datavg-monitor-interval-10s)
               start interval=0s timeout=30s (db_datavg-start-interval-0s)
               stop interval=0s timeout=30s (db_datavg-stop-interval-0s)
  Resource: db_datafs (class=ocf provider=heartbeat type=Filesystem)
   Attributes: device=/dev/datavg/oradata directory=/oradata fstype=xfs
   Operations: monitor interval=20s timeout=40s (db_datafs-monitor-interval-20s)
               notify interval=0s timeout=60s (db_datafs-notify-interval-0s)
               start interval=0s timeout=60s (db_datafs-start-interval-0s)
               stop interval=0s timeout=60s (db_datafs-stop-interval-0s)
  Resource: db_bakvg (class=ocf provider=heartbeat type=LVM)
   Attributes: exclusive=true volgrpname=bkupvg
   Operations: methods interval=0s timeout=5s (db_bakvg-methods-interval-0s)
               monitor interval=10s timeout=30s (db_bakvg-monitor-interval-10s)
               start interval=0s timeout=30s (db_bakvg-start-interval-0s)
               stop interval=0s timeout=30s (db_bakvg-stop-interval-0s)
  Resource: db_bakfs (class=ocf provider=heartbeat type=Filesystem)
   Attributes: device=/dev/bkupvg/backup directory=/backup fstype=xfs
   Operations: monitor interval=20s timeout=40s (db_bakfs-monitor-interval-20s)
               notify interval=0s timeout=60s (db_bakfs-notify-interval-0s)
               start interval=0s timeout=60s (db_bakfs-start-interval-0s)
               stop interval=0s timeout=60s (db_bakfs-stop-interval-0s)
  Resource: db_orac (class=ocf provider=heartbeat type=oracle)
   Attributes: home=/u01/19c/oracle/product/19c/db_1 shutdown_method=immediate sid=orac user=oracle
   Operations: methods interval=0s timeout=5s (db_orac-methods-interval-0s)
               monitor interval=120s timeout=30s (db_orac-monitor-interval-120s)
               start interval=0s timeout=120s (db_orac-start-interval-0s)
               stop interval=0s timeout=120s (db_orac-stop-interval-0s)
  Resource: alert_mail (class=ocf provider=heartbeat type=MailTo)
   Attributes: email=huaping.li.ur@renesas.com subject=jycdb_cluster-91-failover-occurs
   Operations: monitor interval=10s timeout=10s (alert_mail-monitor-interval-10s)
               start interval=0s timeout=10s (alert_mail-start-interval-0s)
               stop interval=0s timeout=10s (alert_mail-stop-interval-0s)

Stonith Devices:
 Resource: sbd_fencing (class=stonith type=fence_sbd)
  Attributes: devices=/dev/mapper/quochk
  Operations: monitor interval=60s (sbd_fencing-monitor-interval-60s)
Fencing Levels:

Location Constraints:
Ordering Constraints:
Colocation Constraints:
Ticket Constraints:

Alerts:
 No alerts defined

Resources Defaults:
 No defaults set
Operations Defaults:
 No defaults set

Cluster Properties:
 cluster-infrastructure: corosync
 cluster-name: jycdb_cluster
 dc-version: 1.1.19-8.el7-c3c624ea3d
 have-watchdog: true
 last-lrm-refresh: 1727425883
 maintenance-mode: false
 no-quorum-policy: stop
 stonith-enabled: false

Quorum:
  Options:
[root@jycdb01 ~]# pcs config|grep quo
  Attributes: devices=/dev/mapper/quochk
 no-quorum-policy: stop
[root@jycdb01 ~]# pcs property show
Cluster Properties:
 cluster-infrastructure: corosync
 cluster-name: jycdb_cluster
 dc-version: 1.1.19-8.el7-c3c624ea3d
 have-watchdog: true
 last-lrm-refresh: 1727425883
 maintenance-mode: false
 no-quorum-policy: stop
 stonith-enabled: false
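
Note that the cluster still runs with stonith-enabled: false, so Pacemaker will not actually call the rebuilt sbd_fencing device when it decides a node must be fenced (sbd's own watchdog-based self-fencing is independent of this property). If cluster-driven fencing is intended, the property would need to be switched on:

pcs property set stonith-enabled=true
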
[root@jycdb01 ~]# 
[root@jycdb01 ~]# 
[root@jycdb01 ~]# pcs status
Cluster name: jycdb_cluster
Stack: corosync
Current DC: jycdb01-prv (version 1.1.19-8.el7-c3c624ea3d) - partition with quorum
Last updated: Sun Sep 29 12:43:10 2024
Last change: Sun Sep 29 12:27:26 2024 by root via cibadmin on jycdb01-prv

2 nodes configured
11 resources configured

Online: [ jycdb01-prv jycdb02-prv ]

Full list of resources:

 Clone Set: db_ping-clone [db_ping]
     Started: [ jycdb01-prv jycdb02-prv ]
 Resource Group: db_grp
     db_vip     (ocf::heartbeat:IPaddr2):       Started jycdb01-prv
     db_lsnr    (ocf::heartbeat:oralsnr):       Started jycdb01-prv
     db_datavg  (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_datafs  (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_bakvg   (ocf::heartbeat:LVM):   Started jycdb01-prv
     db_bakfs   (ocf::heartbeat:Filesystem):    Started jycdb01-prv
     db_orac    (ocf::heartbeat:oracle):        Started jycdb01-prv
     alert_mail (ocf::heartbeat:MailTo):        Started jycdb01-prv
 sbd_fencing    (stonith:fence_sbd):    Started jycdb02-prv

Daemon Status:
  corosync: active/enabled
  pacemaker: active/enabled
  pcsd: active/enabled
  sbd: active/enabled
[root@jycdb01 ~]# su - oracle
Last login: Sun Sep 29 12:42:35 CST 2024
oracle@jycdb01:/home/oracle>df -hT
Filesystem                 Type      Size  Used Avail Use% Mounted on
/dev/mapper/rhel-root      xfs        109G  9.7G   99G    9% /
devtmpfs                   devtmpfs   126G     0  126G    0% /dev
tmpfs                      tmpfs      126G   61M  126G    1% /dev/shm
tmpfs                      tmpfs      126G   11M  126G    1% /run
tmpfs                      tmpfs      126G     0  126G    0% /sys/fs/cgroup
/dev/mapper/rhel-home      xfs        570G   17G  553G    3% /home
/dev/mapper/rhel-u01       xfs        150G   36G  115G   24% /u01
/dev/loop0                 iso9660    4.2G  4.2G     0  100% /media
/dev/sda2                  xfs       1017M  151M  867M   15% /boot
/dev/sda1                  vfat       200M  9.8M  191M    5% /boot/efi
tmpfs                      tmpfs       26G   12K   26G    1% /run/user/42
tmpfs                      tmpfs       26G     0   26G    0% /run/user/1101
/dev/mapper/datavg-oradata xfs        2.9T  1.8T  1.2T   62% /oradata
/dev/mapper/bkupvg-backup  xfs        1.3T  438G  894G   33% /backup
tmpfs                      tmpfs       26G     0   26G    0% /run/user/0
oracle@jycdb01:/home/oracle>sqlplus system/oracle@orac

SQL*Plus: Release 19.0.0.0.0 - Production on Sun Sep 29 12:43:41 2024
Version 19.23.0.0.0

Copyright (c) 1982, 2023, Oracle.  All rights reserved.

Last Successful login time: Sun Sep 29 2024 11:31:23 +08:00


Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.23.0.0.0
SQL> select count(*) from dba_users;

  COUNT(*)
----------
        48

SQL> exit
Disconnected from Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.23.0.0.0
oracle@jycdb01:/home/oracle>
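
At this point both nodes have been rebooted in turn, the resource group failed over cleanly in each direction, sbd on both nodes watches the new /dev/mapper/quochk device, and the Oracle instance is reachable again. The same commands used above make a reasonable routine check after any future change to the voting disk:

pcs status
pcs stonith sbd status --full
fence_sbd --devices=/dev/mapper/quochk -n jycdb01 -o status
fence_sbd --devices=/dev/mapper/quochk -n jycdb02 -o status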



Original article: https://blog.csdn.net/jycjyc/article/details/142621356
