环境说明
| 项目 | 主库(RAC) | 备库(FS) |
数据库版本 | Oracle19.22.0.0 | Oracle19.22.0.0 |
IP 地址 | 192.168.40.90-94 | 192.168.40.95 |
主机名 | hfdb90、hfdb91 | hfdb95 |
DB_UNIQUE_NAME | hfdb | dghfdb |
DB_NAME | hfdb | hfdb |
DB Instances | hfdb1、hfdb2 | dghfdb |
DB STORAGE | RAC | FS |
OS | Euler 21.10 | Euler 21.10 |
Oracle RAC DataGuard 主库配置
主库设置force logging模式
# 在一台RAC上面执行即可
SYS@hfdb1> alter database force logging;
Database altered.
启动归档日志
关闭其中一台数据库,只在一台数据库中执行。
[oracle@hfdb90:/home/oracle]$srvctl stop database -d hfdb
#也可以在每个库里面shutdown immediate;
#rac的两台主机都要做shutdown
##【第二台不关闭 在第一台起startup mount 报ORA-01102: cannot mount database in EXCLUSIVE mode】
SYS@hfdb1> startup mount
alter system set db_recovery_file_dest_size=4G scope=both;
alter system set db_recovery_file_dest='+DGRECOVERY' scope=both;
alter database archivelog; --在一台上面执行
#关闭实例并重新启动数据库
SYS@hfdb1> shutdown immediate
切换几次归档,alter system switch logfile;
到归档目录下面确认下,归档日志是否正常生成。
这时候,需要把第二台rac打开
[oracle@hfdb90:/home/oracle] srvctl start database -d hfdb
--检查归档是否开启:
SYS@hfdb1> select status from v$instance;
STATUS
------------------------------------
OPEN
SYS@hfdb1> set linesize 200
SYS@hfdb1> col NAME format a50
SYS@hfdb1> show parameter recovery
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
db_recovery_file_dest string +DGRECOVERY
db_recovery_file_dest_size big integer 2G
recovery_parallelism integer 0
remote_recovery_file_dest string
SYS@hfdb1> show parameter cluster
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
cdb_cluster boolean FALSE
cdb_cluster_name string
cluster_database boolean TRUE
cluster_database_instances integer 2
cluster_interconnects
SYS@hfdb1> archive log list
Database log mode Archive Mode
Automatic archival Enabled
Archive destination USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence 19
Next log sequence to archive 20
Current log sequence 20
创建备用的重做日志文件
SYS@hfdb1> select a.thread#,a.group#,a.bytes/1024/1024,b.member from v$log a,v$logfile b where a.group#=b.group#;
THREAD# GROUP# A.BYTES/1024/1024 MEMBER
---------- ---------- ----------------- ---------------------------------------------------------------------------
1 2 200 +DGSYSTEM1/HFDB/ONLINELOG/group_2.264.1184265675
1 1 200 +DGSYSTEM1/HFDB/ONLINELOG/group_1.263.1184265675
2 3 200 +DGSYSTEM1/HFDB/ONLINELOG/group_3.267.1184266753
2 4 200 +DGSYSTEM1/HFDB/ONLINELOG/group_4.268.1184266763
#建议比在线重做日志文件多1组。
alter system set standby_file_management=manual scope=both sid='*';
alter database add standby logfile thread 1 group 10 '+DGSYSTEM' size 200m;
alter database add standby logfile thread 1 group 11 '+DGSYSTEM' size 200m;
alter database add standby logfile thread 1 group 12 '+DGSYSTEM' size 200m;
alter database add standby logfile thread 2 group 13 '+DGSYSTEM' size 200m;
alter database add standby logfile thread 2 group 14 '+DGSYSTEM' size 200m;
alter database add standby logfile thread 2 group 15 '+DGSYSTEM' size 200m;
alter system set standby_file_management=auto scope=both sid='*';
#检查重做日志文件:
SYS@hfdb1> select GROUP# ,MEMBER from v$logfile;
GROUP# MEMBER
---------- --------------------------------------------------------------------------------
2 +DGSYSTEM1/HFDB/ONLINELOG/group_2.264.1184265675
1 +DGSYSTEM1/HFDB/ONLINELOG/group_1.263.1184265675
3 +DGSYSTEM1/HFDB/ONLINELOG/group_3.267.1184266753
4 +DGSYSTEM1/HFDB/ONLINELOG/group_4.268.1184266763
10 +DGSYSTEM1/HFDB/ONLINELOG/group_10.270.1185369259
11 +DGSYSTEM1/HFDB/ONLINELOG/group_11.271.1185369267
12 +DGSYSTEM1/HFDB/ONLINELOG/group_12.272.1185369275
13 +DGSYSTEM1/HFDB/ONLINELOG/group_13.273.1185369283
14 +DGSYSTEM1/HFDB/ONLINELOG/group_14.274.1185369293
15 +DGSYSTEM1/HFDB/ONLINELOG/group_15.275.1185369301
10 rows selected.
SYS@hfdb1> select group#,THREAD#,status from v$log;
GROUP# THREAD# STATUS
---------- ---------- ------------------------------------------------
1 1 INACTIVE
2 1 CURRENT
3 2 CURRENT
4 2 INACTIVE
主库参数文件
--【alter system set这种方式是可以在rac两个库里面同时生效】
alter system set db_unique_name='hfdb' scope=spfile sid='*';
alter system set LOG_ARCHIVE_CONFIG='DG_CONFIG=(hfdb,dghfdb)' scope=both sid='*';
alter system set LOG_ARCHIVE_DEST_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=hfdb' scope=both sid='*';
alter system set LOG_ARCHIVE_DEST_2='SERVICE=dghfdb LGWR ASYNC VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=dghfdb' scope=both sid='*';
alter system set FAL_SERVER='dghfdb' scope=both sid='*';
alter system set fal_client='hfdb' scope=both sid='*';
alter system set DB_FILE_NAME_CONVERT='/oradata/dghfdb','+DGSYSTEM1/hfdb','/oradata/dghfdb','+DGDATA1/hfdb' scope=spfile sid='*';
alter system set LOG_FILE_NAME_CONVERT='/oradata/dghfdb','+DGSYSTEM1/hfdb' scope=spfile sid='*';
alter system set standby_file_management=AUTO scope=both sid='*';
#关闭重启数据库生效 --两个rac都要重启
shutdown immediate
startup
检查参数:
SYS@hfdb1> show parameter conver
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
db_file_name_convert string /oradata/dghfdb, +DGSYSTEM1/hfd
b, /oradata/dghfdb, +DGDATA1/
hfdb
log_file_name_convert string /oradata/dghfdb, +DGSYSTEM1/hfd
b
pdb_file_name_convert string
SYS@hfdb1> show parameter archive;
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
archive_lag_target integer 0
log_archive_config string DG_CONFIG=(hfdb,dghfdb)
log_archive_dest string
log_archive_dest_1 string LOCATION=USE_DB_RECOVERY_FILE_
DEST VALID_FOR=(ALL_LOGFILES,A
LL_ROLES) DB_UNIQUE_NAME=hfdb
log_archive_dest_10 string
log_archive_dest_11 string
log_archive_dest_12 string
log_archive_dest_13 string
log_archive_dest_14 string
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
log_archive_dest_15 string
log_archive_dest_16 string
log_archive_dest_17 string
log_archive_dest_18 string
log_archive_dest_19 string
log_archive_dest_2 string SERVICE=dghfdb LGWR ASYNC VALI
D_FOR=(ONLINE_LOGFILES,PRIMARY
_ROLE) DB_UNIQUE_NAME=dghfdb
log_archive_dest_20 string
log_archive_dest_21 string
log_archive_dest_22 string
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
log_archive_dest_23 string
log_archive_dest_24 string
log_archive_dest_25 string
log_archive_dest_26 string
log_archive_dest_27 string
log_archive_dest_28 string
log_archive_dest_29 string
log_archive_dest_3 string
log_archive_dest_30 string
log_archive_dest_31 string
log_archive_dest_4 string
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
log_archive_dest_5 string
log_archive_dest_6 string
log_archive_dest_7 string
log_archive_dest_8 string
log_archive_dest_9 string
log_archive_dest_state_1 string enable
log_archive_dest_state_10 string enable
log_archive_dest_state_11 string enable
log_archive_dest_state_12 string enable
log_archive_dest_state_13 string enable
log_archive_dest_state_14 string enable
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
log_archive_dest_state_15 string enable
log_archive_dest_state_16 string enable
log_archive_dest_state_17 string enable
log_archive_dest_state_18 string enable
log_archive_dest_state_19 string enable
log_archive_dest_state_2 string enable
log_archive_dest_state_20 string enable
log_archive_dest_state_21 string enable
log_archive_dest_state_22 string enable
log_archive_dest_state_23 string enable
log_archive_dest_state_24 string enable
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
log_archive_dest_state_25 string enable
log_archive_dest_state_26 string enable
log_archive_dest_state_27 string enable
log_archive_dest_state_28 string enable
log_archive_dest_state_29 string enable
log_archive_dest_state_3 string enable
log_archive_dest_state_30 string enable
log_archive_dest_state_31 string enable
log_archive_dest_state_4 string enable
log_archive_dest_state_5 string enable
log_archive_dest_state_6 string enable
NAME TYPE VALUE
------------------------------------ --------------------------------- ------------------------------
log_archive_dest_state_7 string enable
log_archive_dest_state_8 string enable
log_archive_dest_state_9 string enable
log_archive_duplex_dest string
log_archive_format string %t_%s_%r.dbf
log_archive_max_processes integer 4
log_archive_min_succeed_dest integer 1
log_archive_start boolean FALSE
log_archive_trace integer 0
SYS@hfdb1>
创建pfile文件,备库要用
SYS@hfdb1> create pfile ='/home/oracle/standby.pfile' from spfile;
File created.
主库配置监听与TNS
#TNS文件要配置别名解析 --两个rac主机都要配置
##/oracle/app/oracle/product/19c/db_1/network/admin
hfdb =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = hfdbscan)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = hfdb)
)
)
dghfdb =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.40.95)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = dghfdb)
)
)
RAC集群下dataguard备库配置
拷贝pfile文件到备库:
[oracle@hfdb90:/home/oracle]$scp standby.pfile 192.168.40.95:/home/oracle
--1)创建项目的目录
[oracle@hfdb95:/home/oracle]$mkdir -p /oracle/app/oracle/admin/dghfdb/adump
[oracle@hfdb95:/home/oracle]$mkdir -p /oradata/dghfdb/
[oracle@hfdb95:/home/oracle]$df -h
Filesystem Size Used Avail Use% Mounted on
/dev/sda2 72G 13G 60G 17% /
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 5.0G 0 5.0G 0% /dev/shm
tmpfs 3.9G 9.4M 3.9G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
/dev/sdb1 50G 7.2G 43G 15% /oracle
tmpfs 799M 12K 799M 1% /run/user/42
tmpfs 799M 0 799M 0% /run/user/0
[oracle@hfdb95:/home/oracle]$mkdir -p /oradata/dghfdb/datafile
[oracle@hfdb95:/home/oracle]$mkdir -p /oradata/dghfdb/controlfile
[oracle@hfdb95:/home/oracle]$mkdir -p /oradata/dghfdb/onlinelog
[oracle@hfdb95:/home/oracle]$mkdir -p /oradata/dghfdb/tempfile
准备备库参数文件
[oracle@hfdb95:/home/oracle]$vi standby.pfile
*.audit_trail='NONE'
*.compatible='19.0.0'
*.db_block_size=8192
*.db_domain=''
*.db_name='hfdb'
*.diagnostic_dest='/oracle/app/oracle'
*.open_cursors=2000
*.pga_aggregate_target=1073741824
*.processes=2000
*.remote_login_passwordfile='exclusive'
*.sessions=2205
*.sga_target=3355443200
*.audit_file_dest='/oracle/app/oracle/admin/dghfdb/adump'
*.control_files='/oradata/dghfdb/control01.ctl','/oradata/dghfdb/control02.ctl'
*.db_unique_name='dghfdb'
*.LOG_ARCHIVE_CONFIG='DG_CONFIG=(dghfdb,hfdb)'
*.LOG_ARCHIVE_DEST_1='LOCATION=USE_DB_RECOVERY_FILE_DEST VALID_FOR=(ALL_LOGFILES,ALL_ROLES) DB_UNIQUE_NAME=dghfdb'
*.LOG_ARCHIVE_DEST_2='SERVICE=hfdb LGWR ASYNC VALID_FOR=(ONLINE_LOGFILES,PRIMARY_ROLE) DB_UNIQUE_NAME=hfdb'
*.log_archive_format='%t_%s_%r.arc'
*.fal_client='dghfdb'
*.FAL_SERVER='hfdb'
*.DB_FILE_NAME_CONVERT='+DGDATA1/hfdb','/oradata/dghfdb','+DGSYSTEM1/hfdb','/oradata/dghfdb'
*.LOG_FILE_NAME_CONVERT='+DGSYSTEM1/hfdb' ,'/oradata/dghfdb'
*.standby_file_management=AUTO
*.db_recovery_file_dest_size=2g
*.db_recovery_file_dest='/archive'
*.undo_tablespace='UNDOTBS1'
*.enable_pluggable_database=true
[oracle@hfdb95:/oracle/app/oracle/product/19c/db_1/dbs]$cp /home/oracle/standby.pfile .
[oracle@hfdb95:/oracle/app/oracle/product/19c/db_1/dbs]$mv standby.pfile initdghfdb.ora
[oracle@hfdb95:/oracle/app/oracle/product/19c/db_1/dbs]$ll
total 12
-rw-rw----. 1 oracle oinstall 1544 Nov 18 14:02 hc_hfdb.dat
-rw-r--r--. 1 oracle oinstall 1078 Nov 18 14:02 initdghfdb.ora
-rw-r--r--. 1 oracle oinstall 3079 May 14 2015 init.ora
准备数据库密码文件
#密码文件可以拷贝主库的,也可以自己创建,但是19c创建密码对密码复杂度要求比较高,这里是拷贝密码的方式。
ASMCMD> ls
pwdhfdb.256.1184254885
pwdhfdb.257.1184265483
ASMCMD> pwcopy pwdhfdb.256.1184254885 /home/oracle/orapwdhfdb
ASMCMD-9463: operation failed due to lack of write permissions
ASMCMD> pwcopy pwdhfdb.256.1184254885 /home/grid/orapwdhfdb
copying +dgsystem1/hfdb/password/pwdhfdb.256.1184254885 -> /home/grid/orapwdhfdb
[root@hfdb90 grid]# scp orapwdhfdb 192.168.40.40:/oracle/app/oracle/product/19c/db_1/dbs  # 注:按环境说明备库IP应为192.168.40.95,此处记录的40.40疑为当时环境的IP,请以实际备库IP为准
The authenticity of host '192.168.40.40 (192.168.40.40)' can't be established.
ECDSA key fingerprint is SHA256:4aQmRUrp6cpVXTfHLhO+D7WfPLevTnVZo7UidRmm6TE.
ECDSA key fingerprint is MD5:7e:ec:25:e6:20:92:85:89:22:2a:f3:59:dd:ef:8f:7e.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '192.168.40.40' (ECDSA) to the list of known hosts.
root@192.168.40.40's password:
orapwdhfdb 100% 2048 1.1MB/s 00:00
[root@hfdb90 grid]#
[root@hfdb95 dbs]# ll
total 16
-rw-rw----. 1 oracle oinstall 1544 Nov 18 14:02 hc_hfdb.dat
-rw-r--r--. 1 oracle oinstall 1078 Nov 18 14:02 initdghfdb.ora
-rw-r--r--. 1 oracle oinstall 3079 May 14 2015 init.ora
-rw-r-----. 1 root root 2048 Nov 18 14:15 orapwdhfdb
[root@hfdb95 dbs]# chown oracle:oinstall orapwdhfdb
[root@hfdb95 dbs]# ll
total 16
-rw-rw----. 1 oracle oinstall 1544 Nov 18 14:02 hc_hfdb.dat
-rw-r--r--. 1 oracle oinstall 1078 Nov 18 14:02 initdghfdb.ora
-rw-r--r--. 1 oracle oinstall 3079 May 14 2015 init.ora
-rw-r-----. 1 oracle oinstall 2048 Nov 18 14:15 orapwdhfdb
[root@hfdb95 dbs]# mv orapwdhfdb orapwdghfdb
[root@hfdb95 dbs]# ll
total 16
-rw-rw----. 1 oracle oinstall 1544 Nov 18 14:02 hc_hfdb.dat
-rw-r--r--. 1 oracle oinstall 1078 Nov 18 14:02 initdghfdb.ora
-rw-r--r--. 1 oracle oinstall 3079 May 14 2015 init.ora
-rw-r-----. 1 oracle oinstall 2048 Nov 18 14:15 orapwdghfdb
备库配置监听与TNS
/oracle/app/oracle/product/19c/db_1/network/admin配置静态监听:listener.ora
# listener.ora Network Configuration File: /oracle/app/oracle/product/19c/db_1/network/admin/listener.ora
# Generated by Oracle configuration tools.
SID_LIST_LISTENER=
(SID_LIST=
(SID_DESC=
(GLOBAL_DBNAME=dghfdb)
(SID_NAME=dghfdb)
(ORACLE_HOME=/oracle/app/oracle/product/19c/db_1)
)
)
LISTENER =
(DESCRIPTION_LIST =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = hfdb95)(PORT = 1521))
(ADDRESS = (PROTOCOL = IPC)(KEY = EXTPROC1521))
)
)
配置备库tns:
[oracle@hfdb95:/oracle/app/oracle/product/19c/db_1/network/admin]$cat tnsnames.ora
# tnsnames.ora Network Configuration File: /oracle/app/oracle/product/19c/db_1/network/admin/tnsnames.ora
# Generated by Oracle configuration tools.
LISTENER_HFDB =
(ADDRESS = (PROTOCOL = TCP)(HOST = hfdb95)(PORT = 1521))
dghfdb =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.40.95)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = dghfdb)
)
)
hfdb =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.40.92)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = hfdb)
)
)
这里最好使用tnsping命令,分别在主库和备库上面,检查下dghfdb和hfdb是否正常。
修改备库的唯一数据库名:
[oracle@hferp164:/home/oracle]$cat ~/.bash_profile
# .bash_profile
# Get the aliases and functions
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
# User specific environment and startup programs
PATH=$PATH:$HOME/.local/bin:$HOME/bin
export PATH
PS1="[`whoami`@`hostname`:"'$PWD]$'
export PS1
export TMP=/tmp
export LANG=en_US.UTF8
export TMPDIR=$TMP
export ORACLE_UNQNAME=dghfdb
ORACLE_SID=dghfdb; export ORACLE_SID
ORACLE_BASE=/oracle/app/oracle; export ORACLE_BASE
ORACLE_HOME=$ORACLE_BASE/product/19c/db_1; export ORACLE_HOME
ORACLE_TERM=xterm; export ORACLE_TERM
NLS_DATE_FORMAT="yyyy-mm-dd HH24:MI:SS"; export NLS_DATE_FORMAT
NLS_LANG=AMERICAN_AMERICA.UTF8;export NLS_LANG
PATH=.:$PATH:$HOME/.local/bin:$HOME/bin:$ORACLE_HOME/bin; export PATH
THREADS_FLAG=native; export THREADS_FLAG
umask 022
alias sqlplus="rlwrap sqlplus"
alias rman="rlwrap rman"
[oracle@hferp164:/home/oracle]$检查: --【退出重新进入oracle用户,环境变量生效】
[oracle@hfdb95:/home/oracle]$env |grep ORACLE
ORACLE_UNQNAME=dghfdb
ORACLE_SID=dghfdb
ORACLE_BASE=/oracle/app/oracle
ORACLE_TERM=xterm
ORACLE_HOME=/oracle/app/oracle/product/19c/db_1
[oracle@hfdb95:/home/oracle]$
RAC集群下dataguard物理备库创建
启动备库为nomount状态:
[oracle@hfdb95:/backup]$orapwd file=/oracle/app/oracle/product/19c/db_1/dbs/orapwdghfdb password=oracle --【拷贝过来的密码进不去,可以用这个重建;文件名须为orapw<SID>,即orapwdghfdb】
#这边是验证前面创建的密码和tns是否有问题,遇到问题要解决,否则后面主库使用rman连接时也会报错
[oracle@hfdb95:/oracle/app/oracle/product/19c/db_1/network/admin]$sqlplus sys/oracle@dghfdb as sysdba
报错:TNS-01189: The listener could not authenticate the user -- /etc/hosts里面的IP地址配置错误,修改,重启监听。
报错:ORA-12154: TNS:could not resolve the connect identifier specified -- tns里面没有配置dghfdb
LISTENER_HFDB =
(ADDRESS = (PROTOCOL = TCP)(HOST = hfdb95)(PORT = 1521))
dghfdb =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = 192.168.40.40)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = dghfdb)
)
)
#备库需要启动数据库到nomount状态
[oracle@hfdb95:/oracle/app/oracle/product/19c/db_1/network/admin]$sqlplus sys/oracle@dghfdb as sysdba
SQL*Plus: Release 19.0.0.0.0 - Production on Mon Nov 18 15:20:01 2024
Version 19.3.0.0.0
Copyright (c) 1982, 2019, Oracle. All rights reserved.
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.3.0.0.0
SYS@dghfdb> startup nomount
[oracle@hfdb90:/soft/hfdb]$rman target sys/oracle auxiliary sys/oracle@dghfdb
Recovery Manager: Release 19.0.0.0.0 - Production on Fri Jan 10 14:08:30 2025
Version 19.22.0.0.0
Copyright (c) 1982, 2019, Oracle and/or its affiliates. All rights reserved.
connected to target database: HFDB (DBID=784112156)
connected to auxiliary database: HFDB (not mounted)
RMAN> duplicate target database for standby from active database nofilenamecheck;
Starting Duplicate Db at 2025-01-10 14:08:42
using target database control file instead of recovery catalog
allocated channel: ORA_AUX_DISK_1
channel ORA_AUX_DISK_1: SID=766 device type=DISK
contents of Memory Script:
{
backup as copy reuse
passwordfile auxiliary format '/oracle/app/oracle/product/19c/db_1/dbs/orapwdghfdb' ;
}
executing Memory Script
Starting backup at 2025-01-10 14:08:44
allocated channel: ORA_DISK_1
channel ORA_DISK_1: SID=1544 instance=hfdb1 device type=DISK
Finished backup at 2025-01-10 14:08:46
contents of Memory Script:
{
backup as copy current controlfile for standby auxiliary format '/oradata/dghfdb/control01.ctl';
restore clone primary controlfile to '/oradata/dghfdb/control02.ctl' from
'/oradata/dghfdb/control01.ctl';
}
executing Memory Script
Starting backup at 2025-01-10 14:08:46
using channel ORA_DISK_1
channel ORA_DISK_1: starting datafile copy
copying standby control file
output file name=/oradata/dghfdb/control01.ctl tag=TAG20250110T140846
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:01
Finished backup at 2025-01-10 14:08:51
Starting restore at 2025-01-10 14:08:51
using channel ORA_AUX_DISK_1
channel ORA_AUX_DISK_1: copied control file copy
Finished restore at 2025-01-10 14:08:52
contents of Memory Script:
{
sql clone 'alter database mount standby database';
}
executing Memory Script
sql statement: alter database mount standby database
Using previous duplicated file /oradata/dghfdb/datafile/system.257.1189796855 for datafile 1 with checkpoint SCN of 2740604
Using previous duplicated file /oradata/dghfdb/datafile/sysaux.258.1189796919 for datafile 3 with checkpoint SCN of 2740774
Using previous duplicated file /oradata/dghfdb/datafile/undotbs1.259.1189796955 for datafile 4 with checkpoint SCN of 2740937
contents of Memory Script:
{
set newname for tempfile 1 to
"/oradata/dghfdb/tempfile/temp.264.1189797055";
set newname for tempfile 2 to
"/oradata/dghfdb/2b1d2c09cca0cbc2e0635a28a8c01000/tempfile/temp.268.1189798711";
set newname for tempfile 3 to
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/tempfile/temp.276.1189800489";
switch clone tempfile all;
set newname for datafile 1 to
"/oradata/dghfdb/datafile/system.257.1189796855";
set newname for datafile 3 to
"/oradata/dghfdb/datafile/sysaux.258.1189796919";
set newname for datafile 4 to
"/oradata/dghfdb/datafile/undotbs1.259.1189796955";
set newname for datafile 5 to
"/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/system.265.1189798669";
set newname for datafile 6 to
"/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/sysaux.266.1189798669";
set newname for datafile 7 to
"/oradata/dghfdb/datafile/users.260.1189796957";
set newname for datafile 8 to
"/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/undotbs1.267.1189798669";
set newname for datafile 9 to
"/oradata/dghfdb/datafile/undotbs2.269.1189799129";
set newname for datafile 10 to
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/system.274.1189800445";
set newname for datafile 11 to
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/sysaux.275.1189800445";
set newname for datafile 12 to
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undotbs1.273.1189800443";
set newname for datafile 13 to
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undo_2.277.1189800557";
set newname for datafile 14 to
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/users.278.1189800573";
backup as copy reuse
datafile 5 auxiliary format
"/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/system.265.1189798669" datafile
6 auxiliary format
"/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/sysaux.266.1189798669" datafile
7 auxiliary format
"/oradata/dghfdb/datafile/users.260.1189796957" datafile
8 auxiliary format
"/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/undotbs1.267.1189798669" datafile
9 auxiliary format
"/oradata/dghfdb/datafile/undotbs2.269.1189799129" datafile
10 auxiliary format
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/system.274.1189800445" datafile
11 auxiliary format
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/sysaux.275.1189800445" datafile
12 auxiliary format
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undotbs1.273.1189800443" datafile
13 auxiliary format
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undo_2.277.1189800557" datafile
14 auxiliary format
"/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/users.278.1189800573" ;
sql 'alter system archive log current';
}
executing Memory Script
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
renamed tempfile 1 to /oradata/dghfdb/tempfile/temp.264.1189797055 in control file
renamed tempfile 2 to /oradata/dghfdb/2b1d2c09cca0cbc2e0635a28a8c01000/tempfile/temp.268.1189798711 in control file
renamed tempfile 3 to /oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/tempfile/temp.276.1189800489 in control file
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
executing command: SET NEWNAME
Starting backup at 2025-01-10 14:08:59
using channel ORA_DISK_1
channel ORA_DISK_1: starting datafile copy
input datafile file number=00011 name=+DGSYSTEM/HFDB/2B1D9602BD721A65E0635A28A8C063AD/DATAFILE/sysaux.275.1189800445
output file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/sysaux.275.1189800445 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:45
channel ORA_DISK_1: starting datafile copy
input datafile file number=00006 name=+DGSYSTEM/HFDB/86B637B62FE07A65E053F706E80A27CA/DATAFILE/sysaux.266.1189798669
output file name=/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/sysaux.266.1189798669 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:35
channel ORA_DISK_1: starting datafile copy
input datafile file number=00010 name=+DGSYSTEM/HFDB/2B1D9602BD721A65E0635A28A8C063AD/DATAFILE/system.274.1189800445
output file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/system.274.1189800445 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:35
channel ORA_DISK_1: starting datafile copy
input datafile file number=00005 name=+DGSYSTEM/HFDB/86B637B62FE07A65E053F706E80A27CA/DATAFILE/system.265.1189798669
output file name=/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/system.265.1189798669 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:35
channel ORA_DISK_1: starting datafile copy
input datafile file number=00008 name=+DGSYSTEM/HFDB/86B637B62FE07A65E053F706E80A27CA/DATAFILE/undotbs1.267.1189798669
output file name=/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/undotbs1.267.1189798669 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:25
channel ORA_DISK_1: starting datafile copy
input datafile file number=00012 name=+DGSYSTEM/HFDB/2B1D9602BD721A65E0635A28A8C063AD/DATAFILE/undotbs1.273.1189800443
output file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undotbs1.273.1189800443 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:25
channel ORA_DISK_1: starting datafile copy
input datafile file number=00013 name=+DGSYSTEM/HFDB/2B1D9602BD721A65E0635A28A8C063AD/DATAFILE/undo_2.277.1189800557
output file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undo_2.277.1189800557 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:25
channel ORA_DISK_1: starting datafile copy
input datafile file number=00009 name=+DGSYSTEM/HFDB/DATAFILE/undotbs2.269.1189799129
output file name=/oradata/dghfdb/datafile/undotbs2.269.1189799129 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:03
channel ORA_DISK_1: starting datafile copy
input datafile file number=00007 name=+DGSYSTEM/HFDB/DATAFILE/users.260.1189796957
output file name=/oradata/dghfdb/datafile/users.260.1189796957 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:01
channel ORA_DISK_1: starting datafile copy
input datafile file number=00014 name=+DGSYSTEM/HFDB/2B1D9602BD721A65E0635A28A8C063AD/DATAFILE/users.278.1189800573
output file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/users.278.1189800573 tag=TAG20250110T140859
channel ORA_DISK_1: datafile copy complete, elapsed time: 00:00:01
Finished backup at 2025-01-10 14:12:54
sql statement: alter system archive log current
contents of Memory Script:
{
backup as copy reuse
archivelog like "+DGRECOVERY/HFDB/ARCHIVELOG/2025_01_10/thread_1_seq_20.269.1190038375" auxiliary format
"/archive/DGHFDB/archivelog/2025_01_10/o1_mf_1_20_%u_.arc" archivelog like
"+DGRECOVERY/HFDB/ARCHIVELOG/2025_01_10/thread_2_seq_7.270.1190038379" auxiliary format
"/archive/DGHFDB/archivelog/2025_01_10/o1_mf_2_7_%u_.arc" ;
catalog clone start with "/archive/DGHFDB/archivelog/";
catalog clone datafilecopy "/oradata/dghfdb/datafile/system.257.1189796855",
"/oradata/dghfdb/datafile/sysaux.258.1189796919",
"/oradata/dghfdb/datafile/undotbs1.259.1189796955";
switch clone datafile 1 to datafilecopy
"/oradata/dghfdb/datafile/system.257.1189796855";
switch clone datafile 3 to datafilecopy
"/oradata/dghfdb/datafile/sysaux.258.1189796919";
switch clone datafile 4 to datafilecopy
"/oradata/dghfdb/datafile/undotbs1.259.1189796955";
switch clone datafile all;
}
executing Memory Script
Starting backup at 2025-01-10 14:13:03
using channel ORA_DISK_1
channel ORA_DISK_1: starting archived log copy
input archived log thread=2 sequence=7 RECID=13 STAMP=1190038383
output file name=/archive/DGHFDB/archivelog/2025_01_10/o1_mf_2_7_183et2rg_.arc RECID=0 STAMP=0
channel ORA_DISK_1: archived log copy complete, elapsed time: 00:00:16
channel ORA_DISK_1: starting archived log copy
input archived log thread=1 sequence=20 RECID=12 STAMP=1190038378
output file name=/archive/DGHFDB/archivelog/2025_01_10/o1_mf_1_20_193et2s0_.arc RECID=0 STAMP=0
channel ORA_DISK_1: archived log copy complete, elapsed time: 00:00:07
Finished backup at 2025-01-10 14:13:27
searching for all files that match the pattern /archive/DGHFDB/archivelog/
List of Files Unknown to the Database
=====================================
File Name: /archive/DGHFDB/archivelog/2025_01_10/o1_mf_2_7_183et2rg_.arc
File Name: /archive/DGHFDB/archivelog/2025_01_10/o1_mf_1_20_193et2s0_.arc
cataloging files...
cataloging done
List of Cataloged Files
=======================
File Name: /archive/DGHFDB/archivelog/2025_01_10/o1_mf_2_7_183et2rg_.arc
File Name: /archive/DGHFDB/archivelog/2025_01_10/o1_mf_1_20_193et2s0_.arc
cataloged datafile copy
datafile copy file name=/oradata/dghfdb/datafile/system.257.1189796855 RECID=4 STAMP=1190038407
cataloged datafile copy
datafile copy file name=/oradata/dghfdb/datafile/sysaux.258.1189796919 RECID=5 STAMP=1190038407
cataloged datafile copy
datafile copy file name=/oradata/dghfdb/datafile/undotbs1.259.1189796955 RECID=6 STAMP=1190038407
datafile 1 switched to datafile copy
input datafile copy RECID=4 STAMP=1190038407 file name=/oradata/dghfdb/datafile/system.257.1189796855
datafile 3 switched to datafile copy
input datafile copy RECID=5 STAMP=1190038407 file name=/oradata/dghfdb/datafile/sysaux.258.1189796919
datafile 4 switched to datafile copy
input datafile copy RECID=6 STAMP=1190038407 file name=/oradata/dghfdb/datafile/undotbs1.259.1189796955
datafile 5 switched to datafile copy
input datafile copy RECID=7 STAMP=1190038408 file name=/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/system.265.1189798669
datafile 6 switched to datafile copy
input datafile copy RECID=8 STAMP=1190038408 file name=/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/sysaux.266.1189798669
datafile 7 switched to datafile copy
input datafile copy RECID=9 STAMP=1190038408 file name=/oradata/dghfdb/datafile/users.260.1189796957
datafile 8 switched to datafile copy
input datafile copy RECID=10 STAMP=1190038408 file name=/oradata/dghfdb/86b637b62fe07a65e053f706e80a27ca/datafile/undotbs1.267.1189798669
datafile 9 switched to datafile copy
input datafile copy RECID=11 STAMP=1190038408 file name=/oradata/dghfdb/datafile/undotbs2.269.1189799129
datafile 10 switched to datafile copy
input datafile copy RECID=12 STAMP=1190038408 file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/system.274.1189800445
datafile 11 switched to datafile copy
input datafile copy RECID=13 STAMP=1190038408 file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/sysaux.275.1189800445
datafile 12 switched to datafile copy
input datafile copy RECID=14 STAMP=1190038408 file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undotbs1.273.1189800443
datafile 13 switched to datafile copy
input datafile copy RECID=15 STAMP=1190038408 file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/undo_2.277.1189800557
datafile 14 switched to datafile copy
input datafile copy RECID=16 STAMP=1190038408 file name=/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/datafile/users.278.1189800573
Finished Duplicate Db at 2025-01-10 14:15:48
RMAN>
查看备库(dghfdb):
SYS@dghfdb> select status from v$instance;
STATUS
------------------------------------
MOUNTED
SYS@dghfdb> select member from v$logfile;
MEMBER
--------------------------------------------------------------------------------
/oradata/dghfdb/onlinelog/group_2.264.1184265675
/oradata/dghfdb/onlinelog/group_1.263.1184265675
/oradata/dghfdb/onlinelog/group_3.267.1184266753
/oradata/dghfdb/onlinelog/group_4.268.1184266763
/oradata/dghfdb/onlinelog/group_10.270.1185369259
/oradata/dghfdb/onlinelog/group_11.271.1185369267
/oradata/dghfdb/onlinelog/group_12.272.1185369275
/oradata/dghfdb/onlinelog/group_13.273.1185369283
/oradata/dghfdb/onlinelog/group_14.274.1185369293
/oradata/dghfdb/onlinelog/group_15.275.1185369301
10 rows selected.
SYS@dghfdb> select name from v$tempfile;
NAME
--------------------------------------------------------------------------------
/oradata/dghfdb/tempfile/temp.265.1184265697
SYS@dghfdb> show parameter control
NAME TYPE
------------------------------------ ---------------------------------
VALUE
------------------------------
control_file_record_keep_time integer
7
control_files string
/oradata/dghfdb/control01.ctl,
/oradata/dghfdb/control02.ctl
control_management_pack_access string
DIAGNOSTIC+TUNING
SYS@dghfdb> select name from v$datafile;
NAME
--------------------------------------------------------------------------------
/oradata/dghfdb/datafile/system.258.1184265513
/oradata/dghfdb/datafile/sysaux.259.1184265569
/oradata/dghfdb/datafile/undotbs1.260.1184265603
/oradata/dghfdb/datafile/undotbs2.266.1184266663
/oradata/dghfdb/datafile/users.261.1184265607
SYS@dghfdb>
同步:
SQL> alter database recover managed standby database disconnect from session; -- 【在备库上面启动MRP进程,同步没有同步过来的文件】
SQL> alter database recover managed standby database cancel;
SQL> archive log list
Database log mode Archive Mode
Automatic archival Enabled
Archive destination USE_DB_RECOVERY_FILE_DEST
Oldest online log sequence 0
Next log sequence to archive 0
Current log sequence 0
使用这个检查:
select thread#, max(sequence#) "Last Primary Seq Generated" from v$archived_log val,
v$database vdb where val.resetlogs_change# = vdb.resetlogs_change#
group by thread# order by 1;
打开数据库:
SQL> alter database open;
打开同步:
SQL> alter database recover managed standby database disconnect from session;
SQL> select name,open_mode,protection_mode,database_role,log_mode from v$database;
NAME OPEN_MODE PROTECTION_MODE DATABASE_ROLE
--------- -------------------- -------------------- ----------------
LOG_MODE
------------
HFERPDB READ ONLY WITH APPLY MAXIMUM PERFORMANCE PHYSICAL STANDBY
ARCHIVELOG
测试:
SQL> create table hftable1 as select name from v$datafile;
SQL> alter system switch logfile;
#ADG是可以正常同步的,但是备库执行 archive log list 时显示都为 0,因此比较好奇,于是查询mos发现:
#参考MOS文档:Archive Log List Showing 0 At Standby, But Standby Is Completely In Sync With Primary! (Doc ID 2041137.1)
#12C针对物理备库redo log做出了调整,可以通过以下SQL查看:
select thread#, max(sequence#) "Last Primary Seq Generated" from v$archived_log val, v$database vdb where val.resetlogs_change# = vdb.resetlogs_change# group by thread# order by 1;
1. 报错1:
Starting backup at 2025-01-09 21:06:16 using channel ORA_DISK_1 channel ORA_DISK_1: starting datafile copy RMAN-00571: =========================================================== RMAN-00569: =============== ERROR MESSAGE STACK FOLLOWS =============== RMAN-00571: =========================================================== RMAN-03002: failure of Duplicate Db command at 01/09/2025 21:06:20 RMAN-05501: aborting duplication of target database RMAN-03015: error occurred in stored script Memory Script RMAN-03009: failure of backup command on ORA_DISK_1 channel at 01/09/2025 21:06:19 ORA-00245: control file backup failed; in Oracle RAC, target might not be on shared storage |
解决1;
RMAN> CONFIGURE SNAPSHOT CONTROLFILE NAME TO '+dgsystem/hfdb/snapcf_dwdb1.f';
2. 报错2:
解决2:ORA-65093:multitenant container database not set up properly
备库参数文件中添加*.enable_pluggable_database=true参数。
3. 报错3:
解决3:
ORA-17628:Oracle error 19505 returned by remote Oracle server
ORA-19505:failed to identify file
由于主库是pdb数据库,备库中有部分目录没有创建,在备库中创建好对应的目录后,执行正常。
[oracle@hfdb95:/oradata/dghfdb]$mkdir -p 2b1d9602bd721a65e0635a28a8c063ad/datafile/
[oracle@hfdb95:/oradata/dghfdb]$mkdir -p 86b637b62fe07a65e053f706e80a27ca/datafile/
注意:这里创建的目录名称不能使用大写字母,必须与备库文件路径中的小写 GUID 保持一致。
DataGuard日常检查与数据同步测试
alter database recover managed standby database disconnect from session; --【开启恢复管理,开启后启动mrp进程】
alter database recover managed standby database cancel; --【取消恢复管理】
#主库1:
SYS@hfdb1> SELECT UNIQUE THREAD#, MAX(SEQUENCE#) OVER(PARTITION BY THREAD#) LAST FROM V$ARCHIVED_LOG;
THREAD# LAST
---------- ----------
2 14
1 27
#主库2:
SYS@hfdb2> SELECT UNIQUE THREAD#, MAX(SEQUENCE#) OVER(PARTITION BY THREAD#) LAST FROM V$ARCHIVED_LOG;
THREAD# LAST
---------- ----------
2 14
1 27
#备库
SYS@dghfdb> SELECT UNIQUE THREAD#, MAX(SEQUENCE#) OVER(PARTITION BY THREAD#) LAST FROM V$ARCHIVED_LOG;
THREAD# LAST
---------- ----------
2 14
1 27
#备库有没有日志丢失
SYS@dghfdb> SELECT THREAD#, LOW_SEQUENCE#, HIGH_SEQUENCE# FROM V$ARCHIVE_GAP;
#查询没有被应用的归档日志
SYS@hfdb1> select thread#,sequence#,first_time,next_time,applied from v$archived_log where applied='NO';
#测试:
create tablespace hfdb1 datafile '+dgdata1' size 10m autoextend off;
create user hfdb1 identified by hfdb1 default tablespace hfdb1 temporary tablespace temp;
grant dba to hfdb1;
conn hfdb1/hfdb1;
create table hfdb1.test (c1 varchar2(10),c2 number);
insert into hfdb1.test values('1ab1','424');
commit;
conn / as sysdba
SQL> select name from v$datafile;
SQL> select dest_name,status,error from v$archive_dest;
select process,client_process,sequence#,status from v$managed_standby;
select archived_thread#,archived_seq#,applied_thread#,applied_seq#,dest_name from v$archive_dest_status;
SQL> alter database recover managed standby database disconnect from session; --归档后同步 --【生产环境建议这个】
SQL> alter database recover managed standby database cancel; --取消
SQL> alter database recover managed standby database using current logfile disconnect; --实时同步--【不需要切换日志】
dg 中临时表的处理
在主库中检查 tempfile 参数
[oracle@hfdb90:/home/oracle]$sqlplus / as sysdba
SQL*Plus: Release 19.0.0.0.0 - Production on Mon Jan 13 08:53:06 2025
Version 19.22.0.0.0
Copyright (c) 1982, 2023, Oracle. All rights reserved.
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.22.0.0.0
SYS@hfdb1> select name from v$tempfile;
NAME
--------------------------------------------------------------------------------
+DGSYSTEM/HFDB/TEMPFILE/temp.264.1189797055
+DGSYSTEM/HFDB/2B1D2C09CCA0CBC2E0635A28A8C01000/TEMPFILE/temp.268.1189798711
+DGSYSTEM/HFDB/2B1D9602BD721A65E0635A28A8C063AD/TEMPFILE/temp.276.1189800489
在备库中检查
[oracle@hfdb95:/home/oracle]$sqlplus / as sysdba
SQL*Plus: Release 19.0.0.0.0 - Production on Mon Jan 13 08:18:16 2025
Version 19.22.0.0.0
Copyright (c) 1982, 2023, Oracle. All rights reserved.
Connected to:
Oracle Database 19c Enterprise Edition Release 19.0.0.0.0 - Production
Version 19.22.0.0.0
SQL> select status from v$instance;
STATUS
------------------------------------
OPEN
SQL> select name from v$tempfile;
NAME
--------------------------------------------------------------------------------
/oradata/dghfdb/tempfile/temp.264.1189797055
/oradata/dghfdb/2b1d2c09cca0cbc2e0635a28a8c01000/tempfile/temp.268.1189798711
/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/tempfile/temp.276.1189800489
[oracle@hfdb95:/home/oracle]$mkdir -p /oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/tempfile/
[oracle@hfdb95:/home/oracle]$mkdir -p /oradata/dghfdb/2b1d2c09cca0cbc2e0635a28a8c01000/tempfile/
如果备库和主库不一致,需要把主库中的临时表删除,在主库中新建临时表。通过 adg 同步到备库。
--在备库中删除
alter session set container =pdb$seed;
alter database tempfile '/oradata/dghfdb/2b1d2c09cca0cbc2e0635a28a8c01000/tempfile/temp.268.1189798711' drop including datafiles;
--在备库中添加
alter session set container =pdb$seed;
alter tablespace temp add tempfile '/oradata/dghfdb/2b1d2c09cca0cbc2e0635a28a8c01000/tempfile/temp.268.1189798711' size 200m autoextend off;
--在备库中删除
alter session set container =pdb;
alter database tempfile '/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/tempfile/temp.276.1189800489' drop including datafiles;
--在备库中添加
alter session set container =pdb;
alter tablespace temp add tempfile '/oradata/dghfdb/2b1d9602bd721a65e0635a28a8c063ad/tempfile/temp.276.1189800489' size 200M autoextend off;
备份恢复中归档保留策略
主+备都要执行:
alter system set "_log_deletion_policy"= ALL scope=both sid='*';
CONFIGURE ARCHIVELOG DELETION POLICY TO APPLIED ON ALL STANDBY;
RAC dg环境下,如何启停数据库
如何关机
先关主库(两个实例),再关备库
主库:
Shutdown immediate --(srvctl stop database -d hfdb)
停止监听:lsnrctl stop
备库:
SQL> alter database recover managed standby database cancel; --取消日志应用进程
Shutdown immediate
停止监听:lsnrctl stop
如何开机
先开备库,再开主库
主库:
开启监听:lsnrctl start
startup --(srvctl start database -d hfdb)
备库:
开启监听:lsnrctl start
Startup
SQL> alter database recover managed standby database disconnect from session;
检查:
select thread#, max(sequence#) "Last Primary Seq Generated" from v$archived_log val, v$database vdb where val.resetlogs_change# = vdb.resetlogs_change# group by thread# order by 1;
1.1. 备库切换成Snapshot
Oracle 11g 中的 Data Guard 不仅引入了 Active Data Guard 实时查询特性,还提供了 snapshot standby 数据库的功能。快照备库可以临时提供读写能力,以便线上业务系统进行测试相关场景任务。
snapshot standby database 会从主库接收和归档 redo 数据,但是不会对 redo 数据进行apply。仅当snapshot standby的角色转换回physical standby database 的时候才会应用之前接收的redo数据。 然后擦除作为snapshot standby 数据库时所有的DML和DDL操作。
Snapshot standby 数据库不能直接进行 switchover 或者 failover 操作,必须先从 snapshot standby 转换回 physical standby 之后才可以进行角色切换。
Snapshot standby 不支持最大保护模式(最大保护模式需要实时应用)
snapshot standby实际上是基于flashback database的运行机制,因此必须设置闪回区目录及大小。确保fast recovery area已经配置并确保大小合适,但不一定需要启动flashback database功能。
-- 查看快速恢复闪回区
show parameter recover
-- 设置闪回区的大小和路径(如果已经配置此步骤可以忽略)
闪回区的大小根据磁盘空间设置,闪回区的目录注意权限
alter system set db_recovery_file_dest_size=20G scope=both;
alter system set db_recovery_file_dest='/oradata/flashback' scope=both;
-- 备库关闭日志应用
alter database recover managed standby database cancel;
-- 关闭job
alter system set job_queue_processes=0;
-- 一致性关库并启动到mount(只有mount状态才允许切换)
shutdown immediate
startup mount;
-- 切换为快照数据库
alter database convert to snapshot standby;
-- 启动到open
alter database open;
-- 查看状态 发现已经变为快照备库
select open_mode,name,DATABASE_ROLE,SWITCHOVER_STATUS from v$database;
ERROR at line 1:
ORA-32001: write to SPFILE requested but no SPFILE is in use
CREATE SPFILE FROM PFILE;
alter database recover managed standby database cancel;
shutdown immediate
startup
show parameter spfile;
ERROR at line 1:
ORA-16038: log 4 sequence# 14 cannot be archived
ORA-19809: limit exceeded for recovery files
ORA-00312: online log 4 thread 2:
'+DGSYSTEM/HFDB/ONLINELOG/group_4.271.1189800153'
SYS@hfdb1> select * from v$recovery_file_dest;
NAME
--------------------------------------------------------------------------------
SPACE_LIMIT SPACE_USED SPACE_RECLAIMABLE NUMBER_OF_FILES CON_ID
----------- ---------- ----------------- --------------- ----------
+DGRECOVERY
2147483648 2123366400 0 16 0
SYS@hfdb1> alter system set db_recovery_file_dest_size=4G scope=both;
System altered.
SYS@hfdb1> select * from v$recovery_file_dest;
NAME
--------------------------------------------------------------------------------
SPACE_LIMIT SPACE_USED SPACE_RECLAIMABLE NUMBER_OF_FILES CON_ID
----------- ---------- ----------------- --------------- ----------
+DGRECOVERY
4294967296 2123366400 0 16 0
SYS@hfdb1> alter system archive log current;
System altered.
SYS@hfdb1>
测试 备库可以读写:
SQL> set line 1000
SQL> col open_mode for a20
SQL> col database_role for a20
SQL> col switchover_status for a20
SQL> select open_mode,name,database_role,switchover_status from v$database;
OPEN_MODE NAME DATABASE_ROLE SWITCHOVER_STATUS
-------------------- --------------------------- -------------------- --------------------
READ WRITE HFDB SNAPSHOT STANDBY NOT ALLOWED
SQL> alter session set container =pdb;
SQL> alter database open;
-- 备库可以读写
SQL> update hftable1 set name ='hefei2' where rownum =1;
1 row updated.
切换成物理备库
-- 一致性关闭后启动到mount
shutdown immediate
startup mount
-- 切回成物理备库
alter database convert to physical standby;
-- 启动数据库 shutdown immediate startup
-- 启动实时日志应用
alter database recover managed standby database using current logfile disconnect from session;
-- 开启job
alter system set job_queue_processes=50;
-- 查看日志应用情况
select sequence#,applied from v$archived_log order by sequence#;
主备切换( hfdb90/91 > hfdb95)
switchover --有计划的,正常的切换,不会破坏数据库dg关系。
failover --非计划的,破坏性的切换,切换之后,要重做DG。--【11g后有个对库的闪回,做一个闪回点,后再failover ,只有闪回空间足够大,是有机会可以闪回的】
switchover(方式一)
主库rac备库fs:
-- switchover
主库rac备库fs:
col name for a20
col open_mode for a20
col PROTECTION_MODE for a20
col DATABASE_ROLE for a20
col SWITCHOVER_STATUS for a20
set linesize 1000
SYS@dghfdb> select NAME,OPEN_MODE,PROTECTION_MODE,DATABASE_ROLE,SWITCHOVER_STATUS from v$database;
NAME OPEN_MODE PROTECTION_MODE DATABASE_ROLE SWITCHOVER_STATUS
-------------------- -------------------- -------------------- -------------------- --------------------
HFDB READ ONLY WITH APPLY MAXIMUM PERFORMANCE PHYSICAL STANDBY NOT ALLOWED
HFDB1@hfdb1> select NAME,OPEN_MODE,PROTECTION_MODE,DATABASE_ROLE,SWITCHOVER_STATUS from v$database;
NAME OPEN_MODE PROTECTION_MODE DATABASE_ROLE SWITCHOVER_STATUS
-------------------- -------------------- -------------------- -------------------- --------------------
HFDB READ WRITE MAXIMUM PERFORMANCE PRIMARY TO STANDBY
SYS@hfdb2> select NAME,OPEN_MODE,PROTECTION_MODE,DATABASE_ROLE,SWITCHOVER_STATUS from v$database;
NAME OPEN_MODE PROTECTION_MODE DATABASE_ROLE SWITCHOVER_STATUS
-------------------- -------------------- -------------------- -------------------- --------------------
HFDB READ WRITE MAXIMUM PERFORMANCE PRIMARY TO STANDBY
HFDB1@hfdb1> SELECT THREAD#, LOW_SEQUENCE#, HIGH_SEQUENCE# FROM V$ARCHIVE_GAP;
no rows selected
SYS@dghfdb> select thread#,sequence#,first_time,next_time,applied from v$archived_log where applied='NO';
no rows selected
#对主库进行切换,rac集群关闭第二个节点。(如果SWITCHOVER_STATUS的值为TO STANDBY或者为SESSIONS ACTIVE都可以切换至备库)
--主库90 #关闭rac2
SYS@hfdb2> shutdown immediate
alter database commit to switchover to physical standby;
or:
alter database commit to switchover to physical standby with session shutdown;
startup; --这时第二个实例也要启动
SYS@hfdb1> startup
SYS@hfdb2> startup
——————————————————————————————报错解决——————————————————————————————————————————
ORA-10458: standby database requires recovery
ORA-01194: file 1 needs more recovery to be consistent
ORA-01110: data file 1: '+DGSYSTEM1/HFDB/DATAFILE/system.258.1184265513'
SYS@hfdb1> recover managed standby database using current logfile disconnect from session;
Media recovery complete.
SYS@hfdb1> alter database open read only;
ERROR at line 1:
ORA-10458: standby database requires recovery
ORA-01194: file 1 needs more recovery to be consistent
ORA-01110: data file 1: '+DGSYSTEM1/HFDB/DATAFILE/system.261.1185439411'
这里检查主库(95)报错:
ARC2 (PID:27173): FAL archive failed with error 12154 (see trace for details)
ARC2 (PID:27173): FAL archive failed, archiver continuing
和
krsu_dump_oci_emsg: Detailed OCI error val is 12154 and errmsg is ORA-12154: TNS:could not resolve the connect identifier specified
#这个地方的问题是:95上面的tns不能到达主库(scanIP 92)。重新配置tns后,再把(90和91库)停止,再startup恢复。
--备库95:
alter database commit to switchover to primary;
or:
SYS@dghfdb> alter database commit to switchover to primary with session shutdown;
SYS@dghfdb> alter database open;
--90
alter database recover managed standby database disconnect from session;
--alter database recover managed standby database cancel;
insert into hfdb1.itpux values('itpux','4');
commit;
alter system archive log current;
select * from hfdb1.itpux;
#
select * from hfdb1.test ;
insert into hfdb1.test values('ab3','2');
SYS@dghfdb> select * from hfdb1.test ;
C1 C2
------------------------------ ----------
abc 1
SYS@dghfdb> insert into hfdb1.test values('ab3','2');
1 row created.
SYS@dghfdb> commit;
SYS@hfdb1> select * from hfdb1.test ;
C1 C2
------------------------------ ----------
abc 1
ab3 2
SYS@hfdb1>
主备回切FS>RAC ( 95> 90)
select NAME,OPEN_MODE,PROTECTION_MODE,DATABASE_ROLE,SWITCHOVER_STATUS from v$database;
SELECT THREAD#, LOW_SEQUENCE#, HIGH_SEQUENCE# FROM V$ARCHIVE_GAP;
select thread#,sequence#,first_time,next_time,applied from v$archived_log where applied='NO';
--主库95
conn / as sysdba
alter database commit to switchover to physical standby;
or:
alter database commit to switchover to physical standby with session shutdown;
startup
--备库90
关闭实例二
SYS@hfdb2> shutdown immediate
alter database commit to switchover to primary;
or:
alter database commit to switchover to primary with session shutdown;
alter database open;
--打开两台rac
--95:
alter database recover managed standby database disconnect from session;
insert into hfdb1.itpux values('itpux','5');
commit;
alter system archive log current;
failover
假设物理主库宕机,无法启动,紧急启用备库。直接在备库上操作,将备库转换为主库角色。备库上执行下面四条命令即可
SQL > alter database recover managed standby database finish;
SQL > alter database commit to switchover to primary;
SQL > shutdown immediate;
SQL > startup;
具体操作
直接备库failover切换
SQL> alter database recover managed standby database finish;
Database altered.
再直接备库转换为主库
SQL> alter database commit to switchover to primary;
关闭备库
SQL> shutdown immediate
ORA-01109: database not open
Database dismounted.
ORACLE instance shut down.
开启备库
SQL> startup
查看转换角色
- SQL> select open_mode,database_role,switchover_status from v$database;
- OPEN_MODE DATABASE_ROLE SWITCHOVER_STATUS
- ---------- ---------------- -------------------- ------------------------------------
- READ WRITE PRIMARY NOT ALLOWED
现在备库成为了主库角色,failover切换完成;
维护
查看主备库的日志应用状态
select sequence#,applied from v$archived_log order by sequence#;
查备库进程
select process,client_process,sequence#,status,BLOCK#,BLOCKS from v$managed_standby;
查看备库延迟
set linesize 300
col name for a20
col value for a20
col time_computed for a30
select name,value,unit,time_computed from v$dataguard_stats where name in ('transport lag','apply lag');