三、在rac1,rac2上安裝oracle
1、 系統的軟硬件檢查
[root@rac1 ~]# cat /etc/issue
Red Hat Enterprise Linux AS release 4 (Nahant Update 7)
Kernel \r on an \m
[root@rac1 ~]# uname -a
Linux rac1 2.6.9-78.ELsmp #1 SMP Wed Jul 9 15:39:47 EDT 2008 i686 i686 i386 GNU/Linux
[root@rac1 ~]# cat /proc/meminfo |grep MemTotal
MemTotal: 1034496 kB
[root@rac1 ~]# cat /proc/meminfo |grep SwapTotal
SwapTotal: 2096472 kB
可以看到:操作系統版本爲4.7;內核版本爲2.6.9-78.ELsmp;交換空間應爲內存的兩倍,若不是兩倍,則應增加交換空間大小;
(增加swap大小
#dd if=/dev/zero of=/swapfile bs=1M count=2048
#mkswap /swapfile
#swapon /swapfile
#free
#vi /etc/fstab /*添加如下內容
/swapfile swap swap defaults 0 0
)
系統軟件包檢查,標”/*”號的爲必須要安裝的系統軟件包,若檢查沒安裝,則需對其進行安裝
[root@rac1 ~]# rpm -qa|grep binutils
binutils-2.15.92.0.2-25
[root@rac1 ~]# rpm -qa|grep compat-db
compat-db-4.1.25-9
[root@rac1 ~]# rpm -qa|grep control-center
control-center-2.8.0-12.rhel4.5
[root@rac1 ~]# rpm -qa|grep gcc
gcc-3.4.6-10 /*
gcc-java-3.4.6-10
compat-gcc-32-3.2.3-47.3
gcc-g77-3.4.6-10
compat-libgcc-296-2.96-132.7.2
gcc-c++-3.4.6-10 /*
libgcc-3.4.6-10
compat-gcc-32-c++-3.2.3-47.3
[root@rac1 ~]# rpm -qa|grep glibc
glibc-headers-2.3.4-2.41
glibc-2.3.4-2.41 /*
glibc-kernheaders-2.4-9.1.103.EL
glibc-common-2.3.4-2.41 /*
glibc-devel-2.3.4-2.41
[root@rac1 ~]# rpm -qa|grep gnome-libs
gnome-libs-1.4.1.2.90-44.1
[root@rac1 ~]# rpm -qa|grep libstdc++
compat-libstdc++-296-2.96-132.7.2
libstdc++-3.4.6-10 /*
libstdc++-devel-3.4.6-10 /*
compat-libstdc++-33-3.2.3-47.3
[root@rac1 ~]# rpm -qa|grep make
automake17-1.7.9-5
automake14-1.4p6-12
automake16-1.6.3-5
make-3.80-7.EL4 /*
automake15-1.5-13
automake-1.9.2-3
在rac2上做相同的操作
2、 創建用戶和組
[root@rac1 ~]# groupadd -g 1000 oinstall
[root@rac1 ~]# groupadd -g 1001 dba
[root@rac1 ~]# useradd -u 1000 -g oinstall -G dba oracle
[root@rac1 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:
passwd: all authentication tokens updated successfully.
在rac2上做相同的操作
3、 設置oracle用戶環境變量
[oracle@rac1 ~]$ pwd
/home/oracle
[oracle@rac1 ~]$ ls -al|grep .bash_profile
-rw-r--r-- 1 oracle oinstall 191 Feb 23 16:14 .bash_profile
[oracle@rac1 ~]$ vi .bash_profile /*添加如下內容
export PATH
unset USERNAME
umask 022
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl1
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
[oracle@rac2 ~]$ vi .bash_profile /*添加如下內容
export PATH
unset USERNAME
umask 022
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl2
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
4、 設置root用戶環境變量
[root@rac1 ~]# pwd
/root
[root@rac1 ~]# ls -al|grep .bash_profile
-rw-r--r-- 1 root root 191 Sep 23 2004 .bash_profile
[root@rac1 ~]# vi .bash_profile /*添加如下內容
export PATH
unset USERNAME
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl1
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
[root@rac2 ~]# vi .bash_profile /*添加如下內容
export PATH
unset USERNAME
ORACLE_BASE=/db/oracle
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db_1
ORACLE_SID=orcl2
PATH=$ORACLE_HOME/bin:$ORA_CRS_HOME/bin:$PATH:.
export ORACLE_BASE ORA_CRS_HOME ORACLE_HOME ORACLE_SID PATH
5、 創建軟件安裝目錄
[root@rac1 ~]# mkdir -p /db/oracle
[root@rac1 ~]# chown -R oracle:oinstall /db
[root@rac1 ~]# chmod 755 /db
在rac2上做相同的操作
6、 編寫/etc/hosts文件
[root@rac1 ~]# vi /etc/hosts
127.0.0.1 localhost.localdomain localhost
192.168.0.190 rac1
192.168.0.191 rac2
10.10.0.190 rac1-priv
10.10.0.191 rac2-priv
192.168.0.109 rac1-vip
192.168.0.119 rac2-vip
[root@rac2 ~]# vi /etc/hosts /*添加的內容同rac1一樣
7、 配置結點等價性
[root@rac1 etc]# vi /etc/hosts.equiv
+rac1 oracle
+rac2 oracle
[root@rac1 bin]# pwd
/usr/kerberos/bin
[root@rac1 bin]# mv rsh rsh.bak
[root@rac1 bin]# mv rlogin rlogin.bak
[root@rac1 bin]# mv rcp rcp.bak
[root@rac1 bin]# chkconfig rsh on
[root@rac1 bin]# chkconfig rlogin on
[root@rac1 bin]# service xinetd reload
Reloading configuration: [ OK ]
在rac2上也做同樣的操作
8、 修改內核參數,並使參數生效
[root@rac1 bin]# vi /etc/sysctl.conf
kernel.core_uses_pid = 1
kernel.shmall=2097152
kernel.shmmax=2147483648
kernel.shmmni=4096
kernel.sem=250 32000 100 128
fs.file-max=65536
net.ipv4.ip_local_port_range=1024 65000
net.core.rmem_default=1048576
net.core.rmem_max=1048576
net.core.wmem_default=262144
net.core.wmem_max=262144
[root@rac1 bin]# sysctl -p
net.ipv4.ip_forward = 0
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.default.accept_source_route = 0
kernel.sysrq = 0
kernel.core_uses_pid = 1
kernel.shmall = 2097152
kernel.shmmax = 2147483648
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 65536
net.ipv4.ip_local_port_range = 1024 65000
net.core.rmem_default = 1048576
net.core.rmem_max = 1048576
net.core.wmem_default = 262144
net.core.wmem_max = 262144
在rac2上做同樣的操作
9、 設置用戶限制
[root@rac1 bin]# vi /etc/security/limits.conf
#@student - maxlogins 4
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
[root@rac1 bin]# vi /etc/pam.d/login /*在末尾添加如下內容
session required pam_selinux.so open
session required /lib/security/pam_limits.so
[root@rac1 bin]# vi /etc/profile /*在末尾添加如下內容
unset i
unset pathmunge
if [ $USER = "oracle" ]; then
if [ $SHELL = "/bin/ksh" ]; then
ulimit -p 16384
ulimit -n 65536
else
ulimit -u 16384 -n 65536
fi
fi
[root@rac1 bin]# vi /etc/csh.login /*在末尾添加如下內容
if ( $USER == "oracle" ) then
limit maxproc 16384
limit descriptors 65536
endif
在rac2做同樣的操作
10、 配置hangcheck-timer
[root@rac1 bin]# modprobe hangcheck-timer hangcheck_tick=1 hangcheck_margin=10
[root@rac1 bin]# vi /etc/rc.local /*在末尾添加如下內容
modprobe hangcheck-timer hangcheck_tick=1 hangcheck_margin=10
在rac2做同樣的操作
11、 配置OCFS2文件系統
上傳ocfs軟件包,該軟件包可在ORACLE官網裏下載http://oss.oracle.com/projects/ocfs2/files/,需根據操作系統的內核版本下載相應的軟件包。
[root@rac1 ocfs2]# pwd
/home/oracle/ocfs2
[root@rac1 ocfs2]# ls
ocfs2-2.6.9-78.ELsmp-1.2.9-1.el4.i686.rpm
ocfs2console-1.2.7-1.el4.i386.rpm
ocfs2-tools-1.2.7-1.el4.i386.rpm
[root@rac1 ocfs2]# rpm -Uvh ocfs2*
Preparing... ########################################### [100%]
1:ocfs2-tools ########################################### [ 33%]
2:ocfs2-2.6.9-78.ELsmp ########################################### [ 67%]
3:ocfs2console ########################################### [100%]
在rac2做同樣的操作
[root@rac1 ocfs2]# ocfs2console&
[1] 12115
點擊Cluster/Configure Nodes/Close/Add,填寫Name:rac1,IP Address:192.168.0.190,OK;Add,填寫Name:rac2,IP Address:192.168.0.191,OK
點擊Apply,close
點擊Cluster/Propagate Configuration
輸入yes,回車
輸入rac2的root密碼,出現Finished!,點擊close,關閉ocfs2console
[root@rac1 ocfs2]# service o2cb configure /*在rac1配置o2cb服務
Configuring the O2CB driver.
This will configure the on-boot properties of the O2CB driver.
The following questions will determine whether the driver is loaded on
boot. The current values will be shown in brackets ('[]'). Hitting
<ENTER> without typing an answer will keep that current value. Ctrl-C
will abort.
Load O2CB driver on boot (y/n) [n]: y
Cluster to start on boot (Enter "none" to clear) [ocfs2]:
Specify heartbeat dead threshold (>=7) [31]:
Specify network idle timeout in ms (>=5000) [30000]:
Specify network keepalive delay in ms (>=1000) [2000]:
Specify network reconnect delay in ms (>=2000) [2000]:
Writing O2CB configuration: OK
O2CB cluster ocfs2 already online
[2]+ Done ocfs2console
[root@rac2 ocfs2]# service o2cb configure /*在rac2配置o2cb服務
Configuring the O2CB driver.
This will configure the on-boot properties of the O2CB driver.
The following questions will determine whether the driver is loaded on
boot. The current values will be shown in brackets ('[]'). Hitting
<ENTER> without typing an answer will keep that current value. Ctrl-C
will abort.
Load O2CB driver on boot (y/n) [n]: y
Cluster to start on boot (Enter "none" to clear) [ocfs2]:
Specify heartbeat dead threshold (>=7) [31]:
Specify network idle timeout in ms (>=5000) [30000]:
Specify network keepalive delay in ms (>=1000) [2000]:
Specify network reconnect delay in ms (>=2000) [2000]:
Writing O2CB configuration: OK
Loading module "configfs": OK
Creating directory '/config': OK
Mounting configfs filesystem at /config: OK
Loading module "ocfs2_nodemanager": OK
Loading module "ocfs2_dlm": OK
Loading module "ocfs2_dlmfs": OK
Creating directory '/dlm': OK
Mounting ocfs2_dlmfs filesystem at /dlm: OK
Starting O2CB cluster ocfs2: OK
12、 配置ASM
上傳下載好的軟件包(需根據操作系統內核版本下載相應的軟件包),下載網址:
http://www.oracle.com/technetwork/topics/linux/downloads/rhel4-092650.html
[root@rac1 oracleasm]# pwd
/home/oracle/oracleasm
[root@rac1 oracleasm]# ls
oracleasm-2.6.9-78.ELsmp-2.0.5-1.el4.i686.rpm oracleasm-support-2.1.4-1.el4.i386.rpm
oracleasmlib-2.0.4-1.el4.i386.rpm
[root@rac1 oracleasm]# rpm -Uvh oracleasm*
warning: oracleasm-2.6.9-78.ELsmp-2.0.5-1.el4.i686.rpm: V3 DSA signature: NOKEY, key ID b38a8516
Preparing... ########################################### [100%]
1:oracleasm-support ########################################### [ 33%]
2:oracleasm-2.6.9-78.ELsmp ########################################### [ 67%]
3:oracleasmlib ########################################### [100%]
[root@rac1 oracleasm]# service oracleasm configure
Configuring the Oracle ASM library driver.
This will configure the on-boot properties of the Oracle ASM library
driver. The following questions will determine whether the driver is
loaded on boot and what permissions it will have. The current values
will be shown in brackets ('[]'). Hitting <ENTER> without typing an
answer will keep that current value. Ctrl-C will abort.
Default user to own the driver interface []: oracle
Default group to own the driver interface []: dba
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]: y
Writing Oracle ASM library driver configuration: done
Initializing the Oracle ASMLib driver: [ OK ]
Scanning the system for Oracle ASMLib disks: [ OK ]
在rac2也做相同的操作
13、 配置共享磁盤
A. 創建磁盤文件
在ESX3.5上:
使用TELNET或SSH工具,登陸到ESX服務器上,使用下面的命令創建虛擬客戶機使用的共享磁盤文件:
[root@esx fcdisk]# cd /vmfs/volumes/fcdisk /*我選擇在fcdisk這塊磁盤上創建共享盤
[root@esx fcdisk]# mkdir crsdisk
[root@esx fcdisk]# cd crsdisk
[root@esx crsdisk]# vmkfstools -c 2048m -a lsilogic -d thick crsdisk.vmdk /*創建了2G的crsdisk磁盤文件
[root@esx crsdisk]# cd ..
[root@esx fcdisk]# mkdir datadisk /*以同樣的方法創建datadisk,flashdisk共享磁盤
[root@esx fcdisk]# cd datadisk
[root@esx datadisk]# vmkfstools -c 20000m -a lsilogic -d thick datadisk.vmdk
[root@esx datadisk]# cd ..
[root@esx fcdisk]# mkdir flashdisk
[root@esx fcdisk]# cd flashdisk/
[root@esx flashdisk]# vmkfstools -c 20000m -a lsilogic -d thick flashdisk.vmdk
若在ESX4上:創建磁盤的命令如下
[root@esx datadisk]# vmkfstools -c 2G -a lsilogic -d eagerzeroedthick crsdisk.vmdk
Datadisk,flashdisk的創建與其相同。
B. 重新啓動rac1和rac2,將磁盤加入到虛擬機中
右單擊虛擬機“rac1”,選擇Edit Setting,選擇Hard Disk 1,Add(添加磁盤),選擇Hard Disk,
遇到下面界面時選擇“Use an existing virtual disk”
選擇SCSI(1:0);Independent,Persistent,next,finish
點擊Add,繼續添加datadisk,flashdisk共享磁盤,datadisk的Virtual Device Node選擇SCSI(1:1);flashdisk選擇SCSI(1:2)
點擊OK
在rac2添加同樣的共享磁盤
C. 修改虛擬機的vmx設置文件
在ESX3.5上需做如下設置:
[root@esx fcdisk]# pwd
/vmfs/volumes/fcdisk
[root@esx fcdisk]# cd rac1 /*因爲rac1裝在fcdisk磁盤中
[root@esx rac1]# ls |grep *.vmx
rac1.vmx
rac1.vmxf
[root@esx rac1]# vi rac1.vmx /*藍色標記,爲需修改或添加內容
scsi1.present = "true"
scsi1.sharedBus = "virtual"
scsi1.virtualDev = "lsilogic"
scsi1:0.present = "true"
scsi1:0.fileName = "/vmfs/volumes/4d182f2b-52f309be-92e1-0013724f9d5e/crsdisk/cr
sdisk.vmdk"
scsi1:0.mode = "independent-persistent"
scsi1:0.deviceType = "scsi-hardDisk"
sched.scsi1:0.shares="normal"
scsi1:1.present = "true"
scsi1:1.fileName = "/vmfs/volumes/4d182f2b-52f309be-92e1-0013724f9d5e/datadisk/d
atadisk.vmdk"
scsi1:1.mode = "independent-persistent"
scsi1:1.deviceType = "scsi-hardDisk"
sched.scsi1:1.shares="normal"
scsi1:2.present = "true"
scsi1:2.fileName = "/vmfs/volumes/4d182f2b-52f309be-92e1-0013724f9d5e/flashdisk/
flashdisk.vmdk"
scsi1:2.mode = "independent-persistent"
scsi1:2.deviceType = "scsi-hardDisk"
sched.scsi1:2.shares="normal"
floppy0.fileName = "/dev/fd0"
disk.locking="false"
diskLib.dataCacheMaxSize="0"
diskLib.dataCacheMaxReadAheadSize="0"
diskLib.dataCacheMinReadAheadSize="0"
diskLib.dataCachePageSize="4096"
diskLib.maxUnsyncedWrites="0"
在ESX4上只需將新添加的控制器1的SCSI總線共享改爲虛擬即可
在rac2做同樣的操作
14、 對共享磁盤進行分區(只在一個節點上進行即可)
啓動rac1與rac2,以root用戶登錄,如下顯示,可以看到,三張共享磁盤已經在rac1上,在rac2上執行如下命令,同時也可看到這三張共享盤
[root@rac1 ~]# fdisk -l
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 1033 8193150 83 Linux
/dev/sda3 1034 1294 2096482+ 82 Linux swap
/dev/sda4 1295 2610 10570770 5 Extended
/dev/sda5 1295 2610 10570738+ 83 Linux
Disk /dev/sdb: 2147 MB, 2147483648 bytes
255 heads, 63 sectors/track, 261 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdb doesn't contain a valid partition table
Disk /dev/sdc: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdc doesn't contain a valid partition table
Disk /dev/sdd: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Disk /dev/sdd doesn't contain a valid partition table
[root@rac1 ~]# fdisk /dev/sdb /*只需在rac1上對磁盤sdb進行分區,藍色字體爲需輸入的字
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
Command (m for help): n
Command action
e extended
p primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-261, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-261, default 261):
Using default value 261
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
以同樣的方法對sdc,sdd進行分區
[root@rac1 ~]# fdisk /dev/sdc
[root@rac1 ~]# fdisk /dev/sdd
[root@rac1 ~]# fdisk -l /*已對其共享磁盤分好區
Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sda1 * 1 13 104391 83 Linux
/dev/sda2 14 1033 8193150 83 Linux
/dev/sda3 1034 1294 2096482+ 82 Linux swap
/dev/sda4 1295 2610 10570770 5 Extended
/dev/sda5 1295 2610 10570738+ 83 Linux
Disk /dev/sdb: 2147 MB, 2147483648 bytes
255 heads, 63 sectors/track, 261 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdb1 1 261 2096451 83 Linux
Disk /dev/sdc: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdc1 1 2549 20474811 83 Linux
Disk /dev/sdd: 20.9 GB, 20971520000 bytes
255 heads, 63 sectors/track, 2549 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/sdd1 1 2549 20474811 83 Linux
[root@rac1 ~]# mkfs.ocfs2 -N 4 -L crs /dev/sdb1 /*在分區/dev/sdb1上創建ocfs2文件系統
mkfs.ocfs2 1.2.7
Filesystem label=crs
Block size=4096 (bits=12)
Cluster size=4096 (bits=12)
Volume size=2146762752 (524112 clusters) (524112 blocks)
17 cluster groups (tail covers 8016 clusters, rest cover 32256 clusters)
Journal size=67108864
Initial number of node slots: 4
Creating bitmaps: done
Initializing superblock: done
Writing system files: done
Writing superblock: done
Writing backup superblock: 1 block(s)
Formatting Journals: done
Writing lost+found: done
mkfs.ocfs2 successful
[root@rac1 ~]# mkdir /crs
[root@rac1 ~]# vi /etc/fstab /*添加如下內容
LABEL=crs /crs ocfs2 _netdev,datavolume,nointr 0 0
[root@rac1 ~]# mount /crs
[root@rac1 ~]# chown -R oracle:oinstall /crs
[root@rac1 ~]# chmod -R 755 /crs
[root@rac1 ~]# service oracleasm createdisk data /dev/sdc1
Marking disk "data" as an ASM disk: [ OK ]
[root@rac1 ~]# service oracleasm createdisk flash /dev/sdd1
Marking disk "flash" as an ASM disk: [ OK ]
[root@rac2 ~]# partprobe
[root@rac2 ~]# mkdir /crs
[root@rac2 ~]# vi /etc/fstab
[root@rac2 ~]# mount /crs
[root@rac2 ~]# service oracleasm scandisks
Scanning the system for Oracle ASMLib disks: [ OK ]
[root@rac2 ~]# service oracleasm listdisks
DATA
FLASH