Building Tablespace Growth Report from Grid Data

Posted by Sagar Patil

Use the SQL query below against the Grid Control sysman repository database to track tablespace growth over time. The returned data can be imported into Excel to generate graphs.

-- Daily tablespace allocation history for one database, pulled from the
-- OEM Grid Control repository view sysman.MGMT$METRIC_DAILY.
-- Replace oracle_db_name with the target database name (keep it lowercase:
-- the column is wrapped in LOWER(), so an uppercase pattern never matches).
SELECT   target_name,
         key_value,
         rollup_timestamp,
         metric_column,
         sample_count,
         -- AVERAGE for tbspAllocation is reported in MB; /1000 gives an
         -- approximate GB figure (matches the sample output below).
         ROUND (average / 1000) allocted_gb
  FROM   sysman.mgmt$metric_daily
 WHERE   metric_name = 'tbspAllocation'
   AND   LOWER (target_name) LIKE '%oracle_db_name%'
   AND   key_value = 'DATA'
   AND   LOWER (target_type) = 'oracle_database'
   AND   metric_column = 'spaceAllocated'
ORDER BY rollup_timestamp DESC;

TARGET_NAME    KEY_VALUE    ROLLUP_TIMESTAMP    METRIC_COLUMN    SAMPLE_COUNT    ALLOCTED_GB
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    31/10/2010    spaceAllocated    1    395
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    19/10/2010    spaceAllocated    1    394
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    18/10/2010    spaceAllocated    1    394
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    21/09/2010    spaceAllocated    1    378
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    09/08/2010    spaceAllocated    1    371
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    25/08/2010    spaceAllocated    1    365
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    14/08/2010    spaceAllocated    1    357
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    08/07/2010    spaceAllocated    1    353
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    31/07/2010    spaceAllocated    1    349
Prod.oracle.db.uk       PROD_ARCHIVE_DATA_001    26/06/2010    spaceAllocated    1    330

Following attribute_types are available for target_type=’host’

CPUUsage
DiskActivity
EMDUploadStats
FileMonitoring
Filesystems
Load
Network
PagingActivity
ProcessInfo
ProgramResourceUtilization
Response
Swap_Area_Status
ThreadCPUUsageSummary
TotalDiskUsage
host_storage_history
proc_zombie

Following attribute_types are available for target_type=’oracle_database’

DATABASE_SIZE
Database_Resource_Usage
DeferredTrans
OCMInstrumentation
Recovery_Area
Response
UserAudit
UserBlock
UserLocks
alertLogStatus
all_invalid_objects
archFull
dbjob_status
dumpFull
ha_flashrecovery
ha_recovery
instance_efficiency
instance_throughput
invalid_objects
problemTbsp
segment_advisor_count
service
sga_pool_wastage
sql_response
streams_processes_count
system_response_time_per_call
tbspAllocation
wait_bottlenecks
wait_sess_cls

 

 

Oracle Roles and Users audit report

Posted by Sagar Patil

I have often used the following procedure to create a list of roles and the users granted each of them. The output was then emailed to business owners and used for user access auditing.

-- Package Name : users_granted_role
-- Reports every role in the database together with the users (and nested
-- roles) granted to it, by walking the data dictionary views
-- dba_roles / dba_users / dba_role_privs. Output goes to DBMS_OUTPUT or
-- UTL_FILE depending on package-body configuration.
CREATE OR REPLACE PACKAGE users_granted_role
IS
 -- Write one report line to the configured output (screen or file).
 procedure write_op (pv_str in varchar2);
 -- Return 'USER' or 'ROLE' depending on what the grantee name denotes.
 function user_or_role(pv_grantee in dba_users.username%type) return varchar2;
 -- Return DBA_ROLES.PASSWORD_REQUIRED ('YES'/'NO') for the given role.
 function role_pwd(pv_role in dba_roles.role%type)   return varchar2;
 -- Recursively list everything granted the role pv_grantee; pv_tabstop
 -- tracks the current indentation depth across recursive calls.
 procedure get_users(pv_grantee in dba_roles.role%type,pv_tabstop in out number);
 -- List direct grantees of pv_role, expanding nested roles via get_users.
 procedure get_role (pv_role in varchar2);
 -- Entry point: iterate every role in dba_roles and print its grantee tree.
 Procedure extract_user_role_details;
END users_granted_role;
/
create or replace package body users_granted_role
IS
 -- Output configuration:
 --   output_method = 'S' -> write via dbms_output (screen / spool file);
 --   any other value     -> write through utl_file (lg_fptr must be opened
 --                          by the caller; this package never opens it).
 output_method varchar2(1) :='S';
 -- Set skip_user = 'Y' to omit grantees matching user_to_skip (LIKE pattern).
 skip_user varchar2(1) := 'N';
 user_to_skip varchar2(20) :='TEST%';
 --
 lg_fptr utl_file.file_type;
 lv_file_or_screen varchar2(1):='S';
 v_tag      VARCHAR2(8);
 v_filename VARCHAR2(30);
 v_today    VARCHAR2(8);
 --
 -- Driving cursor: every role defined in the database.
 cursor find_all_roles is
 Select    role
 from    dba_roles;
 --
 -- Writes one report line either to the screen (dbms_output) or to a file
 -- (utl_file), chosen by lv_file_or_screen. Each line is prefixed with a
 -- tag and a date-stamped report name (e.g. "UserPrivUser_Privileges20101026").
 -- NOTE(review): utl_file errors are reported but swallowed, so a failed
 -- write does not abort the report.
procedure write_op (pv_str in varchar2) is
 begin
 v_today    := TO_CHAR(sysdate,'YYYYMMDD');
 v_tag      := 'UserPriv';
 v_filename := 'User_Privileges'|| v_today;

 if lv_file_or_screen='S' then
 dbms_output.put_line(v_tag || v_filename||' '||pv_str);
 else
 utl_file.put_line(lg_fptr,pv_str);
 end if;
 exception
 when utl_file.invalid_path  then
 dbms_output.put_line('invalid path');
 when utl_file.invalid_mode  then
 dbms_output.put_line('invalid mode');
 when utl_file.invalid_filehandle  then
 dbms_output.put_line('invalid filehandle');
 when utl_file.invalid_operation  then
 dbms_output.put_line('invalid operation');
 when utl_file.read_error  then
 dbms_output.put_line('read error');
 when utl_file.write_error  then
 dbms_output.put_line('write error');
 when utl_file.internal_error  then
 dbms_output.put_line('internal error');
 when others then
 dbms_output.put_line('ERROR (write_op) => '||sqlcode);
 dbms_output.put_line('MSG (write_op) => '||sqlerrm);

end write_op;
 --
 -- Classifies a grantee name: 'USER' if it exists in dba_users, 'ROLE' if it
 -- exists in dba_roles. Returns NULL when the name is in neither view.
 -- NOTE(review): if an exception fires, the handler prints it but the
 -- function then ends without RETURN (ORA-06503 at the call site) — confirm
 -- whether that path can actually be reached here.
function user_or_role(pv_grantee in dba_users.username%type) return varchar2 is
 --
 cursor c_use (cp_grantee in dba_users.username%type) is
 select  'USER' userrole
 from    dba_users u
 where   u.username=cp_grantee
 union
 select  'ROLE' userrole
 from    dba_roles r
 where   r.role=cp_grantee;
 --
 lv_use c_use%rowtype;
 --
 begin
 open c_use(pv_grantee);
 fetch c_use into lv_use;
 close c_use;
 return lv_use.userrole;
 exception
 when others then
 dbms_output.put_line('ERROR (user_or_role) => '||sqlcode);
 dbms_output.put_line('MSG (user_or_role) => '||sqlerrm);
end user_or_role;
 -----------------
 -- Returns DBA_ROLES.PASSWORD_REQUIRED ('YES'/'NO') for the role, or NULL
 -- when the role does not exist.
 -- NOTE(review): 'when others then null' deliberately swallows all errors;
 -- the function then falls off the end, so any failure surfaces as
 -- ORA-06503 rather than the original error.
function role_pwd(pv_role in dba_roles.role%type)   return varchar2 is
 --
 cursor c_role(cp_role in dba_roles.role%type) is
 select    r.password_required
 from    dba_roles r
 where    r.role=cp_role;
 --
 lv_role c_role%rowtype;
 --
 begin
 open c_role(pv_role);
 fetch c_role into lv_role;
 close c_role;
 return lv_role.password_required;
 exception
 when others then null;
 --dbms_output.put_line('ERROR (role_pwd) => '||sqlcode);
 --dbms_output.put_line('MSG (role_pwd) => '||sqlerrm);
end role_pwd;
 --
 -- Recursively expands a role: for each grantee of pv_grantee, prints users
 -- directly and recurses into nested roles. pv_tabstop is IN OUT so the
 -- indentation depth (one chr(9) tab per level) survives recursion; it is
 -- incremented on entry and decremented before returning.
 -- NOTE(review): there is no cycle detection — this relies on Oracle
 -- disallowing circular role grants.
procedure get_users(pv_grantee in dba_roles.role%type,pv_tabstop in out number) is
 --
 lv_tab varchar2(50):='';
 lv_loop number;
 lv_user_or_role dba_users.username%type;
 --
 -- All grantees (users or roles) that were granted cp_username.
 cursor c_user (cp_username in dba_role_privs.grantee%type) is
 select  d.grantee,
 d.admin_option
 from    dba_role_privs d
 where   d.granted_role=cp_username;
 --
 begin
 pv_tabstop:=pv_tabstop+1;
 -- Build the indentation prefix for this recursion depth.
 for lv_loop in 1..pv_tabstop loop
 lv_tab:=lv_tab||chr(9);
 end loop;

 for lv_user in c_user(pv_grantee) loop
 lv_user_or_role:=user_or_role(lv_user.grantee);
 if lv_user_or_role = 'ROLE' then
 -- PUBLIC is a pseudo-role: report it but phrase the line without the
 -- "which is granted to =>" suffix used for ordinary nested roles.
 if lv_user.grantee = 'PUBLIC' then
 write_op(lv_tab||'Role => '||lv_user.grantee
 ||' (ADM = '||lv_user.admin_option
 ||'|PWD = '||role_pwd(lv_user.grantee)||')');
 else
 write_op(lv_tab||'Role => '||lv_user.grantee
 ||' (ADM = '||lv_user.admin_option
 ||'|PWD = '||role_pwd(lv_user.grantee)||')'
 ||' which is granted to =>');
 end if;
 -- Recurse into the nested role to list its members one level deeper.
 get_users(lv_user.grantee,pv_tabstop);
 else
 -- Plain user: print unless filtered out by the skip_user settings.
 if upper(skip_user) = 'Y' and lv_user.grantee like upper(user_to_skip) then
 null;
 else
 write_op(lv_tab||'User => '||lv_user.grantee
 ||' (ADM = '||lv_user.admin_option||')');
 end if;
 end if;
 end loop;
 pv_tabstop:=pv_tabstop-1;
 lv_tab:='';
 exception
 when others then
 dbms_output.put_line('ERROR (get_users) => '||sqlcode);
 dbms_output.put_line('MSG (get_users) => '||sqlerrm);
end get_users;
----
 -- Prints the direct grantees of pv_role (one tab deep); nested roles are
 -- expanded via get_users starting at depth 1.
procedure get_role (pv_role in varchar2) is
 --
 cursor c_main (cp_role in varchar2) is
 select    p.grantee,
 p.admin_option
 from    dba_role_privs p
 where    p.granted_role=cp_role;
 --
 lv_userrole dba_users.username%type;
 lv_tabstop number;
 --
 --
begin
 lv_tabstop:=1;
 for lv_main in c_main(pv_role) loop
 lv_userrole:=user_or_role(lv_main.grantee);
 if lv_userrole='USER' then
 if upper(skip_user) = 'Y' and lv_main.grantee like upper(user_to_skip) then
 null;
 else
 write_op(chr(9)||'User => '||lv_main.grantee
 ||' (ADM = '||lv_main.admin_option||')');
 end if;
 else
 -- Same PUBLIC special-case as in get_users.
 if lv_main.grantee='PUBLIC' then
 write_op(chr(9)||'Role => '||lv_main.grantee
 ||' (ADM = '||lv_main.admin_option
 ||'|PWD = '||role_pwd(lv_main.grantee)||')');
 else
 write_op(chr(9)||'Role => '||lv_main.grantee
 ||' (ADM = '||lv_main.admin_option
 ||'|PWD = '||role_pwd(lv_main.grantee)||')'
 ||' which is granted to =>');
 end if;
 get_users(lv_main.grantee,lv_tabstop);
 end if;
 end loop;
 exception
 when others then
 dbms_output.put_line('ERROR (get_role) => '||sqlcode);
 dbms_output.put_line('MSG (get_role) => '||sqlerrm);
end get_role;

 -- Entry point: prints a banner, then one "Investigating Role => ..."
 -- section per role in the database.
Procedure extract_user_role_details is
begin
 write_op('Users_granted_role: Release 1.0 - Author : Sagar PATIL on '|| sysdate);
 for role_to_find in find_all_roles loop
 lv_file_or_screen:= upper(output_method);
 write_op(chr(10));
 write_op('Investigating Role => '||upper(role_to_find.role)||' (PWD = '
 ||role_pwd(upper(role_to_find.role))||') which is granted to =>');
 write_op('====================================================================');
 get_role(upper(role_to_find.role));
 end loop;
exception
 when others then
 dbms_output.put_line('ERROR (main) => '||sqlcode);
 dbms_output.put_line('MSG (main) => '||sqlerrm);
end extract_user_role_details;
end;
/

Run it as below

SQL> spool list_of_users.lst
SQL> set serveroutput on size 20000;
SQL> exec users_granted_role.extract_user_role_details;

UserPrivUser_Privileges20101026 Investigating Role => CONNECT (PWD = NO) which is granted to =>
====================================================================
UserPrivUser_Privileges20101026         User => WMSYS (ADM = NO)
UserPrivUser_Privileges20101026         User => SPATIAL_CSW_ADMIN_USR (ADM = NO)
UserPrivUser_Privileges20101026         User => SYSMAN (ADM = NO)
UserPrivUser_Privileges20101026         User => GRIDCONTROL (ADM = NO)
UserPrivUser_Privileges20101026         User => RMAN (ADM = NO)
UserPrivUser_Privileges20101026         User => MDDATA (ADM = NO)
UserPrivUser_Privileges20101026         User => OWBSYS (ADM = YES)
UserPrivUser_Privileges20101026         User => SYSMAN_MDS (ADM = NO)
UserPrivUser_Privileges20101026         User => SYS (ADM = YES)
UserPrivUser_Privileges20101026         User => MDSYS (ADM = NO)
UserPrivUser_Privileges20101026         User => SPATIAL_WFS_ADMIN_USR (ADM = NO)
UserPrivUser_Privileges20101026         User => APEX_030200 (ADM = YES)
UserPrivUser_Privileges20101026         User => SCOTT (ADM = NO)
UserPrivUser_Privileges20101026

UserPrivUser_Privileges20101026 Investigating Role => RESOURCE (PWD = NO) which is granted to =>
====================================================================
UserPrivUser_Privileges20101026         User => WMSYS (ADM = NO)
UserPrivUser_Privileges20101026         User => SCOTT (ADM = NO)
UserPrivUser_Privileges20101026         User => SPATIAL_CSW_ADMIN_USR (ADM = NO)
UserPrivUser_Privileges20101026         User => RMAN (ADM = NO)
UserPrivUser_Privileges20101026         Role => LOGSTDBY_ADMINISTRATOR (ADM = NO|PWD =NO) which is granted to =>
UserPrivUser_Privileges20101026                 User => SYS (ADM = YES)
UserPrivUser_Privileges20101026         User => EXFSYS (ADM = NO)
UserPrivUser_Privileges20101026         User => SPATIAL_WFS_ADMIN_USR (ADM = NO)
UserPrivUser_Privileges20101026         User => CTXSYS (ADM = NO)
UserPrivUser_Privileges20101026         User => OLAPSYS (ADM = NO)
UserPrivUser_Privileges20101026         User => MDSYS (ADM = NO)
UserPrivUser_Privileges20101026         User => SYSMAN_MDS (ADM = NO)
UserPrivUser_Privileges20101026         User => XDB (ADM = NO)
UserPrivUser_Privileges20101026         User => APEX_030200 (ADM = YES)
UserPrivUser_Privileges20101026         User => SYS (ADM = YES)
UserPrivUser_Privileges20101026         User => SYSMAN (ADM = NO)
UserPrivUser_Privileges20101026         User => OUTLN (ADM = NO)
UserPrivUser_Privileges20101026         User => MDDATA (ADM = NO)
PL/SQL procedure successfully completed.
SQL> spool off;

Shell Script to Generate Daily/Weekly AWR reports (Email)

Posted by Sagar Patil

Create  .run_awr with following details  :

"TNS-connect-string : recipient-list : hrs of AWR snapshot"
[oracle@ ~]$ cat .run_awr
prod:root@oracledbasupport.co.uk:11
I added this script in my crontab for a daily emails:
########## Daily Export of AWR reports
02 18 * * * /home/oracle/.awr_daily.sh  >> /home/oracle/awr.log 2>&1
[oracle@awr_reports]$ ls -lrt
-rw-r--r-- 1 oracle oracle 315104 Oct 26 10:02 AWR_26102010_1002_prod.HTML
-rw-r--r-- 1 oracle oracle 343839 Oct 26 18:02 AWR_26102010_1802_prod.HTML
-rw-r--r-- 1 oracle oracle 342611 Oct 27 18:02 AWR_27102010_1802_prod.HTML
-rw-r--r-- 1 oracle oracle 282057 Oct 28 18:02 AWR_28102010_1802_prod.HTML
  1. Create AWR report between sysdate  and sysdate – hours (download)

#       The file ".run_awr" in the "$HOME" directory contains one or more
#       lines with the following format, three fields delimited by colons:
#
#               TNS-connect-string : recipient-list : hrs

    2.Create AWR report between sysdate-days  and sysdate – hours  (download)

#       The file ".run_awr" in the "$HOME" directory contains one or more
#       lines with the following format, four fields delimited by colons:
#
#               TNS-connect-string : recipient-list : daysInPast : hrs

#!/usr/bin/ksh

 #==============================================================================
# File:         run_awr.sh
# Type:         korn shell script
#
# Description:
#       UNIX Korn-shell script to run under the UNIX "cron" utility to
#       automatically generate and email Oracle "AWR" reports in HTML against
#       the database accessed via the specified TNS connect-string, to a
#       specified list of email addresses.
#
# Parameters:
#       Zero, one, or more parameters may be passed.  These parameters
#       are TNS connect-strings, each of which refer to entries in the
#       script's configuration file (named ".run_awr", described below).
#
#       If no parameters are specified, then the script processes all of
#       the lines in the configuration file.
#
#       For each of the parameters specified, the script will process
#       each of the corresponding lines in the configuration file.
#
#       Each TNS connect-string should be separated by whitespace.
#
# Configuration file:
#       The file ".run_awr" in the "$HOME" directory contains one or more
#       lines with the following format, three fields delimited by "colons":
#
#               TNS-connect-string : recipient-list : hrs
#
#       where:
#
#               TNS-connect-string      Oracle TNS connect-string for the db
#               recipient-list          comma-separated list of email addresses
#               hrs                     "sysdate - <hrs>" is the beginning
#                                       time of the AWR report and "sysdate"
#                                       is the ending time of the AWR report
#
# Modification history:
#==============================================================================
#
#------------------------------------------------------------------------------
# Set up Oracle environment variables...
#------------------------------------------------------------------------------
export ORACLE_SID=prod
# ORAENV_ASK=NO makes oraenv non-interactive (suitable for cron).
export ORAENV_ASK=NO
. /usr/local/bin/oraenv > /dev/null 2>&1
unset ORAENV_ASK
#
#------------------------------------------------------------------------------
# Verify that the Oracle environment variables and directories are set up...
#------------------------------------------------------------------------------
if [[ "${ORACLE_HOME}" = "" ]]
then
 echo "ORACLE_HOME not set; aborting..."
 exit 1
fi
if [ ! -d ${ORACLE_HOME} ]
then
 echo "Directory \"${ORACLE_HOME}\" not found; aborting..."
 exit 1
fi
if [ ! -d ${ORACLE_HOME}/bin ]
then
 echo "Directory \"${ORACLE_HOME}/bin\" not found; aborting..."
 exit 1
fi
if [ ! -x ${ORACLE_HOME}/bin/sqlplus ]
then
 echo "Executable \"${ORACLE_HOME}/bin/sqlplus\" not found; aborting..."
 exit 1
fi
if [ ! -x ${ORACLE_HOME}/bin/tnsping ]
then
 echo "Executable \"${ORACLE_HOME}/bin/tnsping\" not found; aborting..."
 exit 1
fi
#
#------------------------------------------------------------------------------
# Set shell variables used by the shell script...
#------------------------------------------------------------------------------
# Report name prefix: AWR_<ddmmyyyy>_<HHMM> at the time the script starts.
_Pgm=AWR_`date '+%d%m%Y_%H%M'`
# Colon-delimited configuration file (see header comment for the format).
_RunAwrListFile=${HOME}/.run_awr
if [ ! -r ${_RunAwrListFile} ]
then
 echo "Script configuration file \"${_RunAwrListFile}\" not found; aborting..."
 exit 1
fi
#
#------------------------------------------------------------------------------
# ...loop through the list of database instances specified in the ".run_awr"
# list file...
#
# Entries in this file have the format:
#
#       dbname:rcpt-list:hrs
#
# where:
#       dbname          - is the TNS connect-string of the database instance
#       rcpt-list       - is a comma-separated list of email addresses
#       hrs             - is the number of hours (from the present time)
#                         marking the starting point of the AWR report
#------------------------------------------------------------------------------
grep -v "^#" ${_RunAwrListFile} | awk -F: '{print $1" "$2" "$3}' | \
while read _ListDb _ListRcpts _ListHrs
do
 #----------------------------------------------------------------------
 # If command-line parameters were specified for this script, then they
 # must be a list of databases...
 #----------------------------------------------------------------------
 if (( $# > 0 ))
 then
 #
 #---------------------------------------------------------------
 # If a list of databases was specified on the command-line of
 # this script, then find that database's entry in the ".run_awr"
 # configuration file and retrieve the list of email recipients
 # as well as the #-hrs for the AWR report...
 #---------------------------------------------------------------
 _Db=""
 _Rcpts=""
 _Hrs=""
 for _SpecifiedDb in $*
 do
 #
 if [[ "${_ListDb}" = "${_SpecifiedDb}" ]]
 then
 _Db=${_ListDb}
 _Rcpts=${_ListRcpts}
 _Hrs=${_ListHrs}
 fi
 #
 done
 #
 #---------------------------------------------------------------
 # if the listed DB is not specified on the command-line, then
 # go onto the next listed DB...
 #---------------------------------------------------------------
 if [[ "${_Db}" = "" ]]
 then
 continue
 fi
 #---------------------------------------------------------------
 else    # ...else, if no command-line parameters were specified, then
 # just use the information in the ".run_awr" configuration file...
 #---------------------------------------------------------------
 _Db=${_ListDb}
 _Rcpts=${_ListRcpts}
 _Hrs=${_ListHrs}
 #
 fi
 #
 #----------------------------------------------------------------------
 # Verify that the name of the database is a valid TNS connect-string...
 #----------------------------------------------------------------------
 ${ORACLE_HOME}/bin/tnsping ${_Db} > /dev/null 2>&1
 if (( $? != 0 ))
 then
 echo "\"tnsping ${_Db}\" failed; aborting..."
 exit 1
 fi
 #
 #----------------------------------------------------------------------
 # Create script variables for the output files...
 #----------------------------------------------------------------------
 _TmpSpoolFile="/home/oracle/awr_reports/${_Pgm}_${_Db}.HTML"
 _AwrReportFile="${_Pgm}_${_Db}.html"
 #
 #----------------------------------------------------------------------
 # Call SQL*Plus, retrieve some database instance information, and then
 # call the AWR report as specified...
 #----------------------------------------------------------------------
 ${ORACLE_HOME}/bin/sqlplus -s /nolog << __EOF__ > /dev/null 2>&1
set echo off feedback off timing off pagesize 0 linesize 300 trimspool on
set verify off heading off
connect / as sysdba

col dbid new_value V_DBID noprint
select  dbid from v\$database;

col instance_number new_value V_INST noprint
select  instance_number from v\$instance;

rem Beginning snapshot: oldest snapshot within the last ${_Hrs} hours that
rem belongs to the current instance startup (no instance bounce in between).
col snap_id new_value V_BID
select  min(snap_id) snap_id
from    dba_hist_snapshot
where   end_interval_time >= (sysdate-(${_Hrs}/24))
and     startup_time <= begin_interval_time
and     dbid = &&V_DBID
and     instance_number = &&V_INST;

rem Ending snapshot: the most recent snapshot for this instance.
col snap_id new_value V_EID
select  max(snap_id) snap_id
from    dba_hist_snapshot
where   dbid = &&V_DBID
and     instance_number = &&V_INST;

spool ${_TmpSpoolFile}
select  'BEGIN='||trim(to_char(begin_interval_time, 'HH24:MI')) snap_time
from    dba_hist_snapshot
where   dbid = &&V_DBID
and     instance_number = &&V_INST
and     snap_id = &&V_BID ;
select  'END='||trim(to_char(end_interval_time, 'HH24:MI')) snap_time
from    dba_hist_snapshot
where   dbid = &&V_DBID
and     instance_number = &&V_INST
and     snap_id = &&V_EID ;
spool off

rem No ";" here on purpose: the statement stays in the SQL buffer and is
rem executed by the "/" below, after spooling to the report file has begun.
select output from table(dbms_workload_repository.awr_report_html(&&V_DBID,
&&V_INST, &&V_BID, &&V_EID, 0))

spool /tmp/${_AwrReportFile}
/
exit success
__EOF__
 #
 #----------------------------------------------------------------------
 # Determine if the "start time" and "end time" of the AWR report was
 # spooled out...
 #----------------------------------------------------------------------
 if [ -f ${_TmpSpoolFile} ]
 then
 _BTstamp=`grep '^BEGIN=' ${_TmpSpoolFile} | awk -F= '{print $2}'`
 _ETstamp=`grep '^END=' ${_TmpSpoolFile} | awk -F= '{print $2}'`
 fi
 #
 #----------------------------------------------------------------------
 # Determine if an AWR report was spooled out...
 #----------------------------------------------------------------------
#                if [ -f /tmp/${_AwrReportFile} ]
#                then
#
#                        uuencode /tmp/${_AwrReportFile} ${_AwrReportFile} | \
#                                mailx -s "AWR Report for ${_Db}
#        (${_BTstamp}-${_ETstamp} GMT)" ${_Rcpts}
#
#                fi
#
mv /tmp/${_AwrReportFile} ${_TmpSpoolFile}
done
#
#------------------------------------------------------------------------------
# Finish up...
#------------------------------------------------------------------------------
exit 0

Dataguard : Applying CPU (Critical Patch Update) to a Dataguard Environment

Posted by Sagar Patil

I have One Primary database and 2 Physical Standby Databases under RHEL 5. Here is a process to patch them with JULY CPU update “Critical Patch Update Release 10.2.0.4 for UNIX Released July 13, 2010”

At Primary Site
SQL> show parameter dg_broker_start

NAME                                 TYPE        VALUE
———————————— ———– ——————————
dg_broker_start                      boolean     TRUE

SQL> alter system switch logfile;
System altered.

Make sure the Logs are shipped and both Standby Database and applied. Following SQLs could be used to locate the log sequences on all databases.

Locate Primary Log Sequence :   Select Archived_Seq# From V$Archive_Dest_Status Where Dest_Id=1;

Locate Standby Log Sequence :  Select Archived_Seq#, Applied_Seq# From V$Archive_Dest_Status Where Dest_Id=2;

Locate DR Log Sequence :   Select Archived_Seq#, Applied_Seq# From V$Archive_Dest_Status Where Dest_Id=3;

Find out existing Patch details using following queries :

SELECT comments, action_time, id "PATCH_NUMBER", version FROM sys.registry$history WHERE action = 'CPU';

COMMENTS ACTION_TIME PATCH_NUMBER VERSION
view recompilation 42:39.2 6452863
view recompilation 59:20.3 6452863
view recompilation 23:58.7 6452863
view recompilation 56:19.9 6452863

SELECT comments, action_time, id PATCH_NUMBER, version FROM sys.registry$history

COMMENTS ACTION_TIME PATCH_NUMBER VERSION
Upgraded from 10.2.0.1.0 40:28.8 10.2.0.4.0
CPUApr2009 46:06.0 4 10.2.0.4
view recompilation 42:39.2 6452863
CPUOct2009 56:35.7 6 10.2.0.4
view recompilation 59:20.3 6452863
CPUJan2010 01:47.4 6 10.2.0.4
view recompilation 23:58.7 6452863

Backup Primary database  & then shutdown Primary Database

SQL> shutdown immediate;
Database closed.
Database dismounted.
ORACLE instance shut down.

Take TAR backup of ORACLE_HOME

$tar -czvf ORACLE_BKUP.tgz $ORACLE_HOME

At Standby & DR Site
[oracle@Standby bdump]$ sqlplus /nolog

SQL*Plus: Release 10.2.0.4.0 – Production on Mon Mar 29 18:26:46 2010

Copyright (c) 1982, 2007, Oracle.  All Rights Reserved.

SQL> conn / as sysdba
Connected.
SQL> select NAME,open_mode,GUARD_STATUS,DATABASE_ROLE from v$database;

NAME      OPEN_MODE  GUARD_S DATABASE_ROLE
——— ———- ——- —————-
ROD  MOUNTED    NONE    PHYSICAL STANDBY

SQL> shutdown immediate;
ORA-01109: database not open
Database dismounted.
ORACLE instance shut down. (Took 5 mins + to shutdown)

Take TAR backup of Standby ORACLE_HOME

$tar -czvf ORACLE_BKUP.tgz $ORACLE_HOME

[oracle@ ~]$ lsnrctl stop LISTENER
[oracle@ ~]$ ps -ef | grep oracle
oracle    3861  3802  0 18:45 pts/0    00:00:00 -bash
oracle    7309  7287  0 20:30 pts/1    00:00:01 -bash
oracle    8907  7309  0 21:19 pts/1    00:00:00 ps -ef
oracle    8908  7309  0 21:19 pts/1    00:00:00 grep oracle

cd $ORACLE_HOME/patches
[oracle@patches]$ ls -lrt
total 12
drwxr-xr-x  5 oracle oracle 4096 Feb 15  2010 9119284
drwxr-xr-x 37 oracle oracle 4096 Feb 15  2010 9119226
drwxr-xr-x 42 oracle oracle 4096 Jul  6 16:53 9655017

[oracle@patches]$ cd 9655017

[oracle@patches]$ $ORACLE_HOME/OPatch/opatch napply -skip_subset -skip_duplicate

Stop here go back to Primary now

oracle@ ~]$ lsnrctl stop LISTENER
[oracle@ ~]$ ps -ef | grep oracle
oracle    3861  3802  0 18:45 pts/0    00:00:00 -bash
oracle    7309  7287  0 20:30 pts/1    00:00:01 -bash
oracle    8907  7309  0 21:19 pts/1    00:00:00 ps -ef
oracle    8908  7309  0 21:19 pts/1    00:00:00 grep oracle

cd $ORACLE_HOME/patches
[oracle@patches]$ ls -lrt
total 12
drwxr-xr-x  5 oracle oracle 4096 Feb 15  2010 9119284
drwxr-xr-x 37 oracle oracle 4096 Feb 15  2010 9119226
drwxr-xr-x 42 oracle oracle 4096 Jul  6 16:53 9655017

[oracle@patches]$ cd 9655017

[oracle@patches]$ $ORACLE_HOME/OPatch/opatch napply -skip_subset -skip_duplicate

For each database instance running on the Oracle home being patched, connect to the database using SQL*Plus.

cd $ORACLE_HOME/rdbms/admin
sqlplus /nolog
SQL> CONNECT / AS SYSDBA
SQL> STARTUP
SQL> @catbundle.sql cpu apply
SQL> — Execute the next statement only if this is the first 10.2.0.4 CPU applied in the Oracle home.
SQL> @utlrp.sql
SQL> QUIT

Run the view recompilation script. Note that this script is run with the database in upgrade mode, which restricts connections as SYSDBA.

cd $ORACLE_HOME/cpu/view_recompile
sqlplus /nolog
SQL> CONNECT / AS SYSDBA
SQL> SHUTDOWN IMMEDIATE
SQL> STARTUP UPGRADE
SQL> @view_recompile_jan2008cpu.sql
SQL> SHUTDOWN;
SQL> STARTUP;
SQL> QUIT

Startup Primary Listener

At Standby & DR Site

SQL> startup nomount;
ORACLE instance started.
Total System Global Area 2147483648 bytes
Fixed Size 2030296 bytes
Variable Size 503317800 bytes
Database Buffers 1627389952 bytes
Redo Buffers 14745600 bytes

SQL> show parameter dg_broker_start
NAME TYPE VALUE
———————————— ———– ——————————
dg_broker_start boolean TRUE

SQL> alter database mount standby database;
Database altered.

SQL> recover managed standby database disconnect from session;
Media recovery complete.

Start the listeners and monitor the alert logs on the standby servers for log recovery

Redo Shipping Client Connected as PUBLIC
— Connected User is Valid
RFS[3]: Assigned to RFS process 7804
RFS[3]: Identified database type as ‘physical standby’
RFS[3]: Archived Log: ‘/u06/oradata/prod/arch/1_65479_684936861.arc’
Wed Sep 29 22:08:08 2010
Media Recovery Log /u06/oradata/prod/arch/1_65469_684936861.arc
Wed Sep 29 22:08:08 2010
RFS[1]: Archived Log: ‘/u06/oradata/prod/arch/1_65477_684936861.arc’
Wed Sep 29 22:08:09 2010
Media Recovery Log /u06/oradata/prod/arch/1_65470_684936861.arc
Media Recovery Log /u06/oradata/prod/arch/1_65471_684936861.arc
Media Recovery Log /u06/oradata/prod/arch/1_65472_684936861.arc
Media Recovery Log /u06/oradata/prod/arch/1_65473_684936861.arc
Media Recovery Log /u06/oradata/prod/arch/1_65474_684936861.arc
Media Recovery Log /u06/oradata/prod/arch/1_65475_684936861.arc
Media Recovery Log /u06/oradata/prod/arch/1_65476_684936861.arc

Finally, run the following commands to make sure logs are applied at the standby databases:

Locate Primary Log Sequence :   Select Archived_Seq# From V$Archive_Dest_Status Where Dest_Id=1;

Locate Standby Log Sequence :  Select Archived_Seq#, Applied_Seq# From V$Archive_Dest_Status Where Dest_Id=2;

Locate DR Log Sequence :   Select Archived_Seq#, Applied_Seq# From V$Archive_Dest_Status Where Dest_Id=3;

Sample log files attached  opatch_history-1.txt & opatch.log

Script to Auto Start Primary/Standby Database under Linux

Posted by Sagar Patil

The following represents the Oracle recommended method for automating database startup and shutdown of Oracle 10g instances.

Once the instance is created, edit the “/etc/oratab” file setting the restart flag for each instance to ‘Y’.
dev:/u01/app/oracle/product/10.2.0:N
Change N to Y
dev:/u01/app/oracle/product/10.2.0:Y

Next, create a file called “/etc/init.d/dbora” as the root user, containing the following.

#!/bin/sh
# chkconfig: 345 99 10
# description: Oracle auto start-stop script.
#
# Set ORA_HOME to be equivalent to the $ORACLE_HOME
# from which you wish to execute dbstart and dbshut;
#
# Set ORA_OWNER to the user id of the owner of the
# Oracle database in ORA_HOME.
# NOTE: typographic quotes and en-dashes from the original posting have been
# replaced with ASCII quotes and "su -" so the script actually runs.

ORA_HOME=/u01/app/oracle/product/10.2.0/
ORA_OWNER=oracle

# Abort if dbstart is not present in this ORACLE_HOME.
if [ ! -f $ORA_HOME/bin/dbstart ]
then
echo "Oracle startup: cannot start"
exit
fi

case "$1" in
'start')
# Start the Oracle databases:
# The following command assumes that the oracle login
# will not prompt the user for any values
su - $ORA_OWNER -c "$ORA_HOME/bin/dbstart $ORA_HOME"
;;
'stop')
# Stop the Oracle databases:
# The following command assumes that the oracle login
# will not prompt the user for any values
su - $ORA_OWNER -c "$ORA_HOME/bin/dbshut $ORA_HOME"
;;
esac

set the chmod command to set the privileges to 750
chmod 750 /etc/init.d/dbora

Associate the dbora service with the appropriate run levels and set it to auto-start using the following command.
chkconfig --add dbora

The default TNS listener (LISTENER) will be started by dbshut/dbstart scripts.

If you are using customized listener name like LISTENER_TEST, LISTENER_PRE then please edit dbshut and dbstart lines as below

— At DBSHUT
$ORACLE_HOME_LISTNER/bin/lsnrctl LISTENER stop    “Change to”
$ORACLE_HOME_LISTNER/bin/lsnrctl LISTENER_TEST stop

–AT DBSTART
$ORACLE_HOME_LISTNER/bin/lsnrctl LISTENER start    “Change to”
$ORACLE_HOME_LISTNER/bin/lsnrctl LISTENER_TEST start

The relevant instances should now startup/shutdown automatically at system startup/shutdown

Trend Oracle log history : How much archives created per day/week or in an hour

Posted by Sagar Patil

Count of archive files and size of the redo generated by day

-- Redo generated per day: number of archived logs plus an approximate MB
-- figure, estimated as (daily log count) x (average online redo log size).
SELECT d.*,
       ROUND (d.count# * s.avg# / 1024 / 1024) daily_avg_mb
  FROM (  SELECT TO_CHAR (first_time, 'YYYY-MM-DD') day,
                 COUNT (1) count#,
                 MIN (recid) min#,
                 MAX (recid) max#
            FROM v$log_history
        GROUP BY TO_CHAR (first_time, 'YYYY-MM-DD')
        ORDER BY 1 DESC) d
       CROSS JOIN
       -- One row: size statistics over the current online redo logs.
       (SELECT AVG (bytes) avg#,
               COUNT (1) count#,
               MAX (bytes) max_bytes,
               MIN (bytes) min_bytes
          FROM v$log) s;
DAY LOG COUNT DAILY_AVG_MB
02/07/2010 98 4480
01/07/2010 126 5760
30/06/2010 54 2469
29/06/2010 28 1280
28/06/2010 37 1691
27/06/2010 14 640
26/06/2010 14 640
25/06/2010 14 640
24/06/2010 19 869
23/06/2010 14 640

Copy this table into Excel & click on Chart Wizard. Select X,Y co-ordinates and you will get a nice picture like this

Count of archive files and size of the redo generated every hour

-- Hourly redo activity matrix: one row per day, one column per hour of
-- day (00-23); each cell is the number of log switches in that hour.
-- Fix: the 08:00 column was mis-aliased "0" in the original.
  SELECT   TO_CHAR (first_time, 'YYYY-MON-DD') day,
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '00', 1, 0)), '99') "00",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '01', 1, 0)), '99') "01",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '02', 1, 0)), '99') "02",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '03', 1, 0)), '99') "03",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '04', 1, 0)), '99') "04",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '05', 1, 0)), '99') "05",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '06', 1, 0)), '99') "06",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '07', 1, 0)), '99') "07",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '08', 1, 0)), '99') "08",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '09', 1, 0)), '99') "09",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '10', 1, 0)), '99') "10",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '11', 1, 0)), '99') "11",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '12', 1, 0)), '99') "12",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '13', 1, 0)), '99') "13",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '14', 1, 0)), '99') "14",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '15', 1, 0)), '99') "15",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '16', 1, 0)), '99') "16",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '17', 1, 0)), '99') "17",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '18', 1, 0)), '99') "18",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '19', 1, 0)), '99') "19",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '20', 1, 0)), '99') "20",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '21', 1, 0)), '99') "21",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '22', 1, 0)), '99') "22",
           TO_CHAR (SUM (DECODE (TO_CHAR (first_time, 'HH24'), '23', 1, 0)), '99') "23"
    FROM   v$log_history
GROUP BY   TO_CHAR (first_time, 'YYYY-MON-DD')
ORDER BY   day DESC;

What is Oracle OPatch, How to use OPatch & List patches

Posted by Sagar Patil

OPatch is an Oracle-supplied utility that assists you with the process of applying interim patches to Oracle software. OPatch is a Java-based utility which requires the Oracle Universal Installer to be installed. It is platform independent and runs on all supported operating systems.

OPatch supports the following:
* Applying an interim patch.
* Rolling back the application of an interim patch.
* Detecting conflict when applying an interim patch after previous interim patches have been applied. It also suggests the best options to resolve a conflict.
* Reporting on installed products and interim patch.

Prior to release 10.2 (OPatch for 10.2 is only compatible with 10.2 and nothing earlier), OPatch was available from MetaLink as a patch in and of itself (p2617419_10102_GENERIC.zip for release 10.1.0.2). With 10.2, OPatch is installed as part of the RDBMS software.

Opatch has several options to execute it:
* “lsinventory” = Adding the lsinventory option to opatch produces a report saying the patches that were applied
* “-report” = Will report to screen and will NOT apply the real patch. Using “-report” is a good way of performing nondestructive testing of the patch installation process.
* “rollback”= Will undo the patch that was applied. Example: opatch rollback –id 4667809

Applying a patch is simple as:
* opatch lsinventory    -> To see he list of patches already installed
* opatch apply <patchid>     –> To REALLY apply the patch

Example Applying Patch 4751921

After the Patch is Installed:
1.Log in as sys as sysdba.
2. cd $ORACLE_HOME/rdbms/admin
3. spool catpatch_database_name
4. shutdown immediate
5. startup migrate
6. @catpatch.sql   ( this takes at least 1 hour ). After catpatch completed,
7. select object_name,owner from dba_objects where status='INVALID';
( YOU WILL GET BETWEEN 230-3300 INVALID OBJECTS , DON’T PANIC )
8. @utlrp.sql
9. select object_name,owner from dba_objects where status='INVALID';   ( YOU WILL GET near 0 invalid objects )
10. shutdown immediate;
11. startup


Listing Patches

All patches that are installed with Oracle’s OPatch Utility (Oracle’s Interim Patch Installer) can be listed by invoking the opatch command with the lsinventory option. Here is an example:

$ cd $ORACLE_HOME/OPatch
$ opatch lsinventory
Invoking OPatch 10.2.0.1.0
Oracle interim Patch Installer version 10.2.0.1.0
Copyright (c) 2005, Oracle Corporation.  All rights reserved..

Installed Top-level Products (1):
Oracle Database 10g                                           10.2.0.1.0
There are 1 products installed in this Oracle Home.
There are no Interim patches installed in this Oracle Home.
OPatch succeeded.

Another Method using SYS.REGISTRY$HISTORY Table

Since January 2006, contains 1 row for most recent CPU patch applied. A method for determining if CPU patch is applied

SELECT comments, action_time, id "PATCH_NUMBER", version FROM sys.registry$history WHERE action = 'CPU';

COMMENTS ACTION_TIME PATCH_NUMBER VERSION
view recompilation 42:39.2 6452863
view recompilation 59:20.3 6452863
view recompilation 23:58.7 6452863

SELECT comments, action_time, id "PATCH_NUMBER", version FROM sys.registry$history;

COMMENTS ACTION_TIME PATCH_NUMBER VERSION
Upgraded from 10.2.0.1.0 40:28.8 10.2.0.4.0
CPUApr2009 46:06.0 4 10.2.0.4
view recompilation 42:39.2 6452863
CPUOct2009 56:35.7 6 10.2.0.4
view recompilation 59:20.3 6452863
CPUJan2010 01:47.4 6 10.2.0.4
view recompilation 23:58.7 6452863

One other useful Opatch feature

Along with the log and inventory files, Opatch output includes a history file, which contains date and action performed information. The history file is named opatch_history.txt and is located in the $OH\cfgtools\opatch directory. As an example of its contents, the “rollback –help” action performed earlier was recorded as:

[oracle@ opatch]$ pwd
/u01/app/oracle/product/10.2.0/cfgtoollogs/opatch
[oracle@opatch]$ ls -lrt
-rw-r–r– 1 oracle oracle  98608 May 29  2009 opatch2009-05-29_11-37-50AM.log
-rw-r–r– 1 oracle oracle 103814 Dec 14  2009 opatch2009-12-14_20-49-31PM.log
-rw-r–r– 1 oracle oracle   5838 Mar 11  2010 opatch2010-03-11_16-01-00PM.log
-rw-r–r– 1 oracle oracle  33878 Mar 29  2010 opatch2010-03-29_19-53-07PM.log

vi opatch2010-03-29_19-53-07PM.log

Applying patch 9173244…
INFO:Starting Apply Session at Mon Mar 29 19:53:42 BST 2010
INFO:ApplySession applying interim patch ‘9173244’ to OH ‘/u01/app/oracle/product/10.2.0’
INFO:Starting to apply patch to local system at Mon Mar 29 19:53:42 BST 2010
INFO:Start the Apply initScript at Mon Mar 29 19:53:42 BST 2010
INFO:Finish the Apply initScript at Mon Mar 29 19:53:42 BST 2010
INFO:OPatch detected ARU_ID/Platform_ID as 226
INFO:Start saving patch at Mon Mar 29 19:53:44 BST 2010

Database hanging due to TX-Transaction (row lock contention)

Posted by Sagar Patil

My Statspack has following details.

Enqueue activity DB/Inst: Snaps: 1791-1808
-> only enqueues with waits are shown
-> Enqueue stats gathered prior to 10g should not be compared with 10g data
-> ordered by Wait Time desc, Waits desc

Above list indicates oracle was waiting about 4 secs to acquire a lock. Let’s look at row contention and enqueues closely.

I was looking on google and stumbled across exceptional article “Diagnose performance problems, using the wait interface in Oracle 10g.”
John, the DBA at Acme Bank, is on the phone with an irate user, Bill, who complains that his database session is hanging, a complaint not unfamiliar to most DBAs. What can John do to address Bill’s complaint?
Acme Bank’s database is Oracle Database 10g, so John has many options. Automatic Database Diagnostic Manager (ADDM), new in Oracle Database 10g, can tell John about the current overall status and performance of the database, so John starts with ADDM to determine whether what Bill’s session is experiencing is the result of a databasewide issue. The ADDM report identifies no databasewide issues that could have this impact on Bill’s session, so John moves on to the next option.

One way to diagnose session-level events such as Bill’s is to determine whether the session is waiting for anything, such as the reading of a block of a file, a lock on a table row, or a latch. Oracle has provided mechanisms to display the waits happening inside the database since Oracle7, and during the last several years, the model has been steadily perfected, with more and more diagnostic information added to it. In Oracle Database 10g, which makes significantly improved wait event information available, diagnosing a session slowdown has become even easier. This article shows you how to use the wait events in Oracle Database 10g to identify bottlenecks.

Session Waits : How can John the DBA determine what’s causing Bill’s session to hang? Actually, the session is not hanging; it’s waiting for an event to happen, and that’s exactly what John checks for.

To continue his investigation, John could use Oracle Enterprise Manager or he could directly access V$ views from the command line. John has a set of scripts he uses to diagnose these types of problems, so he uses the command line.

John queries the V$SESSION view to see what Bill’s session is waiting for. (Note that John filters out all idle events.)

-- Non-idle waiting sessions: who is waiting, on what event, who blocks
-- them, and for how long. (Fixed: typographic quotes pasted from the
-- blog replaced with straight single quotes so the literals parse.)
select sid, username, event, blocking_session,
seconds_in_wait, wait_time
from v$session where state in ('WAITING')
and wait_class != 'Idle';

The output follows, in vertical format.
SID : 270
USERNAME : BILL
EVENT : enq: TX – row lock
contention
BLOCKING_SESSION : 254
SECONDS_IN_WAIT : 83
WAIT_TIME : 0

Looking at this information, John immediately concludes that Bill’s session with SID 270 is waiting for a lock on a table and that that lock is held by session 254 (BLOCKING_SESSION).

But John wants to know which SQL statement is causing this lock. He can find out easily, by issuing the following query joining the V$SESSION and V$SQL views:

-- SQL currently (or most recently) executed by the waiter and the
-- blocker: match either the session's current or previous SQL id.
SELECT sid, sql_text
  FROM v$session s, v$sql q
 WHERE sid IN (254, 270)
   AND q.sql_id IN (s.sql_id, s.prev_sql_id);

Listing 1 shows the result of the query. And there (in Listing 1) John sees it?both sessions are trying to update the same row. Unless session 254 commits or rolls back, session 270 will continue to wait for the lock. He explains this to Bill, who, considerably less irate now, decides that something in the application has gone awry and asks John to kill session 254 and release the locks.

Wait Classes After John kills the blocking session, Bill’s session continues but is very slow. John decides to check for other problems in the session. Again, he checks for any other wait events, but this time he specifically checks Bill’s session.

In Oracle Database 10g, wait events are divided into various wait classes, based on their type. The grouping of events lets you focus on specific classes and exclude nonessential ones such as idle events. John issues the following against the V$SESSION_WAIT_CLASS view:

-- Per-class wait summary for one session: waits and accumulated time
-- (centiseconds) in each wait class since instance start.
SELECT wait_class_id,
       wait_class,
       total_waits,
       time_waited
  FROM v$session_wait_class
 WHERE sid = 270;

The output, shown in Listing 2, shows the wait classes and how many times the session has waited for events in each class. It tells John that application-related waits such as those due to row locks have occurred 17,760 times, for a total of 281,654 centiseconds (cs)?hundredths of a second?since the instance started. John thinks that this TIME_WAITED value is high for this session. He decides to explore the cause of these waits in the application wait class. The times for individual waits are available in the V$SYSTEM_EVENT view. He issues the following query to identify individual waits in the application wait class (class id 4217450380):

-- Break the Application wait class (id 4217450380) down into its
-- individual system-wide wait events.
SELECT event,
       total_waits,
       time_waited
  FROM v$system_event e, v$event_name n
 WHERE n.event_id = e.event_id
   AND wait_class_id = 4217450380;

Listing 3 shows the output of this query. It shows that lock contentions (indicated by the event enq: TX – row lock contention) constitute the major part of the waiting time in the application wait class. This concerns John. Is it possible that a badly written application made its way through to the production database, causing these lock contention problems?

Being the experienced DBA that he is, however, John does not immediately draw that conclusion. The data in Listing 3 merely indicates that the users have experienced lock-contention-related waits a total of 2,275 times, for 280,856 cs. It is possible that mostly 1- or 2-cs waits and only one large wait account for the total wait time, and in that case, the application isn’t faulty. A single large wait may be some freak occurrence skewing the data and not representative of the workload on the system. How can John determine whether a single wait is skewing the data?

Oracle 10g provides a new view, V$EVENT_HISTOGRAM, that shows the wait time periods and how often sessions have waited for a specific time period. He issues the following against V$EVENT_HISTOGRAM:

-- Wait-time distribution for row lock contention. Fixed: the event
-- name literal must use straight quotes and an ASCII hyphen
-- ('enq: TX - row lock contention'); the pasted curly quotes/en-dash
-- would match nothing (or fail to parse).
select wait_time_milli bucket, wait_count
from v$event_histogram
where event =
'enq: TX - row lock contention';
The output looks like this:

BUCKET WAIT_COUNT
———– ———-
1 252
2 0
4 0
8 0
16 1
32 0
64 4
128 52
256 706
512 392
1024 18
2048 7
4096 843

The V$EVENT_HISTOGRAM view shows the buckets of wait times and how many times the sessions waited for a particular event?in this case, a row lock contention?for that duration. For example, sessions waited 252 times for less than 1 millisecond (ms), once less than 16
ms but more than 1 ms, and so on. The sum of the values of the WAIT_COUNT column is 2,275, the same as the value shown in the event enq: TX – row lock contention, shown in Listing 3. The V$EVENT_HISTOGRAM view shows that the most waits occurred in the ranges of 256 ms, 512 ms, and 4,096 ms, which is sufficient evidence that the applications are experiencing locking issues and that this locking is the cause of the slowness in Bill’s session. Had the view showed numerous waits in the 1-ms range, John wouldn’t have been as concerned, because the waits would have seemed normal.

Time Models : Just after John explains his preliminary findings to Bill, Lora walks in with a similar complaint: Her session SID 355 is very slow. Once again, John looks for the events the session is waiting for, by issuing the following query against the V$SESSION_WAIT view:

-- Current wait event for the slow session (SID 355).
SELECT event,
       seconds_in_wait,
       wait_time
  FROM v$session_wait
 WHERE sid = 355;

The output, shown in Listing 4, shows a variety of wait events in Lora’s session, including latch contention, which may be indicative of an application design problem. But before he sends Lora off with a prescription for an application change, John must support his theory that bad application design is the cause of the poor performance in Lora’s session. To test this theory, he decides to determine whether the resource utilization of Lora’s session is extraordinarily high and whether it slows not only itself but other sessions too.

In the Time Model interface of Oracle Database 10g, John can easily view details of time spent by a session in various activities. He issues the following against the V$SESS_TIME_MODEL view:

-- Time-model breakdown for SID 355: microseconds spent per activity
-- (parse, SQL execution, PL/SQL, etc.).
SELECT stat_name,
       value
  FROM v$sess_time_model
 WHERE sid = 355;

The output, shown in Listing 5, displays the time (in microseconds) spent by the session in various places. From this output, John sees that the session spent 503,996,336 microseconds parsing (parse time elapsed), out of a total of 878,088,366 microseconds on all SQL execution (sql execute elapsed time), or 57 percent of the SQL execution time, which indicates that a cause of this slowness is high parsing. John gives Lora this information, and she follows up with the application design team.

OS Statistics :While going over users’ performance problems, John also wants to rule out the possibility of the host system’s being a bottleneck. Before Oracle 10g, he could use operating system (OS) utilities such as sar and vmstat and extrapolate the metrics to determine contention. In Oracle 10g, the metrics at the OS level are collected automatically in the database. To see potential host contention, John issues the following query against the V$OSSTAT view:

select * from v$osstat;

The output in Listing 6 shows the various elements of the OS-level metrics collected. All time elements are in cs. From the results in Listing 6, John sees that the single CPU of the system has been idle for 51,025,805 cs (IDLE_TICKS) and busy for 2,389,857 cs (BUSY_TICKS), indicating a CPU that is about 4 percent busy. From this he concludes that the CPU is not a bottleneck on this host. Note that if the host system had more than one CPU, the columns whose headings had the prefix AVG_, such as AVG_IDLE_TICKS, would show the average of these metrics over all the CPUs.

Active Session History: So far the users have consulted John exactly when each problem occurred, enabling him to peek into the performance views in real time. This good fortune doesn’t last long?Janice comes to John complaining about a recent performance problem. When John queries the V$SESSION view, the session is idle, with no events being waited for. How can John check which events Janice’s session was waiting for when the problem occurred?

Oracle 10g collects the information on active sessions in a memory buffer every second. This buffer, called Active Session History (ASH), which can be viewed in the V$ACTIVE_SESSION_HISTORY dynamic performance view, holds data for about 30 minutes before being overwritten with new data in a circular fashion. John gets the SID and SERIAL# of Janice’s session and issues this query against the V$ACTIVE_SESSION_HISTORY view to find out the wait events for which this session waited in the past.

-- Active Session History for one session: sampled wait events with
-- their timestamps, so past waits can be tied to the reported problem.
SELECT sample_time,
       event,
       wait_time
  FROM v$active_session_history
 WHERE session_id = 271
   AND session_serial# = 5;

The output, excerpted in Listing 7, shows several important pieces of information. First it shows SAMPLE_TIME?the time stamp showing when the statistics were collected?which lets John tie the occurrence of the performance problems to the wait events. Using the data in the V$ACTIVE_SESSION_HISTORY view, John sees that at around 3:17 p.m., the session waited several times for the log buffer space event, indicating that there was some problem with redo log buffers. To further aid the diagnosis, John identifies the exact SQL statement executed by the session at that time, using the following query of the V$SQL view:

-- Find the SQL the session was running at the sampled moment.
-- Fixed: straight quotes around the timestamp literal (curly quotes
-- from the blog paste would not parse).
-- NOTE(review): the string-vs-TIMESTAMP comparison relies on the
-- session NLS_TIMESTAMP_FORMAT matching 'DD-MON-RR HH.MI.SS.FF PM'.
select sql_text, application_wait_time
from v$sql
where sql_id in (
select sql_id
from v$active_session_history
where sample_time =
'22-FEB-04 03.17.31.188 PM'
and session_id = 271
and session_serial# = 5
);

The output is shown in Listing 8.
The column APPLICATION_WAIT_TIME shows how long the sessions executing that SQL waited for the application wait class. In addition to the SQL_ID, the V$ACTIVE_SESSION_HISTORY view also lets John see specific rows being waited for (in case of lock contentions), client identifiers, and much more.

What if a user comes to John a little late, after the data is overwritten in this view? When purged from this dynamic performance view, the data is flushed to the Active Workload Repository (AWR), a disk-based repository. The purged ASH data can be seen in the DBA_HIST_ACTIVE_SESSION_HIST view, enabling John to see the wait events of a past session. The data in the AWR is purged by default after seven days.

Conclusion : Oracle Database 10g introduces a number of enhancements designed to automate and simplify the performance diagnostic process. Wait event information is more elaborate in Oracle Database 10g and provides deeper insight into the cause of problems, making the diagnosis of performance problems a breeze in most cases, especially in proactive performance tuning.

Identifying Blocking Locks/ Contention & Resolving Database Hanging Issue

Posted by Sagar Patil

One of my 10.2.0.1 databases was hanging despite being allocated ample resources. The CPU, memory and disk accesses were normal, but under heavy load the server would freeze for no apparent reason. I had to raise a TAR with Oracle.

My Statspack report had following details

—–Enqueue activity DB/Inst:
-> only enqueues with waits are shown
-> Enqueue stats gathered prior to 10g should not be compared with 10g data
-> ordered by Wait Time desc, Waits desc

Enqueue Type (Request Reason)
——————————————————————————
Requests Succ Gets Failed Gets Waits Wt Time (s) Av Wt Time(ms)
———— ———— ———– ———– ———— ————–
TX-Transaction (row lock contention)
6,611 6,615 0 6,615 32,521 4,916.24

Above list indicates oracle was waiting about 4 secs to get lock . Enqueue is a SGA memory structure used by Oracle.

Oracle support suggested locating the hot SQL and the hot block (table/index/cluster) to resolve this issue.

There are some articles on google but this PDF “Resolving_Oracle_Latch_Contention.pdf” from Guy Harrison at Quest is interesting …

Avoiding Hot Blocks
Cache buffers chains latch contention is one of the most intractable types of latch contention. There are a couple of things you can do at the application level to reduce the severity of this type of contention.
Firstly, identify the blocks that are hot. Metalink note 163424.1, “How to Identify a Hot Block Within The Database”, describes how to do this. Having identified the identity of the hot block, you will most likely find that it is an index root or branch block.

It was suggested by Oracle support to look at locks and provide them historical information about them. I couldn’t see any blocking locks in TOAD or deadlocks in the alert log, so I was sure locks were not the issue, but I had to do what Oracle support suggested. So here is what I did.

Unix Shell script Process Stack diagnosis.
#!/usr/bin/ksh
# Create Oracle Trace
# Script 3 : Get stack dumps of the PMON, SMON & LGWR background processes
# Output files <SID>_PMON.log, <SID>_SMON.log, <SID>_LGWR.log are created
# in the current directory.
# Usage: ./option3.sh <SID>
# Fixed: typographic quotes from the blog paste replaced with straight
# quotes so sed/awk receive their scripts correctly.
#################################################################################
# sed '/pts/d' drops terminal-attached processes (including this
# pipeline's own grep); awk prints the PID column.
export PID_PMON=$(ps -ef | grep ora_pmon_$1 | sed -e '/pts/d' | awk '{print $2}')
export PID_SMON=$(ps -ef | grep ora_smon_$1 | sed -e '/pts/d' | awk '{print $2}')
export PID_LGWR=$(ps -ef | grep ora_lgwr_$1 | sed -e '/pts/d' | awk '{print $2}')
echo $PID_PMON
echo $PID_SMON
echo $PID_LGWR
/usr/bin/pstack $PID_PMON > $1_PMON.log
/usr/bin/pstack $PID_SMON > $1_SMON.log
/usr/bin/pstack $PID_LGWR > $1_LGWR.log

-- SQL script to take repeated system state dumps (level 266).
-- Execute from a background SQL*Plus session on the live system, then
-- upload the trace files created in $ORACLE_HOME/udump to Oracle support.
-- Fixed: '#' header lines are not valid SQL comments ('--' is), and the
-- typographic quotes around the event string would not parse.
set term off;
set scan off;
set define off;
alter session set max_dump_file_size=unlimited;
alter session set events 'immediate trace name systemstate level 266';
exec sys.dbms_lock.sleep(60);
alter session set max_dump_file_size=unlimited;
alter session set events 'immediate trace name systemstate level 266';
exec sys.dbms_lock.sleep(60);
alter session set max_dump_file_size=unlimited;
alter session set events 'immediate trace name systemstate level 266';
exec sys.dbms_lock.sleep(60);
alter session set max_dump_file_size=unlimited;
alter session set events 'immediate trace name systemstate level 266';
set term on;
set scan on;
set define on;

I have also created a small pl/sql procedure to locate blocking locks on system. This procedure will create a txt spool file with relevant details. Monitor these spool files over period of time to see which is a hot object.

CREATE OR REPLACE PROCEDURE locate_blocking_sid AS
-- Writes a timestamped report of currently blocked sessions
-- (state = WAITING, non-idle wait class) together with the SQL text of
-- both the waiter and its blocker. Run periodically and compare the
-- spool files over time to spot hot objects.
-- Fixes vs the pasted original: straight quotes throughout (the curly
-- quotes would not compile), and EXIT WHEN moved directly after FETCH
-- so a failed fetch cannot re-emit the previous row's stale msg value.
   t_sid             NUMBER;
   t_sql_text        VARCHAR2 (200);
   msg               VARCHAR2 (2000);
   maxlinesize       NUMBER := 32767;
   log_file_handle   UTL_FILE.file_type;

   -- Blocked sessions: who is waiting, on what, and for how long.
   CURSOR c1 IS
      SELECT sid, blocking_session, event, seconds_in_wait, wait_time
        FROM sys.v_$session
       WHERE state IN ('WAITING')
         AND wait_class != 'Idle';
   c1_rec c1%ROWTYPE;

   -- Current or previous SQL text for the waiter and its blocker.
   CURSOR c2 (t_sid VARCHAR2, t_blocking_session VARCHAR2) IS
      SELECT ' ' || sql_text || sid
        FROM sys.v_$session s, v$sql q
       WHERE sid IN (t_sid, t_blocking_session)
         AND (q.sql_id = s.sql_id OR q.sql_id = s.prev_sql_id);
BEGIN
   -- One spool file per run, named with the run timestamp.
   log_file_handle :=
      UTL_FILE.fopen ('/oracle/app/oracle/admin/startp/udump',
                      'blocking_locks ' || TO_CHAR (SYSDATE, 'DD-MM-YY HH24:MI') || '.txt',
                      'w',
                      maxlinesize);
   UTL_FILE.PUT_LINE (log_file_handle,
                      ' SQL Running & V$ Session ID ;' || ' Event for Wait ;'
                      || ' Seconds in Wait ;' || ' Wait Time');
   OPEN c1;
   LOOP
      FETCH c1 INTO c1_rec;
      EXIT WHEN c1%NOTFOUND;
      BEGIN
         OPEN c2 (c1_rec.sid, c1_rec.blocking_session);
         LOOP
            FETCH c2 INTO msg;
            EXIT WHEN c2%NOTFOUND;
            msg := msg || ' ; ' || TRIM (c1_rec.event) || ' ;'
                   || c1_rec.seconds_in_wait || ' ; ' || c1_rec.wait_time;
            UTL_FILE.PUT_LINE (log_file_handle, msg);
         END LOOP;
         CLOSE c2;
      END;
   END LOOP;
   CLOSE c1;
   UTL_FILE.FCLOSE (log_file_handle);
END;
/

The Oracle 10g Scheduler

Posted by Sagar Patil

Oracle 10g’s DBMS_SCHEDULER provides significant improvements over DBMS_JOB for scheduling jobs and tasks. Let’s take an overview of new functionalities that the Oracle Scheduler provides for DBAs.

There are three basic components to DBMS scheduler: Programs,Schedules & Jobs

Programs : A program defines what Scheduler will execute. A program’s attributes include its name, its type (e.g. a PL/SQL procedure or anonymous block), and the action it is expected to perform.
Schedules : A schedule defines when and at what frequency the Scheduler will execute a particular set of tasks.
Jobs : A job assigns a specific task to a specific schedule. A job therefore tells the schedule which tasks – either one-time tasks created “on the fly,” or predefined programs – are to be run.

Job Classes: The Scheduler provides the capability to group together jobs that have similar resource demands into job classes.

Windows : Most businesses have periods of peak and off-peak. The scheduler provides concept of windows to assign resources to job classes.

Window Groups : The Scheduler also allows windows with similar scheduling properties – for example, normal business weekday off-peak time, weekends and holidays – to be collected within window groups for easier management of jobs and scheduled tasks.

Window Overlaps: It is possible to have windows overlap each other, and it does provide a simple conflict-resolution method to insure that the appropriate jobs do get the appropriate resources.

DBMS_JOB DBMS_SCHEDULER

— Schedule an Analyze job at 10PM
-- DBMS_JOB version: nightly schema analyze at 22:00.
-- :jobno is a SQL*Plus bind variable (VARIABLE jobno NUMBER) that
-- receives the new job id.
-- Fixes: straight quotes (the pasted curly quotes would not compile),
-- an explicit TO_DATE mask instead of relying on NLS_DATE_FORMAT, and
-- a COMMIT — DBMS_JOB submissions are transactional and do not take
-- effect until committed.
BEGIN
DBMS_JOB.SUBMIT (
job => :jobno
,what => 'BEGIN DBMS_STATS.GATHER_SCHEMA_STATS(''ORACLEDBA'');END;'
,next_date => TO_DATE ('29/01/2009 22:00:00', 'DD/MM/YYYY HH24:MI:SS')
,interval => 'TRUNC(SYSDATE) + 1 + 22/24');
COMMIT;
END;
/

-- DBMS_SCHEDULER equivalent: nightly ORACLEDBA stats refresh at 10 PM.
-- Fixes: straight quotes throughout, and the start_date literal had an
-- unbalanced quote; converted explicitly since start_date is a
-- TIMESTAMP WITH TIME ZONE parameter.
BEGIN
DBMS_SCHEDULER.CREATE_JOB (
job_name => 'REFRESH_STATS_ORACLEDBA_SCHEMA'
,job_type => 'PLSQL_BLOCK'
,job_action => 'BEGIN DBMS_STATS.GATHER_SCHEMA_STATS(''ORACLEDBA'');END;'
,start_date => TO_TIMESTAMP ('29/01/2009 10:00 PM', 'DD/MM/YYYY HH:MI PM')
,repeat_interval => 'FREQ=DAILY'
,enabled => TRUE
,comments => 'Refreshes the ORACLEDBA Schema every night at 10 PM');
END;
/

Let’s put the theory into practise. One of the common task as a DBA is setting up ANALYZE job for number of Oracle Database users.Let’s try and achieve it using the new DBMS_SCHEDULER API

————– Above task could be scheduled in a single API call as below
-- Drop (if present) and recreate a single job that analyzes all three
-- schemas. Fixed: straight quotes (pasted curly quotes would not
-- compile). NOTE(review): DROP_JOB raises an error if the job does not
-- exist even with force => TRUE — wrap in an exception handler for a
-- truly idempotent script.
BEGIN
DBMS_SCHEDULER.DROP_JOB ( job_name => 'ANALYZE_USERS' ,force => TRUE);
DBMS_SCHEDULER.CREATE_JOB (
job_name => 'ANALYZE_USERS'
,job_type => 'PLSQL_BLOCK'
,job_action => 'BEGIN DBMS_STATS.GATHER_SCHEMA_STATS(''SALES'');DBMS_STATS.GATHER_SCHEMA_STATS(''Marketing'');DBMS_STATS.GATHER_SCHEMA_STATS(''HR'');END;'
,start_date => '09/10/2009 22:00:00.000000'
,repeat_interval => 'FREQ=DAILY'
,enabled => FALSE  -- created disabled; enable with DBMS_SCHEDULER.ENABLE
,comments => 'Refreshes the Schemas every night at 10 PM'
);
END;
/

Let’s try and use new feaures and strengths of 10g Scheduler

We need to create
1. A schedule object that controls when the various schema refreshes should run
2. A program object that handles calling appropriate procedure for Analyze refresh
3..A job object that invoke the program at the scheduled time

1. Creating Schedule Object: I want to schedule schema refresh on Sundays after 22:00 hours (off-peak time).

————– SQL for Creating a Schedule Object
-- Schedule object: weekly run starting Sunday 25/10/2009 at 22:00
-- (off-peak). Fixed: straight quotes (pasted curly quotes would not
-- compile).
BEGIN
DBMS_SCHEDULER.DROP_SCHEDULE( schedule_name => 'RefreshSchemas' ,force => TRUE );
DBMS_SCHEDULER.CREATE_SCHEDULE (
schedule_name => 'RefreshSchemas'
,start_date => '25/10/2009 22:00:00'
,repeat_interval => 'FREQ=WEEKLY'
,comments => 'Weekly schema statistics refresh' );
END;
/

2. Creating Program Objects : I will create a program object named REFRESHHRSCHEMA without any arguments that will handle refreshes of just the one HR schema. I have multiple schemas, so I could use the following SQL for each one of them.

————– SQL for Creating a Simple Program Object
-- Program object: refreshes optimizer statistics for the HR schema
-- only. Create one per schema if several are needed.
-- Fixed: straight quotes (pasted curly quotes would not compile).
BEGIN
DBMS_SCHEDULER.CREATE_PROGRAM (program_name => 'ReFreshSchema'
,program_type => 'PLSQL_BLOCK'
,program_action => 'BEGIN DBMS_STATS.GATHER_SCHEMA_STATS(''HR'');END;'
,number_of_arguments => 0
,enabled => TRUE
,comments => 'Freshens statistics for all objects in HR schema only'
);
END;
/

3. Creating Job Objects: Finally, I will add a job object that assigns specific task within a specific schedule. I will need to create one job object for each schema for which statistics need to be refreshed.

————– SQL for Creating Job Using Existing Schedule and Program Objects
-- Job object: bind the HR refresh program to the weekly schedule.
-- Fixed: straight quotes (pasted curly quotes would not compile).
BEGIN
DBMS_SCHEDULER.DROP_JOB (
job_name => 'HR_REFRESHSCHEMA'
,force => TRUE);

DBMS_SCHEDULER.CREATE_JOB (
job_name => 'HR_REFRESHSCHEMA'
,program_name => 'REFRESHSCHEMA'
,schedule_name => 'REFRESHSCHEMAS'
,enabled => FALSE  -- enable explicitly once verified
,comments => 'Refreshes the HR Schema every week'
);
END;
/

Now we have new scheduler and added a Analyze job but how would we locate Success /Failure of Scheduler jobs?

————– Show Scheduler database objects
-- List all Scheduler-related objects in the database.
-- Fixed: straight quotes in the SQL*Plus commands and literals
-- (pasted curly quotes would not parse).
TTITLE 'Scheduler Objects:'
COL owner FORMAT A08 HEADING 'Owner'
COL object_type FORMAT A12 HEADING 'Object|Type'
COL object_name FORMAT A20 HEADING 'Name'
COL created FORMAT A20 HEADING 'Created On'
COL status FORMAT A12 HEADING 'Status'

SELECT
owner ,object_type ,object_name ,created ,status
FROM dba_objects
WHERE object_type IN ('PROGRAM', 'JOB', 'JOB CLASS', 'SCHEDULE', 'WINDOW')
ORDER BY object_type, OBJECT_name;

————– Show Schedule objects
-- List all schedule objects with their start/end dates and repeat
-- intervals. Fixed: straight quotes (pasted curly quotes would not parse).
TTITLE 'Schedules:'
COL owner FORMAT A08 HEADING 'Owner'
COL schedule_name FORMAT A20 HEADING 'Schedule'
COL start_date FORMAT A20 HEADING 'Starts|On'
COL end_date FORMAT A20 HEADING 'Ends|On'
COL repeat_interval FORMAT A45 HEADING 'Interval'

SELECT
owner ,schedule_name ,to_char(start_date, 'mm/dd/yyyy hh24:mi:ss') start_date ,to_char(end_date, 'mm/dd/yyyy hh24:mi:ss') end_date ,repeat_interval
FROM dba_scheduler_schedules;

————– Show Program objects : What program objects are available?
-- List available program objects.
-- Fixed: straight quotes (pasted curly quotes would not parse).
TTITLE 'Programs:'
COL owner FORMAT A08 HEADING 'Owner'
COL program_name FORMAT A20 HEADING 'Program'
COL program_type FORMAT A16 HEADING 'Type'
COL program_action FORMAT A32 HEADING 'Action'

SELECT
owner
,program_name
,program_type
,program_action
FROM dba_scheduler_programs;

-- What arguments are attached to each program?
-- Fixed: '--' comment marker and straight quotes (the paste had an
-- em-dash and curly quotes), and the "COL argument_name" command had
-- been split across two lines.
TTITLE 'Program Arguments:'
COL owner FORMAT A08 HEADING 'Owner'
COL program_name FORMAT A20 HEADING 'Program'
COL argument_name FORMAT A12 HEADING 'Arg Name'
COL argument_position FORMAT 999 HEADING 'Arg|Pos'
COL argument_type FORMAT A12 HEADING 'Arg Type'
COL default_value FORMAT A12 HEADING 'Default|Value'
COL out_argument FORMAT A06 HEADING 'Out|Arg?'

SELECT owner ,program_name ,argument_name ,argument_position ,argument_type ,default_value ,out_argument
FROM dba_scheduler_program_args;

-------------- Show Job objects
COL owner FORMAT A08 HEADING 'Owner'
COL job_name FORMAT A20 HEADING 'Job'
COL program_name FORMAT A20 HEADING 'Program|Name'
COL job_class FORMAT A24 HEADING 'Job Class'
COL job_type FORMAT A12 HEADING 'Job|Type'
COL job_action FORMAT A12 HEADING 'Job|Action'

TTITLE 'Jobs:'
SELECT owner,
       job_name,
       state,
       job_class,
       job_type,
       job_action
  FROM dba_scheduler_jobs;

COL owner FORMAT A08 HEADING 'Owner'
COL job_name FORMAT A20 HEADING 'Job'
COL program_name FORMAT A20 HEADING 'Program|Name'
COL schedule_name FORMAT A20 HEADING 'Schedule|Name'
COL enabled FORMAT A08 HEADING 'Enabled?'
COL state FORMAT A08 HEADING 'State'
COL restartable FORMAT A08 HEADING 'Restart|-able?'
COL start_date FORMAT A32 HEADING 'Start|Date'

TTITLE 'Job Components:'
SELECT owner,
       job_name,
       program_name,
       schedule_name,
       enabled,
       state,
       restartable,
       start_date
  FROM dba_scheduler_jobs;

-------------- What are a Job's arguments?
COL job_name FORMAT A20 HEADING 'Job'
COL argument_name FORMAT A12 HEADING 'Arg Name'
COL argument_position FORMAT 999 HEADING 'Arg|Pos'
COL value FORMAT A32 HEADING 'Argument Value'

TTITLE 'Job Arguments:'
SELECT job_name,
       argument_name,
       argument_position,
       value
  FROM dba_scheduler_job_args;

-------------- Show Scheduled Tasks History: high-level view of scheduled task execution history
COL log_id FORMAT 9999 HEADING 'Log#'
COL log_date FORMAT A32 HEADING 'Log Date'
COL owner FORMAT A06 HEADING 'Owner'
COL job_name FORMAT A20 HEADING 'Job'
COL status FORMAT A10 HEADING 'Status'

TTITLE 'Scheduled Tasks History:'
SELECT log_id,
       log_date,
       owner,
       job_name,
       status
  FROM dba_scheduler_job_log;

-------------- What scheduled tasks failed during execution, and error details
COL log_id FORMAT 9999 HEADING 'Log#'
COL log_date FORMAT A32 HEADING 'Log Date'
COL owner FORMAT A06 HEADING 'Owner'
COL job_name FORMAT A20 HEADING 'Job'
COL status FORMAT A10 HEADING 'Status'
COL actual_start_date FORMAT A32 HEADING 'Actual|Start|Date'
COL error# FORMAT 999999 HEADING 'Error|Nbr'

TTITLE 'Scheduled Tasks That Failed:'
SELECT log_id,
       log_date,
       owner,
       job_name,
       status,
       actual_start_date,
       error#
  FROM dba_scheduler_job_run_details
 WHERE status <> 'SUCCEEDED'
 ORDER BY actual_start_date;

-------------- Which jobs are running right now?
COL owner FORMAT A06 HEADING 'Owner'
COL job_name FORMAT A20 HEADING 'Job'
COL session_id FORMAT 999999 HEADING 'Session'
COL running_instance FORMAT A10 HEADING 'Running|Instance'
COL elapsed_time FORMAT A12 HEADING 'Elapsed|Time'

TTITLE 'Scheduled Tasks Running Right Now:'
SELECT owner,
       job_name,
       session_id,
       running_instance,
       elapsed_time
  FROM dba_scheduler_running_jobs;

Best way to check patches on Metalink

Posted by Sagar Patil

http://metalink.oracle.com
Using Patches -> simple search with following details

Product and Family : E-Business Suite
Release : Applications R12
Patch Type : Any
Classification : Recommended
Plateform or Language : Select Relevant
Click on Go button. This search will show recommended patches.

OR

Click on Top right hand Side “Patches” , select “Quick Links to: Latest Patchsets, Mini Packs and Maintenance Packs” & you will see “Latest Oracle Applications R12 Packs”

Example : Let’s locate 10.2.0.4 patch for 10.2.0.1 baseline environment

Write a Message to the Alert Log

Posted by Sagar Patil

One can use UTL_FILE else OS editors/utilities. The best is using undocumented KSDWRT procedure of the DBMS_SYSTEM package.

This procedure has two parameters,  The first one must be “2” to write to the alert file,  the second one is the message you want to write.

execute sys.dbms_system.ksdwrt(2,to_char(sysdate)|| ‘ — ‘);

I used it to write “ORA-” error messages to the alert log, to test Grid alerts for targets.

Oracle RDBMS Release Nomenclature

Posted by Sagar Patil

Before 9i

8 RDBMS Version Number
1 New Features Release Number
5 Maintenance  Release Number
1 Generic Patch Set Number
2 Platform Patch Set Number

 

After 9i 

9 RDBMS Version Number
2 Database Maintenance  Release Number
0 Application Server Release Number
1 Component Specific Release Number
2 Platform Specific Release Number

Display execution plan in SQLPLUS – plan_table script

Posted by Sagar Patil

I was trying to execute following SQL through client side tools and found network delays were contributing to the SQl execution times. This SQL normally returns in less than 50 millisec and now taking more than 700 Msec. I am addicted to TOAD -TORA GUI created Execution plans and was looking for a an easy way to get execution times as well as plans on sqlplus. Here is how you do it .

SQLPLUS> set autotrace on; – Will display formatted explain plan

SQLPLUS> set timing on;
– Oracle will automatically display the elapsed wall-clock time for each SQL command you run subsequently.
– The format is HH:MM:SS.hundredths of a second for example 00:00:00.81 is .81 seconds

If you get an error like “PLAN_TABLE not found”, run $ORACLE_HOME/rdbms/admin/utlxplan.sql to create PLAN_TABLE and grant access on it to the relevant user.

SQLPLUS> set autotrace on;
SQLPLUS> set timing on;
SQL> select count(*) from voucher;

COUNT(*)
———-
144189820

Elapsed: 00:01:55.05 format is HH:MM:SS.hundredths of a second

Execution Plan
———————————————————-
0      SELECT STATEMENT Optimizer=FIRST_ROWS (Cost=3 Card=1)
1    0   SORT (AGGREGATE)
2    1     INDEX (FAST FULL SCAN) OF ‘VOUCHER_P0001001’ (UNIQUE) (Cost=3 Card=126665996)

Statistics
———————————————————-
0  recursive calls
0  db block gets
622158  consistent gets
621830  physical reads
0  redo size
522  bytes sent via SQL*Net to client
651  bytes received via SQL*Net from client
2  SQL*Net roundtrips to/from client
0  sorts (memory)
0  sorts (disk)
1  rows processed

PLAN_TABLE is the default table for results of the EXPLAIN PLAN statement. It is created by $ORACLE_HOME/rdbms/admin/utlxplan.sql, and it contains one row for each step in the execution plan.

For 10G:

-- PLAN_TABLE for EXPLAIN PLAN output (10g column layout, matching
-- $ORACLE_HOME/rdbms/admin/utlxplan.sql); one row per execution-plan step.
-- Column meanings are documented in the table that follows this DDL.
create table PLAN_TABLE (
statement_id varchar2(30),
plan_id number,
timestamp date,
remarks varchar2(4000),
operation varchar2(30),
options varchar2(255),
object_node varchar2(128),
object_owner varchar2(30),
object_name varchar2(30),
object_alias varchar2(65),
object_instance numeric,
object_type varchar2(30),
optimizer varchar2(255),
search_columns number,
id numeric,
parent_id numeric,
depth numeric,
position numeric,
cost numeric,
cardinality numeric,
bytes numeric,
other_tag varchar2(255),
partition_start varchar2(255),
partition_stop varchar2(255),
partition_id numeric,
other long,
distribution varchar2(30),
cpu_cost numeric,
io_cost numeric,
temp_space numeric,
access_predicates varchar2(4000),
filter_predicates varchar2(4000),
projection varchar2(4000),
time numeric,
qblock_name varchar2(30)
);

Column Datatype Description
STATEMENT_ID VARCHAR2(30) Optional statement identifier specified in the EXPLAIN PLAN statement
TIMESTAMP DATE Date and time when the EXPLAIN PLAN statement was issued
REMARKS VARCHAR2(80) Place for comments that can be added to the steps of the execution plan
OPERATION VARCHAR2(30) Name of the operation performed at this step
OPTIONS VARCHAR2(255) Options used for the operation performed at this step; the string returned by the index cost function
OBJECT_NODE VARCHAR2(128) Name of the database link used to reference the object
OBJECT_OWNER VARCHAR2(30) Owner of the object
OBJECT_NAME VARCHAR2(30) Name of the object
OBJECT_INSTANCE NUMBER(38) Numbered position of the object name in the original SQL statement
OBJECT_TYPE VARCHAR2(30) Descriptive modifier that further describes the type of object
OPTIMIZER VARCHAR2(255) Current mode of the optimizer
SEARCH_COLUMNS NUMBER Number of index columns with start and stop keys (that is, the number of columns with matching predicates)
ID NUMBER(38) Identification number for this step in the execution plan
PARENT_ID NUMBER(38) ID of the next step that operates on the results of this step
POSITION NUMBER(38) Order of processing for steps with the same parent ID. For cost-based optimization, the value in the first row of the plan is the statement’s execution cost. For rule-based optimization, the value is null in the first row.
COST NUMBER(38) Cost of the current operation estimated by the cost-based optimizer (CBO)
CARDINALITY NUMBER(38) Number of rows returned by the current operation (estimated by the CBO)
BYTES NUMBER(38) Number of bytes returned by the current operation
OTHER_TAG VARCHAR2(255) Describes the function of the SQL text in the OTHER column. Values for OTHER_TAG are: 

· SERIAL – SQL is the text of a locally-executed, serial query plan. Currently, SQL is not loaded in OTHER for this case.

· SERIAL_FROM_REMOTE – SQL text shown in the OTHER column will be executed at a remote site

· PARALLEL_COMBINED_WITH_PARENT – Parent of this operation is a DFO that performs both operations in the parallel execution plan

· PARALLEL_COMBINED_WITH_CHILD – Child of this operation is a DFO that performs both operations in the parallel execution plan.

· PARALLEL_TO_SERIAL – SQL text shown in the OTHER column is the top-level of the parallel plan.

· PARALLEL_TO_PARALLEL – SQL text shown in the OTHER column is executed and output in parallel

· PARALLEL_FROM_SERIAL – Operation consumes data from a serial operation and outputs it in parallel

PARTITION_START VARCHAR2(255) Start partition of a range of accessed partitions
PARTITION_STOP VARCHAR2(255) Stop partition of a range of accessed partitions
PARTITION_ID NUMBER(38) Step that has computed the pair of values of the PARTITION_START and PARTITION_STOP columns
OTHER LONG Information about parallel execution slaves and parallel queries
DISTRIBUTION VARCHAR2(30) Distribution method
CPU_COST NUMBER(38) User-defined CPU cost
IO_COST NUMBER(38) User-defined I/O cost
TEMP_SPACE NUMBER(38) Temporary space usage of the operation (sort or hash-join) as estimated by the optimizer’s cost-based approach. For statements that use the rule-based approach, this column is null.
ACCESS_PREDICATES VARCHAR2(4000) Predicates used to locate rows in an access structure. For example, start or stop predicates for an index range scan.
FILTER_PREDICATES VARCHAR2(4000) Predicates used to filter rows before producing them
PROJECTION VARCHAR2(4000) Expressions produced by the operation
TIME NUMBER(38) Elapsed time (in seconds) of the operation as estimated by the optimizer’s cost-based approach. For statements that use the rule-based approach, this column is null.

Display partition information for a specified index

Posted by Sagar Patil

Displays partition information for the specified index, or all indexes.

SET LINESIZE 500
SET PAGESIZE 1000
SET FEEDBACK OFF
SET VERIFY OFF

-- Partition storage details for one index (or all indexes when the
-- bind-in literal is 'ALL' — the DECODE collapses the filter to a no-op).
SELECT a.index_name,
       a.partition_name,
       a.tablespace_name,
       a.initial_extent,
       a.next_extent,
       a.pct_increase,
       a.num_rows
  FROM dba_ind_partitions a
 WHERE a.index_name = DECODE ('BRANCH_STATS_IX', 'ALL', a.index_name, 'BRANCH_STATS_IX')
 ORDER BY a.index_name, a.partition_name;

Replace ‘BRANCH_STATS_IX’ with valid index_name

-- Generates one "ALTER INDEX ... REBUILD PARTITION ... ONLINE;" statement
-- per partition of the named index (or all indexes when 'ALL' is supplied).
SELECT 'Alter Index ' || index_owner || '.' || index_name
       || ' Rebuild Partition ' || partition_name || ' Online;'
  FROM dba_ind_partitions a
 WHERE a.index_name = DECODE ('BRANCH_STATS_IX', 'ALL', a.index_name, 'BRANCH_STATS_IX')
 ORDER BY a.index_name, a.partition_name;

This should create script like …
Alter Index STATS.BRANCH_STATS_IX Rebuild Partition BS_20060201 Online;
Alter Index STATS.BRANCH_STATS_IX Rebuild Partition BS_20060202 Online;
Alter Index STATS.BRANCH_STATS_IX Rebuild Partition BS_20060203 Online;
Alter Index STATS.BRANCH_STATS_IX Rebuild Partition BS_20060204 Online;

Problem : Oracle reported block corruption on data file id 21,22 mapped for ABC,XYZ tablespaces.

Get list of all partitions mapped into corrupt tablespaces

-- Index partitions stored in the corrupted tablespaces (file ids 21, 22).
SELECT a.index_name,
       a.partition_name,
       a.tablespace_name,
       a.initial_extent,
       a.next_extent,
       a.pct_increase,
       a.num_rows
  FROM dba_ind_partitions a
 WHERE a.tablespace_name IN ('ABC', 'XYZ')
 ORDER BY a.index_name, a.partition_name;

Get a script to rebuild all these partitions

-- Rebuild script for every index partition in the affected tablespaces.
SELECT 'Alter Index ' || index_owner || '.' || index_name
       || ' Rebuild Partition ' || partition_name || ' Online; '
  FROM dba_ind_partitions a
 WHERE a.tablespace_name IN ('ABC', 'XYZ')
 ORDER BY a.index_name, a.partition_name;

Undo Retention , Undo Optimization

Posted by Sagar Patil

undo_retention parameter will not guarantee a RETENTION unless you define a RETENTION GUARANTEE CLAUSE on tablespace level

Actual Undo Size

-- Actual undo tablespace size in bytes: sums datafile bytes for every
-- ONLINE tablespace whose contents are UNDO.
SELECT   SUM (a.bytes) "UNDO_SIZE"
 FROM   v$datafile a, v$tablespace b, dba_tablespaces c
 WHERE       c.contents = 'UNDO'
 AND c.status = 'ONLINE'
 AND b.name = c.tablespace_name
 AND a.ts# = b.ts#;

UNDO_SIZE
———-
209715200

Undo Blocks per Second

-- Peak undo generation rate in blocks/second observed in v$undostat;
-- (end_time - begin_time) is a day fraction, so * 3600 * 24 converts
-- the interval length to seconds.
SELECT   MAX (undoblks / ( (end_time - begin_time) * 3600 * 24))
 "UNDO_BLOCK_PER_SEC"
 FROM   v$undostat;

UNDO_BLOCK_PER_SEC
——————
3.12166667

DB Block Size

-- db_block_size is reported in bytes (e.g. 4096), not kilobytes — the
-- original alias said [KByte], which mislabeled the output (the sample
-- result header below correctly shows [Byte]).
SELECT   TO_NUMBER (VALUE) "DB_BLOCK_SIZE [Byte]"
 FROM   v$parameter
 WHERE   name = 'db_block_size';

DB_BLOCK_SIZE [Byte]
——————–
4096

Optimal Undo Retention

209’715’200 / (3.12166667 * 4’096) = 16’401 [Sec]

Using Inline Views, you can do all in one query!

-- One-query version: combines actual undo size (inline view d),
-- undo_retention (e), db_block_size (f) and peak undo blocks/sec (g).
-- Optimal retention = undo_size / (block_size * undo_blocks_per_sec),
-- i.e. how many seconds of undo the current tablespace can hold.
SELECT   d.undo_size / (1024 * 1024) "ACTUAL UNDO SIZE [MByte]",
 SUBSTR (e.VALUE, 1, 25) "UNDO RETENTION [Sec]",
 ROUND (
 (d.undo_size / (TO_NUMBER (f.VALUE) * g.undo_block_per_sec))
 )
 "OPTIMAL UNDO RETENTION [Sec]"
 FROM   (SELECT   SUM (a.bytes) undo_size
 FROM   v$datafile a, v$tablespace b, dba_tablespaces c
 WHERE       c.contents = 'UNDO'
 AND c.status = 'ONLINE'
 AND b.name = c.tablespace_name
 AND a.ts# = b.ts#) d,
 v$parameter e,
 v$parameter f,
 (SELECT   MAX (undoblks / ( (end_time - begin_time) * 3600 * 24))
 undo_block_per_sec
 FROM   v$undostat) g
 WHERE   e.name = 'undo_retention' AND f.name = 'db_block_size';

ACTUAL UNDO SIZE [MByte]
————————
200

UNDO RETENTION [Sec]
——————–
10800

OPTIMAL UNDO RETENTION [Sec]
—————————-
16401

Calculate Needed UNDO Size for given Database Activity

-- Needed undo size for the configured retention:
-- undo_retention [sec] * db_block_size [bytes] * peak undo blocks/sec,
-- converted to MB. Inline views: d = actual undo size, e = undo_retention,
-- f = db_block_size, g = peak undo block generation rate.
SELECT   d.undo_size / (1024 * 1024) "ACTUAL UNDO SIZE [MByte]",
 SUBSTR (e.VALUE, 1, 25) "UNDO RETENTION [Sec]",
 (TO_NUMBER (e.VALUE) * TO_NUMBER (f.VALUE) * g.undo_block_per_sec)
 / (1024 * 1024)
 "NEEDED UNDO SIZE [MByte]"
 FROM   (SELECT   SUM (a.bytes) undo_size
 FROM   v$datafile a, v$tablespace b, dba_tablespaces c
 WHERE       c.contents = 'UNDO'
 AND c.status = 'ONLINE'
 AND b.name = c.tablespace_name
 AND a.ts# = b.ts#) d,
 v$parameter e,
 v$parameter f,
 (SELECT   MAX (undoblks / ( (end_time - begin_time) * 3600 * 24))
 undo_block_per_sec
 FROM   v$undostat) g
 WHERE   e.name = 'undo_retention' AND f.name = 'db_block_size';

ACTUAL UNDO SIZE [MByte]
————————
200
UNDO RETENTION [Sec]
——————–
10800
NEEDED UNDO SIZE [MByte]
————————
131.695313

Do I need to reorganise my table?

Posted by Sagar Patil

It’s a tricky question. I have explained here by using a 33GB SAP BW driving table called VBAP which needed a major work. At end I had to rebuild this table using Quest Shareplex.

VBAP Table acquired 3,873,549 blocks = 30.2 GB in Space , The total number of rows were 15,900,000
So Rows per block = 15,900,000/3,873,549 blocks= 4 rows per block

The oracle analyze stats show Avg row length of 1256 bytes so for a 8KB block size

Ideal block count would be = 15,900,000/6 rows per block(avg row len 1256 bytes) = 2,650,000 Blocks  not 3,873,549 blocks
Oracle currently using 45% more space due to row chaining.

I have used SQL script below to locate number of blocks for a 16 million row table. The row dist count below indicate on avg we have chained rows going down upto 3 database blocks.

-- Distribution of rows per block for the table: inner view counts rows in
-- each database block, outer query histograms those counts.
-- NOTE: the original had a blank line after the first SELECT line, which
-- terminates the statement in SQL*Plus (unless SET SQLBLANKLINES ON).
SELECT rows_per_block,
       COUNT(*) blocks,
       SUM(rows_per_block) sum_rows
  FROM (SELECT dbms_rowid.rowid_block_number(rowid),
               COUNT(*) rows_per_block
          FROM SAPR3.vbap
         GROUP BY dbms_rowid.rowid_block_number(rowid))
 GROUP BY rows_per_block
 ORDER BY rows_per_block DESC;

-- Same histogram bucketed into ranges of 10 ('40s', '30s', ...). The
-- '''s' literal appends a quoted "'s" suffix to each bucket label.
SELECT TO_CHAR(FLOOR(rows_per_block / 10) * 10, 'fm990') || '''s' rows_per_block,
       COUNT(*) blocks,
       SUM(rows_per_block) sum_rows
  FROM (SELECT dbms_rowid.rowid_block_number(rowid),
               COUNT(*) rows_per_block
          FROM t1
         GROUP BY dbms_rowid.rowid_block_number(rowid))
 GROUP BY TO_CHAR(FLOOR(rows_per_block / 10) * 10, 'fm990') || '''s'
 ORDER BY 1 DESC;

ROWS_PER_BLOCK BLOCKS SUM_ROWS
53         1     53
51         4     204
50         3     150
49         6     294
48         5     240
47         19     893
46         35     1610
45         52     2340
44         77     3388
43         143     6149
42         181     7602
41         272     11152
40         387     15480
39         606     23634
38         814     30932
37         1119     41403
36         1475     53100
35         1985     69475
34         2627     89318
33         3472     114576
32         4262     136384
31         5299     164269
30         6662     199860
29         8107     235103

Why 1… 50 rows allocated in a single block?

It must be due to bespoke development done on table. I feel when table was originally populated , it had very less data & SAP-Oracle put those many rows into a single block but over period of time things have changed and new columns for bespoke development and data pushed row chaining on table. In short I can see a need for rebuilding this table.

% of Rows retrieved thru Row Chaining
http://www.akadia.com/services/ora_chained_rows.html

SELECT name,value FROM v$sysstat WHERE name like ‘%table%fetch%ro%’;

NAME VALUE
——————————————————————————-
table fetch by rowid 68617756592
table fetch continued row 4330753

Table fetch by rowid : Number of ROWS retrieved thru Direct ROWID
table fetch continued row : Number of ROWS going thru ROW CHAINING

Oracle Undocumented Parameters

Posted by Sagar Patil

-- List all undocumented (underscore-prefixed) parameters from x$ksppi.
SELECT ksppinm
  FROM x$ksppi
 WHERE SUBSTR (ksppinm, 1, 1) = '_';

The following is a list of undocumented parameters.

1. _db_block_cache_protect
On VMS, the DB_BLOCK_CACHE_PROTECT mechanism has been made much faster. During normal use, having it turned on shouldn’t be noticeable (the degradation is less than 1%). Developers who link non-share will need PSWAPM privilege to use this feature.

When DB_BLOCK_CACHE_PROTECT is turned on, developers may either use the VMS mailboxes with the M (MEMORY_LOG) command  or they may just examine the ring buffer in the PGA (index SMPREI_, array SMPREB_) to determine what buffer requests have been made recently. DB_BLOCK_CACHE_PROTECT will prevent certain corruption from getting to disk; although, it may crash the foreground of the instance. It will help

catch stray writes in the cache. When you try to write past the buffer size in the sga, it will fail first with a stack violation. It seems that the db_block_cache_protect has a significant performance
overhead. Preliminary testing shows that it has considerable overhead (a single update took twice as long with the parameter set to TRUE).

2. _db_block_compute_checksums
There is another new init.ora parameter, DB_BLOCK_COMPUTE_CHECKSUMS, that controls whether a checksum is put into every block before the block is written to disk. The default is FALSE. A block read validates an exiting checksum whether or not this option is enabled. A block is marked
as corrupt if a checksum fails. It helps determine corruption due to hardware problems. The incarnation number and the sequence number are added to the end of the block to help
catch corruption. If the problem (corruption) is in the middle of the block this test will not detect it. To detect this problem a checksum may be generated in the block header before every write and verified on every read.

3. _db_block_hash_buckets= “Number of database block hash buckets”
The number of hash buckets is
a) by default to be set to a prime number;
b) coerced to a prime if there is an init.ora parameter setting.
The value, if not a prime number > 2, is rounded up to the next highest
prime. I would tend not to change it unless there is latch contention on the hash chains. raising it to equal the number of buffers would clearly remove any contention (basically, this is just saying that each buffer lives on its own hash chain). Having it set too small would mean that we might have to
scan over lots of buffers to find the one we want. I think the default is to make it 1/4 of the total number of buffers

4. _db_block_multiple_hashchain_latches “Use one latch per hash chain”

5. _db_handles “System-wide simultaneous buffer operations”

6. _db_handles_cached “Buffer handles cached each process”

7. _wait_for_sync ” Wait for sync on commit ”
Wait_for_sync is an oracle generic parameter which, when set to false, will allow the system to complete commits without waiting for the redo-log buffer flushes to complete.

8. _db_block_max_scan_cnt “Maximum number of buffers to inspect when  looking for free buffer”
DB_BLOCK_MAX_SCAN_CNT is an init.ora parameter which specifies the number of unavailable buffers a process should scan before signaling DBWR to write dirty buffers from the buffer cache to disk.

9. _db_writer_scan_depth
“Number of LRU buffers for dbwr to scan when looking for dirty buffers”

10a. _db_writer_scan_depth_increment
“Add to dbwr scan depth when dbwr is behind”

10b. _db_writer_scan_depth_decrement
Subtract from dbwr scan depth when dbwr is working too hard

11. _db_large_dirty_queue
“Number of buffers which force dirty queue to be written

12. _db_block_write_batch
Number of blocks to group in each DB Writer IO
specifies the no of blocks to be written to the disk in one write operation. Should be increased till write wait time and write complete waits starts to increase.
DBWR Free Low is the number of times DBWR is invoked because a user process found at least DB_BLOCK_WRITE_BATCH/2 buffers on the dirty list. This parameter specifies the number of blocks which should be written to disk at one time. This parameter should only be increased until the statistics Write Complete Waits and Write Wait Time show growth. Write Complete
Waits is the number of times a process waited for DBWR to write a current block before making a change to a buffer.

13. _db_block_cache_clone
“Always clone data blocks on get (for debugging)”
This parameter setting has a significantly adverse affect on performance
and we were told to run without it.

14. _controlfile_enqueue_timeout
/* use parameter value (default is 900) */
/* System Parameter: control file enqueue timeout in seconds */

15. _db_no_mount_lock
add hidden parameter to not acquire mount lock
If the hidden init.ora parameter _db_no_mount_lock is set to TRUE then no mount locks are acquired when the database is mounted exclusive. This allows two databases with the same name to be simultaneously mounted. Note that this circumvents the mechanism to prevent two simultaneous startups of the same database, and is thus a dangerous parameter to set. This only affects ports that are compiled with the SYS_DFS option enabled (i.e. VMS only).

It does not get a mount lock on the database name during startup. This allows 2 instances with the same name to run on one machine

16. _log_checkpoint_recovery_check
Check redo log after checkpoints.
Add debugging code to check the redo log after a checkpoint. This code is intended to help find a problem that is generating ora-600 [3020] during recovery. This code is enabled with a new init.ora parameter:
_log_checkpoint_recovery_check=XXX, where XXX is the number of redo blocks to check. This is called in LGWR after every checkpoint. If the init.ora parameter “_log_checkpoint_recovery_check” is zero (default) it does nothing. If it is a positive value then that many blocks of redo are scanned to see that the data file blocks on disk could be recovered if there was an immediate crash. This code was introduced to catch an elusive bug that results in OERI(3020) errors occasionally during crash recovery.

17. _switch_on_stuck_recovery
Check redo Log contents after checkpoint. This is called in LGWR after every checkpoint. If this parameter is zero (default) it does nothing. If it is a positive value then that many blocks of redo are scanned to see that the data file blocks on disk could be recovered if there was an immediate crash. This code was introduced to catch an elusive bug that results in OERI(3020) errors occasionally during crash recovery. Checkpoint recovery check: this is the number of redo blocks that kcracl will verify after every LGWR generated checkpoint. Defaults to zero for no checking. When opening the named offline log for redo application and to recalculate future change thread switching this parameter is used.

18. _log_io_size=redo log IO chunk size (blocks/write)
/* System Parameter: IO chunk size */
1. that the value is o/s dependent.
2. if left at 0, the value will be automatically determined for each log
file.

19. _log_buffers_debug
/* debugging: fill redo buffers with [well known] junk after writes */
“debug redo buffers (slows things down)”

20. _log_debug_multi_instance
/* debugging : pretend multi-instance */
“debug redo multi instance code”

21. _log_entry_prebuild_threshold
/* redo entry pre-build threshold */
/* this is a bad idea for a uniprocessor , and is only helpful for a
multiprocessor when there is latch contention */
LOG_ENTRY_PREBUILD_THRESHOLD determines the maximum size of a redo entry to prebuild before the copy to the log buffer. Increasing this parameter reduces the time that the redo copy latch is held. This parameter should not be modified if it is a single processor environment or there will be memory contention.

22. _disable_logging
If this is true, redo records will not be generated — no recovery is possible if the instance crashes. It is mainly used for getting good benchmarking  results. Default is false

23. _log_blocks_during_backup
TRUE value implies before any change is made to a db_block in the buffer cache, a *complete image* of the block is copied to the redo redo log. (This explains why excessive redo would be generated for datafiles excessive redo would be generated for datafiles in hot backup mode.) There is a new init.ora parameter, LOG_BLOCKS_DURING_BACKUP, that controls whether block images ar written to the redo log during hot backup.
Default is TRUE for VMS, but port specific with the default defined in sparams.h. This may be set to FALSE if the Oracle block size equals the O/S physical sector size, or if it is otherwise ensured that hot backup reads consistent versions of blocks even if those blocks are being written at the time. Put another way, this may be set to FALSE on machines that can guarantee the atomicity of a single block I/O request.
Default is true Problem is due to split blocks.

24. _allow_resetlogs_corruption
Added new secret init.ora parameter to override error 1138. When set to TRUE the resetlogs option will be allowed even if there are hot backups that need more redo applied. Unless you are certain that absolutely all redo, including the online logs, has been applied, then a full export and import must be done to ensure the database is internally consistent.

25. _reuse_index_loop
“number of blocks being examine for index block reuse”
/* secret system parameter to control how agressive we should walk the free
** list when attempting to reuse block – default is 5.
** Set to 0 for fast index operation which is susceptible to growth,
** Set to > 5 for slower index op but more agressive in reusing blocks */ Controls the amount of work done when looking for a block to reusse for n index entry. The value determines the number of blocks to check on the freelist when looking for a reusable block.

26. _mts_load_constants
/* hidden init.ora to set server load balancing constants */
/* fill in load balancing parameters (from _mts_load_constants) */
* PID Controller – calculate control on number of servers using:
* control = Kp * err + Kd * delta(err) + Ki * sum(err)
* where Kp = proportional, Kd = derivative constant, Ki = integral constant
* Kp,Kd,Ki can be changed with the hidden parameter _mts_load_constants
* in order to tune the system as desired.
This values should only be changed after gathering enough information to determine that the mts is not optimal.

27. _mts_fastpath
/* hidden init.ora to enable dispatcher fastpath */
default is false
* Return TRUE if the system should implement the dispatcher network
* fastpath. When enabled, the dispatcher fastpath causes network i/o
* to be buffered and only flushed when absolutely necessary. When not
* enabled, flushes will occur for every dirty virtual circuit buffer.

28. _kgl_multi_instance_lock
Only for debugging. all the _kgl_multi_instance_xxxx
“whether KGL to support multi-instance locks”
Default is 0

29. _kgl_multi_instance_pin
“whether KGL to support multi-instance pins”
Default is 0.

30. _kgl_multi_instance_invalidation
“whether KGL to support multi-instance invalidations”
Default is 0.

31. _row_cache_instance_locks
Kernel SQL Row cache management component, number of row cache instance
locks
default is 100

32. _row_cache_buffer_size
“size of row cache circular buffer”
default is 200

33. _messages
” message queue resources – dependent on # processes ”
The asynchronous message mechanism allows processes to send messages to each other. A process may send a message to a specified other process (always without waiting for a reply), may wait for a message to arrive on its queue, and may obtain the next message. Messages sent to detached processes are reliably delivered. Messages sent to foreground processes are reliably delivered as long as the process is active. The mechanism also permits sending of a simple “reply”, which is a one-byte message without queuing. It should normally be used to reply to asynchronous messages, and this is a safer technique than regular messages for responding to foreground processes. This mechanism is not used in single process mode.

34. _cpu_count
ksb – Kernel Service Background processes
“number of cpu’s for this instance”
CPU_COUNT has to be set on some platforms in order for Oracle to take advantage of multi-processor system, on others it does not have effect on performance since load balancing between processors is handled by the o/s.

35. _debug_sga
/* Debug SGA, don’t make the SGA a global section so we can set watchpoints

36. _enqueue_locks
ksq1 – Kernal Service enQueues (1)
Definitions for enqueues client objects, “locks for managed enqueues”

37. _enqueue_hash
“enqueue hash table length”

38. _enqueue_debug_multi_instance
“debug enqueue multi instance”
KST is a trace facility used for “realtime” tracing of events. Below are defined KST macros that will enable the tracing of such things as latch operations, memory assignments, etc. Tracing is done to a per process circular buffer stored in the SGA. Access to these buffers is via fixed tables. Tracing is enabled for classes of events, particular events, and ranges of events.

The tracing state may be dynamically changed with the following syntax
“ALTER TRACING”
– “ON”
– Processes begin logging events using the current enabled events
– “OFF”
– Processes stop tracing
– “ENABLE”
– Add in the events indicated in to those which are
being traced.
– “DISABLE”
– No longer trace events specified by

39._trace_buffers_per_process
Note that by default, tracing is NOT enabled. In order to enable tracing
at instance startup time, add _trace_buffers_per_process = 1

40. _trace_block_size
_trace_xxxxx (xxxxx = buffers_per_process, block_size, archive_start,
flushing, enabled, get_time_every, archive_dest etc.)
These parameters are only there for debugging purposes. Customers
or support will never have to use them.

41. _trace_archive_start
“start trace process on SGA initialization”

42. _trace_flushing
“TRWR should try to keep tracing buffers clean”

43. _trace_enabled
By default, tracing is NOT enabled. In order to enable tracing,
_trace_enabled = true

44. _trace_events
If you wish to enable tracing of waits at instance startup time, you can  either
add the line ‘_trace_events = “WAT,ALL”‘ to init.ora or execute  ‘alter tracing enable “WAT,ALL”‘ in a sqldba session.
If you wish to limit tracing to specific events, you can use the  the following syntax:
alter tracing enable “WAT,,…”  where “id” is either a specific event number, or an event range (event number 1 – event number 2).

45. _trace_archive_dest “trace archival destination”

46. _trace_file_size “trace file size”  default is 10000 blocks

47. _trace_write_batch_size “trace write batch size”  default is 32

48. _rollback_segment_initial “starting undo segment number”
Default is 1. DO NOT SPECIFY 0 AS THAT HAPPENS TO BE THE SYSTEM ROLLBACK

49. _rollback_segment_count “number of undo segments”  default is 0

50. _offline_rollback_segments
If a rollback segment is not accessible because the file it is in is offline or corrupted, one can force the system to come up without the rollback segment by specifying the rollback segment in init.ora
parameter ‘_offline_rollback_segments’. The system will come up by estimating the current system commit time since it cannot access the transaction table in the rollback segment. The system commit number is a conservative guess based on current time, the database creation time and the assumed transaction rate of 8000 tps. ONE MUST MAKE SURE THAT THE SYSTEM TIME IS SET CORRECTLY WHEN FORCING THE SYSTEM UP USING ‘_OFFLINE_ROLLBACK_SEGMENTS’. A trace is written with information
about the estimated system commit number.

51. _corrupted_rollback_segments Mark a rollback segment as corrupted.

52. _label_tag_cache_size
/* hidden size of the SGA label tag comparison cache (bitcache) */
“number of tags in the label tag comparison cache”
default is 200

53. _trace_get_time_every
“Number of trace sequence numbers per call to slgcs()”
default is 200

54. _vms_bg_priority
“VMS process priority to give to background processes”
default is 4

55. _sort_use_os_files_as_temporaries
Use O/S files rather than temp segments for sorting.

56. _log_checkpoints_to_alert
Whether checkpoint messages should be written to alert.log or not. Turned
off in benchmarks.

57. _large_memory_system :
Used in internal benchmarks. Doesn’t concern us.
“Configure system to use memory and save cpu cycles, paging, etc
default is false

58. _wakeup_timeout
This is WMON's sleep time between checks of its queue of processes to wake.

59. _latch_wait_posting
enable posting of processes that may be waiting for a latch after a process frees the same latch (set this parameter to a value greater than one for it to take effect).

60. _sql_connect_capability_code
allows database links from a pre-7.0.15 release to work with release 7.1. It is necessary to set this parameter for database links from a V6 ?

ORA-00312: Message 312 not found; Solution

Posted by Sagar Patil

Applies to: Oracle Server – Enterprise Edition – Version: 9.2.0.6.0
This problem can occur on any platform.
Symptoms
When NLS_LANGUAGE is set to ENGLISH and NLS_TERRITORY is set to AUSTRALIA, the text related to Archival messages are not displayed. The following contents are shown in the Alert log :

Errors in file /smartdi/d03/oracle/SMTRMAN/admin/bdump/smtrman_arc0_659582.trc:
ORA-16014: Message 16014 not found; No message file for product=RDBMS,facility=ORA;
ORA-00312: Message 312 not found; No message file for product=RDBMS,facility=ORA;

But when its set back to NLS_LANGUAGE=AMERICAN and NLS_TERRITORY=AMERICA it works fine.

Cause
Bug 4600519 ” MESSAGE NOT FOUND ERRORS WHEN NLS_LANGUAGE IS SET TO ENGLISH ”
The reason is due to the missing oragb.msg/oragb.msb files and oraau.msg/oraau.msb files.

When the NLS_LANGUAGE is set to ENGLISH, Oracle looks out for the message file oragb.msg/oragb.msb
which it was not able to find and hence the behaviour.

Solution

Navigate to $ORACLE_HOME/rdbms/mesg and copy the message files ” oraus.msb/oraus.msg ” to ” oragb.msb/oragb.msg ”
$ cp oraus.msb oragb.msb
$ cp oraus.msg oragb.msg

Flashback Query

Posted by Sagar Patil

Oracle Flashback Query allows users to see a consistent view of the database as it was at a point in the past. This functionality allows comparative reporting over time and recovery from logical corruptions.

Prerequisites

Oracle Flashback Query can only be used if the server is configured to use Automatic Undo Management, rather than traditional rollback segments. The maximum time period that can be flashbacked to is defined using the UNDO_RETENTION parameter in the init.ora file. Alternatively, this parameter can be set using:

ALTER SYSTEM SET UNDO_RETENTION = <seconds>;

Using Flashback Query

Flashback Query is enabled and disabled using the DBMS_FLASHBACK package. The point in time of the flashback can be specified using the SCN or the actual time:

EXECUTE Dbms_Flashback.Enable_At_System_Change_Number(123);
EXECUTE Dbms_Flashback.Enable_At_Time('28-AUG-01 11:00:00');

Once you’ve finished performing all your read-only operations you can turn off flashback query using:

EXECUTE Dbms_Flashback.Disable;

The flashback query is turned off when the session ends if there is no explicit call to the disable procedure. The current system change number can be returned using the Get_System_Change_Number function.
Example Data Recovery
Assuming all data was accidentally deleted from the EMPLOYEES table at 9:05AM we could recover it using:

DECLARE
-- Cursor over EMPLOYEES. It is OPENed while Flashback Query is active,
-- so its result set is frozen at the flashback time (before the delete).
CURSOR c_emp IS
SELECT *
FROM employees;
v_row c_emp%ROWTYPE;
BEGIN
-- Point this session's queries at the pre-delete image of the data
-- (the accidental delete happened at 9:05AM; flash back to 9:00AM).
Dbms_Flashback.Enable_At_Time('28-AUG-01 09:00:00');
-- Opening the cursor fixes its snapshot; flashback can then be disabled
-- so the INSERTs below run against the *current* database (DML is not
-- allowed while Flashback Query is enabled).
OPEN c_emp;
Dbms_Flashback.Disable;

LOOP
FETCH c_emp INTO v_row;
EXIT WHEN c_emp%NOTFOUND;
-- Re-insert each flashed-back row into the now-empty current table.
INSERT INTO employees VALUES
(v_row.employee_id, v_row.first_name,
v_row.last_name, v_row.email,
v_row.phone_number, v_row.hire_date,
v_row.job_id, v_row.salary,
v_row.commission_pct, v_row.manager_id,
-- NOTE(review): "dn" is not a column of the standard HR.EMPLOYEES
-- table -- confirm it exists in this schema before running.
v_row.department_id, v_row.dn);
END LOOP;
CLOSE c_emp;
COMMIT;
END;
/

Notice that the Flashback Query session is disabled after the cursor is created so that the DML operations can be performed to recover the data.

Restrictions

* The server must be configured to use Automatic Undo Management.
* No DDL or DML can be issued when using Flashback Query.
* Flashback Query does not reverse DDL operations such as DROP, only DML alterations to the data.
* Flashback Query does not apply to code objects (Packages, Procedures, Functions or Triggers). If invoked, the current definition will be executed against the flashback data.

Sid from Unix Process ID

Posted by Sagar Patil

Here is a situation, This afternoon I have seen my CPU hitting through the roof on a Live system. There are about 6 Oracle instances and one is chewing more than 90% of CPU time.

– I want to know which Instance /process is a culprit

  PID USERNAME PRI NICE  SIZE   RES STATE   TIME    CPU COMMAND
638523 oracle    52    0 1360M  503M run   574:22 99.10% oracle
958998 oracle    42    0 1363M  506M run   518:12 46.20% oracle
982071 oracle    42    0 1360M  502M WAIT  618:58 38.40% oracle

 

-- Map an O/S process id (&oracle_pid) back to its Oracle session.
SELECT s.username,
       s.status,
       s.sid,
       s.serial#,
       p.spid,
       s.machine,
       s.process,
       s.lockwait
FROM   v$session s
       INNER JOIN v$process p
               ON s.paddr = p.addr
WHERE  p.spid = &oracle_pid;

Replace '&oracle_pid' with 958998

USERNAME    STATUS    SID    SERIAL#    SPID    MACHINE    PROCESS    LOCKWAIT
SYS         ACTIVE    241    38945      958998   

How to enable trace in Oracle

Posted by Sagar Patil

1. Enable trace at instance level

Put the following line in init.ora. It will enable trace for all sessions and the background
processes

to Start  : sql_trace = TRUE

to Stop  : sql_trace = FALSE

to enable tracing without restarting database run the following command in sqlplus

SQLPLUS> ALTER SYSTEM SET sql_trace = TRUE;

2. Enable trace at session level

to Start  : ALTER SESSION SET sql_trace = TRUE;

to stop   : ALTER SESSION SET sql_trace = FALSE;

– or –

to Start  : EXECUTE dbms_session.set_sql_trace (TRUE);
to Stop   : EXECUTE dbms_session.set_sql_trace (FALSE);

– or –

to Start  : EXECUTE dbms_support.start_trace;
to Stop   : EXECUTE dbms_support.stop_trace;

3. Enable trace in another session

SELECT * FROM v$session WHERE upper(osuser) like  ‘&1’;

to start : EXECUTE dbms_support.start_trace_in_session (SID, SERIAL#);

to stop : EXECUTE dbms_support.stop_trace_in_session (SID, SERIAL#);

– or –

EXECUTE dbms_system.set_sql_trace_in_session (SID, SERIAL#, TRUE);
EXECUTE dbms_system.set_sql_trace_in_session (SID, SERIAL#, FALSE);

4. Enable trace Using ORADEBUG

SQL>  select s.username, p.spid os_process_id, p.pid oracle_process_id
  from v$session s, v$process p
where s.paddr = p.addr
   and s.username = upper('&user_name');  2    3    4
Enter value for user_name: repadmin
old   4:    and s.username = upper(‘&user_name’)
new   4:    and s.username = upper(‘repadmin’)

USERNAME                       OS_PROCESS_I ORACLE_PROCESS_ID
—————————— ———— —————–
REPADMIN                       850297                      19
REPADMIN                       955904                      32
REPADMIN                       911971                      20
REPADMIN                       845029                      18
REPADMIN                       533889                      30

SQL> oradebug setospid 533889;
Oracle pid: 30, Unix process pid: 533889, image: oracle@bmau29.uk.pri.o2.com (TNS V1-V3)

SQL> oradebug event 10046 trace name context forever, level 12;
Statement processed.

Can't convert a Table Space into Read Only Mode

Posted by Sagar Patil

Reason : Active transactions running on the tablespace

Locate SQL_TEXT and Session Address running the ALTER TABLESPACE command

-- Locate the SQL text and session address (SADDR) of the session that is
-- running the ALTER TABLESPACE command.
-- Fixed: typographic quotes around the LIKE literal (would raise
-- ORA-00911); implicit comma join replaced with an explicit ANSI join.
SELECT sq.sql_text,
       se.saddr
FROM   v$sqlarea sq
       INNER JOIN v$session se
               ON sq.address = se.sql_address
WHERE  LOWER(sq.sql_text) LIKE 'alter tablespace%';

V$transaction will show active transactions in the system

-- Active transactions in the system, oldest first (by start SCN),
-- with the undo they currently hold (records and blocks).
SELECT ses_addr,
       start_scnb,
       used_urec,
       used_ublk
FROM   v$transaction
ORDER BY start_scnb;

v$transaction View
Important fields of v$transaction are used_ublk and used_urec. They tell of how many blocks and records the undo for a transaction consists. In order to find out the name of the corresponding rollback segment, join the xidusn field with the usn field of v$rollname. This is demonstrated in

List of sessions blocking the tablespace in write mode

-- Sessions that own an active transaction; these are what prevent the
-- tablespace from being switched to READ ONLY.
SELECT s.saddr,
       s.sid,
       s.serial#,
       s.username,
       s.status,
       s.machine,
       s.sql_id
FROM   v$session s
WHERE  EXISTS (SELECT 1
               FROM   v$transaction t
               WHERE  t.ses_addr = s.saddr
               AND    t.used_urec IS NOT NULL
               AND    t.used_ublk IS NOT NULL);

SQL clarifying Commands fired by Session.

-- Show the SQL statement currently being run by each session that owns
-- an active transaction (the sessions blocking the READ ONLY switch).
-- Fixed: implicit comma join replaced with an explicit ANSI join, and
-- the missing statement terminator added.
SELECT b.sql_text,
       a.saddr,
       a.sid,
       a.serial#,
       a.username,
       a.status,
       a.machine,
       a.sql_id
FROM   v$session a
       INNER JOIN v$sql b
               ON a.sql_id = b.sql_id
WHERE  a.saddr IN (SELECT ses_addr
                   FROM   v$transaction
                   WHERE  used_urec IS NOT NULL
                   AND    used_ublk IS NOT NULL);

Enable Archivelog, Put Database into ARCHIVE mode

Posted by Sagar Patil

repA>SELECT LOG_MODE FROM SYS.V$DATABASE;

LOG_MODE
————
NOARCHIVELOG

repA> alter system set log_archive_dest_1='location=/opt/oracle/oraarchive/repA/'
scope=spfile;

repA>show parameter log_archive_format

NAME TYPE VALUE
———————————— ———– ——————————
log_archive_format string %t_%s.dbf

repA>alter system set log_archive_format='repA_%s.arc' scope=spfile;

System altered.

repA>alter system set log_archive_start=TRUE scope=spfile;

System altered.

repA>shutdown immediate;
Database closed.
Database dismounted.
ORACLE instance shut down.

repA>startup mount
Total System Global Area 320563864 bytes
Fixed Size 735896 bytes
Variable Size 285212672 bytes
Database Buffers 33554432 bytes
Redo Buffers 1060864 bytes
Database mounted.

repA>alter database archivelog;

Database altered.

repA>alter database open;

Database altered.

repA>alter system switch logfile;

System altered.

repA>SELECT LOG_MODE FROM SYS.V$DATABASE;

LOG_MODE
————
ARCHIVELOG

$ ls -l
total 788
-rw-r—– 1 oracle dba 395264 Sep 25 11:06 repA_25.arc
-rw-r—– 1 oracle dba 2048 Sep 25 11:06 repA_26.arc

Oracle Data Pump

Posted by Sagar Patil

– Data Pump runs only on the server side.
– You may initiate the export from a client but the job(s) themselves will run inside an Oracle server.
– There are no dump files (expdat.dmp) or log files that will be created on your local machine

How to use DataPUMP?

To Export Entire Database use FULL =Y , for schema use schemas=<USERNAMES>

FULL expdp system/XXX FULL=y DIRECTORY=dexport DUMPFILE=expdata.dmp LOGFILE=expdata.log
Schema expdp system SCHEMAS=DOTCOM DIRECTORY=export DUMPFILE=expdata.dmp LOGFILE=expdata.log

Data pump could be used over Database link as an example below
SQL-S2> CREATE DATABASE LINK S1 CONNECT TO JKOOP IDENTIFIED BY PWD USING 'S1';

SQL-S2> CREATE DIRECTORY mydump AS '/app/oracle/admin/itctvt/export' ;

E:> expdp jkoop/pwd FULL=y DIRECTORY=mydump NETWORK_LINK=S1 DUMPFILE=byDB.dmp LOGFILE=byDB.log

Introduction to Monitoring Data Pump
DBA_DATAPUMP_JOBS : This view will show the active Data Pump jobs, their state, degree of parallelism, and the number of sessions attached.

SQL> select * from dba_datapump_jobs

OWNER_NAME JOB_NAME OPERATION JOB_MODE STATE DEGREE ATTACHED_SESSIONS
———- ———————- ———- ———- ————- ——— —————–
JKOOP SYS_EXPORT_FULL_01 EXPORT FULL EXECUTING 1 1
JKOOP SYS_EXPORT_SCHEMA_01 EXPORT SCHEMA EXECUTING 1 1

DBA_DATAPUMP_SESSIONS : This view gives the SADDR that assists in determining why a Data Pump session may be having problems. Join to the V$SESSION view for further information.
SQL> SELECT * FROM DBA_DATAPUMP_SESSIONS

OWNER_NAME JOB_NAME SADDR
———- —————————— ——–
JKOOPMANN SYS_EXPORT_FULL_01 225BDEDC
JKOOPMANN SYS_EXPORT_SCHEMA_01 225B2B7C

V$SESSION_LONGOPS :This view helps determine how well a Data Pump export is doing. Basically gives you a progress indicator through the MESSAGE column.
SQL> select username,opname,target_desc,sofar,totalwork,message from V$SESSION_LONGOPS

USERNAME OPNAME TARGET_DES SOFAR TOTALWORK MESSAGE
——– ——————– ———- —– ———- ————————————————
JKOOP SYS_EXPORT_FULL_01 EXPORT 132 132 SYS_EXPORT_FULL_01:EXPORT:132 out of 132 MB done
JKOOP SYS_EXPORT_FULL_01 EXPORT 90 132 SYS_EXPORT_FULL_01:EXPORT:90 out of 132 MB done
JKOOP SYS_EXPORT_SCHEMA_01 EXPORT 17 17 SYS_EXPORT_SCHEMA_01:EXPORT:17 out of 17 MB done
JKOOP SYS_EXPORT_SCHEMA_01 EXPORT 19 19 SYS_EXPORT_SCHEMA_01:EXPORT:19 out of 19 MB done

Top of Page

Top menu