Tuesday, October 28, 2008

create controlfile to increase maxdatafiles in control file

step 1: Before increasing them, note the RECORDS_TOTAL column for REDO LOG and DATAFILE: 16 and 100 before the increase.


SQL> select * from v$controlfile_record_section;

TYPE RECORD_SIZE RECORDS_TOTAL RECORDS_USED FIRST_INDEX
---------------------------- ----------- ------------- ------------ -----------
LAST_INDEX LAST_RECID
---------- ----------
DATABASE 316 1 1 0
0 0

CKPT PROGRESS 8180 11 0 0
0 0

REDO THREAD 256 8 1 0
0 0

REDO LOG 72 16 16 0
0 12

DATAFILE 428 100 5 0
0 36

FILENAME 524 2298 44 0
0 0

TABLESPACE 68 100 6 0
0 1

TEMPORARY FILENAME 56 100 1 0
0 1

RMAN CONFIGURATION 1108 50 1 0
0 1

LOG HISTORY 56 292 42 1
42 42

OFFLINE RANGE 200 163 0 0
0 0

ARCHIVED LOG 584 308 49 1
49 49

BACKUP SET 40 409 7 1
7 7

BACKUP PIECE 736 200 7 1
7 7

BACKUP DATAFILE 116 282 6 1
6 6

BACKUP REDOLOG 76 215 10 1
10 10

DATAFILE COPY 660 223 16 1
16 16

BACKUP CORRUPTION 44 371 0 0
0 0

COPY CORRUPTION 40 409 0 0
0 0

DELETED OBJECT 20 818 10 1
10 10

PROXY COPY 852 249 0 0
0 0

BACKUP SPFILE 36 454 2 1
2 2

DATABASE INCARNATION 56 292 5 1
5 5

FLASHBACK LOG 84 2048 35 0
0 0

RECOVERY DESTINATION 180 1 1 0
0 0

INSTANCE SPACE RESERVATION 28 1055 1 0
0 0

REMOVABLE RECOVERY FILES 32 1000 7 0
0 0

RMAN STATUS 116 141 40 1
40 40

THREAD INSTANCE NAME MAPPING 80 8 8 0
0 0

MTTR 100 8 1 0
0 0

DATAFILE HISTORY 568 57 0 0
0 0

STANDBY DATABASE MATRIX 400 10 10 0
0 0

GUARANTEED RESTORE POINT 212 2048 0 0
0 0

RESTORE POINT 212 2083 0 0
0 0


34 rows selected.

step 2: generate trace file

SQL> alter database backup controlfile to trace as '/tmp/cntl_recreate.trc' noresetlogs;

Database altered.



[oracle@localhost ~]$ cat /tmp/cntl_recreate.trc
-- The following are current System-scope REDO Log Archival related
-- parameters and can be included in the database initialization file.
--
-- LOG_ARCHIVE_DEST=''
-- LOG_ARCHIVE_DUPLEX_DEST=''
--
-- LOG_ARCHIVE_FORMAT=OCP10G_%t_%s_%r.arc
--
-- DB_UNIQUE_NAME="OCP10G"
--
-- LOG_ARCHIVE_CONFIG='SEND, RECEIVE, NODG_CONFIG'
-- LOG_ARCHIVE_MAX_PROCESSES=2
-- STANDBY_FILE_MANAGEMENT=MANUAL
-- STANDBY_ARCHIVE_DEST=?/dbs/arch
-- FAL_CLIENT=''
-- FAL_SERVER=''
--
-- LOG_ARCHIVE_DEST_1='LOCATION=/apps/oracle/oraarch/OCP10G'
-- LOG_ARCHIVE_DEST_1='OPTIONAL REOPEN=300 NODELAY'
-- LOG_ARCHIVE_DEST_1='ARCH NOAFFIRM NOEXPEDITE NOVERIFY SYNC'
-- LOG_ARCHIVE_DEST_1='REGISTER NOALTERNATE NODEPENDENCY'
-- LOG_ARCHIVE_DEST_1='NOMAX_FAILURE NOQUOTA_SIZE NOQUOTA_USED NODB_UNIQUE_NAME'
-- LOG_ARCHIVE_DEST_1='VALID_FOR=(PRIMARY_ROLE,ONLINE_LOGFILES)'
-- LOG_ARCHIVE_DEST_STATE_1=ENABLE

--
-- The following commands will create a new control file and use it
-- to open the database.
-- Data used by Recovery Manager will be lost.
-- Additional logs may be required for media recovery of offline
-- Use this only if the current versions of all online logs are
-- available.

-- After mounting the created controlfile, the following SQL
-- statement will place the database in the appropriate
-- protection mode:
-- ALTER DATABASE SET STANDBY DATABASE TO MAXIMIZE PERFORMANCE

STARTUP NOMOUNT
CREATE CONTROLFILE REUSE DATABASE "OCP10G" NORESETLOGS ARCHIVELOG
MAXLOGFILES 16
MAXLOGMEMBERS 3
MAXDATAFILES 100
MAXINSTANCES 8
MAXLOGHISTORY 292
LOGFILE
GROUP 1 '/apps/oracle/oradata/OCP10G/redo01.log' SIZE 50M,
GROUP 2 '/apps/oracle/oradata/OCP10G/redo02.log' SIZE 50M,
GROUP 3 '/apps/oracle/oradata/OCP10G/redo03.log' SIZE 50M
-- STANDBY LOGFILE

DATAFILE
'/apps/oracle/oradata/OCP10G/system01.dbf',
'/apps/oracle/oradata/OCP10G/undotbs01.dbf',
'/apps/oracle/oradata/OCP10G/sysaux01.dbf',
'/apps/oracle/oradata/OCP10G/users01.dbf',
'/apps/oracle/oradata/OCP10G/example01.dbf'
CHARACTER SET WE8ISO8859P1
;

-- Configure RMAN configuration record 1
VARIABLE RECNO NUMBER;
EXECUTE :RECNO := SYS.DBMS_BACKUP_RESTORE.SETCONFIG('DEVICE TYPE','DISK BACKUP TYPE TO COMPRESSED BACKUPSET PARALLELISM 1');
-- Commands to re-create incarnation table
-- Below log names MUST be changed to existing filenames on
-- disk. Any one log file from each branch can be used to
-- re-create incarnation records.
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651015758.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651853690.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651854542.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651856491.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651862296.arc';
-- Recovery is required if any of the datafiles are restored backups,
-- or if the last shutdown was not normal or immediate.
RECOVER DATABASE

-- All logs need archiving and a log switch is needed.
ALTER SYSTEM ARCHIVE LOG ALL;

-- Database can now be opened normally.
ALTER DATABASE OPEN;

-- Commands to add tempfiles to temporary tablespaces.
-- Online tempfiles have complete space information.
-- Other tempfiles may require adjustment.
ALTER TABLESPACE TEMP ADD TEMPFILE '/apps/oracle/oradata/OCP10G/temp01.dbf'
SIZE 30408704 REUSE AUTOEXTEND ON NEXT 655360 MAXSIZE 32767M;
-- End of tempfile additions.



step 3: prepare the SQL script modified from the trace file

--my comments
--comment out STARTUP NOMOUNT (it will be run separately in step 6)
--remove the "-- STANDBY LOGFILE" line from inside the CREATE CONTROLFILE statement; it seems remarks are not supported within the CREATE statement



cp ./cntrl_recreate.sql cntrl_recreate_new.sql

[oracle@localhost create-cntrl]$ cat cntrl_recreate_new.sql
-- The following are current System-scope REDO Log Archival related
-- parameters and can be included in the database initialization file.
--
-- LOG_ARCHIVE_DEST=''
-- LOG_ARCHIVE_DUPLEX_DEST=''
--
-- LOG_ARCHIVE_FORMAT=OCP10G_%t_%s_%r.arc
--
-- DB_UNIQUE_NAME="OCP10G"
--
-- LOG_ARCHIVE_CONFIG='SEND, RECEIVE, NODG_CONFIG'
-- LOG_ARCHIVE_MAX_PROCESSES=2
-- STANDBY_FILE_MANAGEMENT=MANUAL
-- STANDBY_ARCHIVE_DEST=?/dbs/arch
-- FAL_CLIENT=''
-- FAL_SERVER=''
--
-- LOG_ARCHIVE_DEST_1='LOCATION=/apps/oracle/oraarch/OCP10G'
-- LOG_ARCHIVE_DEST_1='OPTIONAL REOPEN=300 NODELAY'
-- LOG_ARCHIVE_DEST_1='ARCH NOAFFIRM NOEXPEDITE NOVERIFY SYNC'
-- LOG_ARCHIVE_DEST_1='REGISTER NOALTERNATE NODEPENDENCY'
-- LOG_ARCHIVE_DEST_1='NOMAX_FAILURE NOQUOTA_SIZE NOQUOTA_USED NODB_UNIQUE_NAME'
-- LOG_ARCHIVE_DEST_1='VALID_FOR=(PRIMARY_ROLE,ONLINE_LOGFILES)'
-- LOG_ARCHIVE_DEST_STATE_1=ENABLE

--
-- The following commands will create a new control file and use it
-- to open the database.
-- Data used by Recovery Manager will be lost.
-- Additional logs may be required for media recovery of offline
-- Use this only if the current versions of all online logs are
-- available.

-- After mounting the created controlfile, the following SQL
-- statement will place the database in the appropriate
-- protection mode:
-- ALTER DATABASE SET STANDBY DATABASE TO MAXIMIZE PERFORMANCE

--my comments
--STARTUP NOMOUNT is commented out here (it is run separately in step 6)
--the line below was removed from inside the CREATE CONTROLFILE statement; it seems remarks are not supported within the CREATE statement:

-- "-- STANDBY LOGFILE"

CREATE CONTROLFILE REUSE DATABASE "OCP10G" NORESETLOGS ARCHIVELOG
MAXLOGFILES 32
MAXLOGMEMBERS 3
MAXDATAFILES 1000
MAXINSTANCES 8
MAXLOGHISTORY 292
LOGFILE
GROUP 1 '/apps/oracle/oradata/OCP10G/redo01.log' SIZE 50M,
GROUP 2 '/apps/oracle/oradata/OCP10G/redo02.log' SIZE 50M,
GROUP 3 '/apps/oracle/oradata/OCP10G/redo03.log' SIZE 50M
DATAFILE
'/apps/oracle/oradata/OCP10G/system01.dbf',
'/apps/oracle/oradata/OCP10G/undotbs01.dbf',
'/apps/oracle/oradata/OCP10G/sysaux01.dbf',
'/apps/oracle/oradata/OCP10G/users01.dbf',
'/apps/oracle/oradata/OCP10G/example01.dbf'
CHARACTER SET WE8ISO8859P1
;

-- Configure RMAN configuration record 1
VARIABLE RECNO NUMBER;
EXECUTE :RECNO := SYS.DBMS_BACKUP_RESTORE.SETCONFIG('DEVICE TYPE','DISK BACKUP TYPE TO COMPRESSED BACKUPSET PARALLELISM 1');
-- Commands to re-create incarnation table
-- Below log names MUST be changed to existing filenames on
-- disk. Any one log file from each branch can be used to
-- re-create incarnation records.
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651015758.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651853690.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651854542.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651856491.arc';
-- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651862296.arc';
-- Recovery is required if any of the datafiles are restored backups,
-- or if the last shutdown was not normal or immediate.
RECOVER DATABASE

-- All logs need archiving and a log switch is needed.
ALTER SYSTEM ARCHIVE LOG ALL;

-- Database can now be opened normally.
ALTER DATABASE OPEN;

-- Commands to add tempfiles to temporary tablespaces.
-- Online tempfiles have complete space information.
-- Other tempfiles may require adjustment.
ALTER TABLESPACE TEMP ADD TEMPFILE '/apps/oracle/oradata/OCP10G/temp01.dbf'
SIZE 30408704 REUSE AUTOEXTEND ON NEXT 655360 MAXSIZE 32767M;
-- End of tempfile additions.
--



step 4: shutdown current database
step 5: back up the existing control files

[oracle@localhost oradata]$ cd OCP10G
[oracle@localhost OCP10G]$ mkdir cntrl_bak
[oracle@localhost OCP10G]$ ls con*
control01.ctl control02.ctl control03.ctl
[oracle@localhost OCP10G]$ mv con*.ctl ./cntrl_bak/
[oracle@localhost OCP10G]$ ls*.ctl ./cntrl_bak/
bash: ls*.ctl: command not found
[oracle@localhost OCP10G]$ ls *.ctl ./cntrl_bak/
ls: *.ctl: No such file or directory
./cntrl_bak/:
control01.ctl control02.ctl control03.ctl


step 6: startup nomount


SQL> spool recreate_cntrl.log
SQL> startup nomount
ORACLE instance started.

Total System Global Area 1593835520 bytes
Fixed Size 2073088 bytes
Variable Size 872418816 bytes
Database Buffers 704643072 bytes
Redo Buffers 14700544 bytes

step 7: invoke the script

SQL> @cntrl_recreate_new.sql
SQL> -- The following are current System-scope REDO Log Archival related
SQL> -- parameters and can be included in the database initialization file.
SQL> --
SQL> -- LOG_ARCHIVE_DEST=''
SQL> -- LOG_ARCHIVE_DUPLEX_DEST=''
SQL> --
SQL> -- LOG_ARCHIVE_FORMAT=OCP10G_%t_%s_%r.arc
SQL> --
SQL> -- DB_UNIQUE_NAME="OCP10G"
SQL> --
SQL> -- LOG_ARCHIVE_CONFIG='SEND, RECEIVE, NODG_CONFIG'
SQL> -- LOG_ARCHIVE_MAX_PROCESSES=2
SQL> -- STANDBY_FILE_MANAGEMENT=MANUAL
SQL> -- STANDBY_ARCHIVE_DEST=?/dbs/arch
SQL> -- FAL_CLIENT=''
SQL> -- FAL_SERVER=''
SQL> --
SQL> -- LOG_ARCHIVE_DEST_1='LOCATION=/apps/oracle/oraarch/OCP10G'
SQL> -- LOG_ARCHIVE_DEST_1='OPTIONAL REOPEN=300 NODELAY'
SQL> -- LOG_ARCHIVE_DEST_1='ARCH NOAFFIRM NOEXPEDITE NOVERIFY SYNC'
SQL> -- LOG_ARCHIVE_DEST_1='REGISTER NOALTERNATE NODEPENDENCY'
SQL> -- LOG_ARCHIVE_DEST_1='NOMAX_FAILURE NOQUOTA_SIZE NOQUOTA_USED NODB_UNIQUE_NAME'
SQL> -- LOG_ARCHIVE_DEST_1='VALID_FOR=(PRIMARY_ROLE,ONLINE_LOGFILES)'
SQL> -- LOG_ARCHIVE_DEST_STATE_1=ENABLE
SQL>
SQL> --
SQL> -- The following commands will create a new control file and use it
SQL> -- to open the database.
SQL> -- Data used by Recovery Manager will be lost.
SQL> -- Additional logs may be required for media recovery of offline
SQL> -- Use this only if the current versions of all online logs are
SQL> -- available.
SQL>
SQL> -- After mounting the created controlfile, the following SQL
SQL> -- statement will place the database in the appropriate
SQL> -- protection mode:
SQL> -- ALTER DATABASE SET STANDBY DATABASE TO MAXIMIZE PERFORMANCE
SQL>
SQL> --STARTUP NOMOUNT
SQL> CREATE CONTROLFILE REUSE DATABASE "OCP10G" NORESETLOGS ARCHIVELOG
2 MAXLOGFILES 32
3 MAXLOGMEMBERS 3
4 MAXDATAFILES 1000
5 MAXINSTANCES 8
6 MAXLOGHISTORY 292
7 LOGFILE
8 GROUP 1 '/apps/oracle/oradata/OCP10G/redo01.log' SIZE 50M,
9 GROUP 2 '/apps/oracle/oradata/OCP10G/redo02.log' SIZE 50M,
10 GROUP 3 '/apps/oracle/oradata/OCP10G/redo03.log' SIZE 50M
11 DATAFILE
12 '/apps/oracle/oradata/OCP10G/system01.dbf',
13 '/apps/oracle/oradata/OCP10G/undotbs01.dbf',
14 '/apps/oracle/oradata/OCP10G/sysaux01.dbf',
15 '/apps/oracle/oradata/OCP10G/users01.dbf',
16 '/apps/oracle/oradata/OCP10G/example01.dbf'
17 CHARACTER SET WE8ISO8859P1
18 ;

Control file created.

SQL>
SQL> -- Configure RMAN configuration record 1
SQL> VARIABLE RECNO NUMBER;
SQL> EXECUTE :RECNO := SYS.DBMS_BACKUP_RESTORE.SETCONFIG('DEVICE TYPE','DISK BACKUP TYPE TO COMPRESSED BACKUPSET PARALLELISM 1');

PL/SQL procedure successfully completed.

SQL> -- Commands to re-create incarnation table
SQL> -- Below log names MUST be changed to existing filenames on
SQL> -- disk. Any one log file from each branch can be used to
SQL> -- re-create incarnation records.
SQL> -- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651015758.arc';
SQL> -- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651853690.arc';
SQL> -- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651854542.arc';
SQL> -- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651856491.arc';
SQL> -- ALTER DATABASE REGISTER LOGFILE '/apps/oracle/oraarch/OCP10G/OCP10G_1_1_651862296.arc';
SQL> -- Recovery is required if any of the datafiles are restored backups,
SQL> -- or if the last shutdown was not normal or immediate.
SQL> RECOVER DATABASE
ORA-00283: recovery session canceled due to errors
ORA-00264: no recovery required


SQL>
SQL> -- All logs need archiving and a log switch is needed.
SQL> ALTER SYSTEM ARCHIVE LOG ALL;

System altered.

SQL>
SQL> -- Database can now be opened normally.
SQL> ALTER DATABASE OPEN;

Database altered.

SQL>
SQL> -- Commands to add tempfiles to temporary tablespaces.
SQL> -- Online tempfiles have complete space information.
SQL> -- Other tempfiles may require adjustment.
SQL> ALTER TABLESPACE TEMP ADD TEMPFILE '/apps/oracle/oradata/OCP10G/temp01.dbf'
2 SIZE 30408704 REUSE AUTOEXTEND ON NEXT 655360 MAXSIZE 32767M;

Tablespace altered.

SQL> -- End of tempfile additions.
SQL> --

step 8: verify the new values for REDO LOG and DATAFILE

SQL> select * from v$controlfile_record_section;
TYPE RECORD_SIZE RECORDS_TOTAL RECORDS_USED FIRST_INDEX
---------------------------- ----------- ------------- ------------ -----------
LAST_INDEX LAST_RECID
---------- ----------
DATABASE 316 1 1 0
0 0

CKPT PROGRESS 8180 11 0 0
0 0

REDO THREAD 256 8 1 0
0 0

REDO LOG 72 32 3 0
0 0

DATAFILE 428 1000 5 0
0 0

FILENAME 524 4146 9 0
0 0

TABLESPACE 68 1000 6 0
0 1

TEMPORARY FILENAME 56 1000 1 0
0 1

RMAN CONFIGURATION 1108 50 1 0
0 1

LOG HISTORY 56 292 1 1
1 1

OFFLINE RANGE 200 1063 0 0
0 0

ARCHIVED LOG 584 308 3 1
3 3

BACKUP SET 40 1227 0 0
0 0

BACKUP PIECE 736 1000 0 0
0 0

BACKUP DATAFILE 116 1128 0 0
0 0

BACKUP REDOLOG 76 215 0 0
0 0

DATAFILE COPY 660 1016 0 0
0 0

BACKUP CORRUPTION 44 1115 0 0
0 0

COPY CORRUPTION 40 1227 0 0
0 0

DELETED OBJECT 20 818 0 0
0 0

PROXY COPY 852 1017 0 0
0 0

BACKUP SPFILE 36 454 0 0
0 0

DATABASE INCARNATION 56 292 1 1
1 1

FLASHBACK LOG 84 2048 0 0
0 0

RECOVERY DESTINATION 180 1 1 0
0 0

INSTANCE SPACE RESERVATION 28 1055 0 0
0 0

REMOVABLE RECOVERY FILES 32 1000 0 0
0 0

RMAN STATUS 116 141 0 0
0 0

THREAD INSTANCE NAME MAPPING 80 8 8 0
0 0

MTTR 100 8 1 0
0 0

DATAFILE HISTORY 568 57 0 0
0 0

STANDBY DATABASE MATRIX 400 10 10 0
0 0

GUARANTEED RESTORE POINT 212 2048 0 0
0 0

RESTORE POINT 212 2083 0 0
0 0


34 rows selected.

SQL> spool off
SQL> shutdown immediate
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> startup
ORACLE instance started.

Total System Global Area 1593835520 bytes
Fixed Size 2073088 bytes
Variable Size 872418816 bytes
Database Buffers 704643072 bytes
Redo Buffers 14700544 bytes
Database mounted.
Database opened.

step 9: note that the size of the controlfile has increased

[oracle@localhost OCP10G]$ ls -lrt
total 1625676
drwxrwxr-x 2 oracle oracle 4096 Oct 28 13:57 cntrl_bak
-rw-r----- 1 oracle oracle 52429312 Oct 28 14:03 redo02.log
-rw-r----- 1 oracle oracle 52429312 Oct 28 14:03 redo01.log
-rw-r----- 1 oracle oracle 30416896 Oct 28 14:03 temp01.dbf
-rw-rw---- 1 oracle oracle 5251072 Oct 28 14:04 users01.dbf
-rw-r----- 1 oracle oracle 492838912 Oct 28 14:04 undotbs01.dbf
-rw-r----- 1 oracle oracle 524296192 Oct 28 14:04 system01.dbf
-rw-r----- 1 oracle oracle 304095232 Oct 28 14:04 sysaux01.dbf
-rw-r----- 1 oracle oracle 52429312 Oct 28 14:04 redo03.log
-rw-r----- 1 oracle oracle 104865792 Oct 28 14:04 example01.dbf
-rw-rw---- 1 oracle oracle 14630912 Oct 28 14:04 control03.ctl
-rw-rw---- 1 oracle oracle 14630912 Oct 28 14:04 control02.ctl
-rw-rw---- 1 oracle oracle 14630912 Oct 28 14:04 control01.ctl

[oracle@localhost OCP10G]$ ll -l cntrl_bak/
total 21696
-rw-rw---- 1 oracle oracle 7389184 Oct 28 13:56 control01.ctl
-rw-rw---- 1 oracle oracle 7389184 Oct 28 13:56 control02.ctl
-rw-rw---- 1 oracle oracle 7389184 Oct 28 13:56 control03.ctl


--fallback

In case of fallback, just:
a) shutdown abort the instance
b) copy back the backed-up control files and start up again
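
A minimal sketch of the fallback, assuming the same paths as above:

SQL> shutdown abort
$ cp /apps/oracle/oradata/OCP10G/cntrl_bak/control0*.ctl /apps/oracle/oradata/OCP10G/
SQL> startup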

Tuesday, October 21, 2008

Running advisors without violating the license agreement.

I like the advisors introduced in 10g, but the company always tries to cut costs, which rules out buying the tuning pack license. Luckily there is a back door, i.e. we can call them from the command line. Below is an illustration using the segment advisor.

Googled and got the sample template from http://www.oracle-base.com/articles/10g/SpaceObjectTransactionManagement10g.php

cat call_segment_advisor.sql

DECLARE
l_object_id NUMBER;
BEGIN
-- Create a segment advisor task for the SCOTT.EMP table.
-- update your own owner, table name (case-sensitive)
DBMS_ADVISOR.create_task (
advisor_name => 'Segment Advisor',
task_name => 'EMP_SEGMENT_ADVISOR',
task_desc => 'Segment Advisor For EMP');

DBMS_ADVISOR.create_object (
task_name => 'EMP_SEGMENT_ADVISOR',
object_type => 'TABLE',
attr1 => 'DBAM1',
attr2 => 'M1_OUTBOUND_USAGE',
attr3 => NULL,
attr4 => 'null',
attr5 => NULL,
object_id => l_object_id);

DBMS_ADVISOR.set_task_parameter (
task_name => 'EMP_SEGMENT_ADVISOR',
parameter => 'RECOMMEND_ALL',
value => 'TRUE');

DBMS_ADVISOR.execute_task(task_name => 'EMP_SEGMENT_ADVISOR');
END;
/


-- Create a segment advisor task for the USERS tablespace.
-- DBMS_ADVISOR.create_task (
-- advisor_name => 'Segment Advisor',
-- task_name => 'USERS_SEGMENT_ADVISOR',
-- task_desc => 'Segment Advisor For USERS');

-- DBMS_ADVISOR.create_object (
-- task_name => 'USERS_SEGMENT_ADVISOR',
-- object_type => 'TABLESPACE',
-- attr1 => 'USERS',
-- attr2 => NULL,
-- attr3 => NULL,
-- attr4 => 'null',
-- attr5 => NULL,
-- object_id => l_object_id);

-- DBMS_ADVISOR.set_task_parameter (
-- task_name => 'USERS_SEGMENT_ADVISOR',
-- parameter => 'RECOMMEND_ALL',
-- value => 'TRUE');

-- DBMS_ADVISOR.execute_task(task_name => 'USERS_SEGMENT_ADVISOR');
--END;
--/

-- Display the findings.
SET LINESIZE 250
COLUMN task_name FORMAT A20
COLUMN object_type FORMAT A20
COLUMN schema FORMAT A20
COLUMN object_name FORMAT A30
COLUMN object_name FORMAT A30
COLUMN message FORMAT A40
COLUMN more_info FORMAT A40

SELECT f.task_name,
f.impact,
o.type AS object_type,
o.attr1 AS schema,
o.attr2 AS object_name,
f.message,
f.more_info
FROM dba_advisor_findings f
JOIN dba_advisor_objects o ON f.object_id = o.object_id AND f.task_name = o.task_name
WHERE f.task_name IN ('EMP_SEGMENT_ADVISOR')
ORDER BY f.task_name, f.impact DESC;

select message from dba_advisor_findings f
WHERE f.task_name IN ('EMP_SEGMENT_ADVISOR') ;

select more_info from dba_advisor_findings f
WHERE f.task_name IN ('EMP_SEGMENT_ADVISOR') ;

exec dbms_advisor.delete_task('EMP_SEGMENT_ADVISOR');


SQL> @call_segment_advisor.sql


PL/SQL procedure successfully completed.


TASK_NAME IMPACT OBJECT_TYPE SCHEMA OBJECT_NAME MESSAGE MORE_INFO
-------------------- ---------- -------------------- -------------------- ------------------------------ ---------------------------------------- ----------------------------------------
EMP_SEGMENT_ADVISOR TABLE DBAM1 M1_OUTBOUND_USAGE The free space in the object is less tha Allocated Space:15398338560: Used Space:
n the size of the last extent. 14503648178: Reclaimable Space :89469038
2:


SQL> select message from dba_advisor_findings f
2 WHERE f.task_name IN ('EMP_SEGMENT_ADVISOR');

MESSAGE
----------------------------------------
The free space in the object is less tha
n the size of the last extent.


SQL> select more_info from dba_advisor_findings f WHERE f.task_name IN ('EMP_SEGMENT_ADVISOR');

MORE_INFO
----------------------------------------
Allocated Space:15398338560: Used Space:
14503648178: Reclaimable Space :89469038
2:

SQL> select more_info FROM dba_advisor_findings f WHERE f.task_name IN ('EMP_SEGMENT_ADVISOR6');

MORE_INFO
----------------------------------------
Allocated Space:21937386023: Used Space:
21863850510: Reclaimable Space :73535513
:

--delete the task if it is no longer needed, so the script can be re-used easily without changing the task name

SQL> exec dbms_advisor.delete_task('EMP_SEGMENT_ADVISOR2');

PL/SQL procedure successfully completed.


Reading further in the Oracle document "Oracle® Database PL/SQL Packages and Types Reference": DBMS_ADVISOR allows us to call all seven advisors defined in the view DBA_ADVISOR_DEFINITIONS.
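
For instance, a quick sketch to list them from that view:

SQL> select advisor_id, advisor_name from dba_advisor_definitions order by advisor_id;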

BTW, the speed is surprising with ASSM. In the case above, even though the table size is about 21GB, it finishes within 5 seconds.

Cheers!

Monday, September 22, 2008

Define your favorite editor for sqlplus

Two ways to do so:

1. Inside sqlplus : define _editor=vi
2. Outside sqlplus : export EDITOR=vi
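
To make the setting survive across sessions, the define can go into the SQL*Plus profile script; a minimal sketch, assuming the default glogin.sql location:

-- append to $ORACLE_HOME/sqlplus/admin/glogin.sql (or a local login.sql)
define _editor=vi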

lock and create/drop index

Conclusion:

DDL (CREATE/DROP INDEX) conflicts with DML. In the session below, a second session was inserting into T1 without committing, which holds a TM lock on the table and makes the DDL fail with ORA-00054. More explanation can be found in the article below.

Locks and ORA-00054 error



SQL> create table t1 (f1 number);

Table created.

SQL> select * from t1;

no rows selected

SQL> create index t1_idx on t1(f1);

Index created.

SQL> drop index t1_idx;
drop index t1_idx
*
ERROR at line 1:
ORA-00054: resource busy and acquire with NOWAIT specified


SQL> commit;

Commit complete.

SQL> select * from t1;

no rows selected

SQL> select * from t1;

F1
----------
2

SQL> drop index t1_idx;

Index dropped.

SQL> create index t1_idx on t1(f1);
create index t1_idx on t1(f1)
*
ERROR at line 1:
ORA-00054: resource busy and acquire with NOWAIT specified
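
To see which session holds the conflicting lock, here is a sketch against v$lock (T1 is the table from above; run it as a privileged user):

SQL> select sid, type, lmode, request from v$lock
     where id1 = (select object_id from dba_objects where object_name = 'T1' and object_type = 'TABLE');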

Monday, September 15, 2008

Using gdb for debugging

install Oracle XE on Ubuntu x86_64

1. download oracle-xe_10.2.0.1-1.0_i386.deb from otn.oracle.com
2. Double-click the deb file; it throws the error message below:
"Error: wrong architecture i386"
3. Try the command below:

hmc@hmc-desktop:~/Desktop$ sudo dpkg -i --force-architecture oracle-xe_10.2.0.1-1.0_i386.deb
[sudo] password for hmc:
dpkg - warning, overriding problem because --force enabled:
package architecture (i386) does not match system (amd64)
Selecting previously deselected package oracle-xe.
(Reading database ... 344121 files and directories currently installed.)
Unpacking oracle-xe (from oracle-xe_10.2.0.1-1.0_i386.deb) ...
dpkg: dependency problems prevent configuration of oracle-xe:
oracle-xe depends on libaio (>= 0.3.96) | libaio1 (>= 0.3.96); however:
Package libaio is not installed.
Package libaio1 is not installed.
dpkg: error processing oracle-xe (--install):
dependency problems - leaving unconfigured
Errors were encountered while processing:
oracle-xe

4. Run the command below to install the missing packages:

hmc@hmc-desktop:~/Desktop$ sudo apt-get install libaio*
Reading package lists... Done
Building dependency tree
Reading state information... Done
Note, selecting libaiksaurus0c102 for regex 'libaio*'
Note, selecting libaiksaurus-1.2-data for regex 'libaio*'
Note, selecting libaiksaurusgtk-1.2-dev for regex 'libaio*'
Note, selecting libaiksaurusgtk-1.2-0 for regex 'libaio*'
Note, selecting libaio1 for regex 'libaio*'
Note, selecting libaiksaurus-1.2-dev for regex 'libaio*'
Note, selecting libaiksaurus0 for regex 'libaio*'
Note, selecting libaiksaurusgtk0c102 for regex 'libaio*'
Note, selecting libaio-dev for regex 'libaio*'
Note, selecting libaiksaurusgtk-1.2-0c2a for regex 'libaio*'
Note, selecting libaiksaurusgtk0 for regex 'libaio*'
Note, selecting libaio for regex 'libaio*'
Note, selecting libaio1-dbg for regex 'libaio*'
Note, selecting libaiksaurus-1.2-0 for regex 'libaio*'
Note, selecting libaiksaurusgtk-bin for regex 'libaio*'
Note, selecting gaiksaurus instead of libaiksaurusgtk-bin
Note, selecting libaiksaurus-1.2-0c2a for regex 'libaio*'
Note, selecting libaiksaurusgtk-dev for regex 'libaio*'
Note, selecting libaiksaurusgtk-1.2-dev instead of libaiksaurusgtk-dev
Note, selecting libaiksaurus-bin for regex 'libaio*'
Note, selecting aiksaurus instead of libaiksaurus-bin
Note, selecting libaiksaurus-data for regex 'libaio*'
Note, selecting libaiksaurus-1.2-data instead of libaiksaurus-data
Note, selecting libaiksaurus-dev for regex 'libaio*'
Note, selecting libaiksaurus-1.2-dev instead of libaiksaurus-dev
The following packages were automatically installed and are no longer required:
ubuntustudio-icon-theme ubuntustudio-gdm-theme ubuntustudio-theme
ubuntustudio-wallpapers
Use 'apt-get autoremove' to remove them.
The following extra packages will be installed:
aiksaurus gaiksaurus libaiksaurus-1.2-0c2a libaiksaurus-1.2-data
libaiksaurus-1.2-dev libaiksaurusgtk-1.2-0c2a libaiksaurusgtk-1.2-dev
libaio-dev libaio1 libaio1-dbg
The following NEW packages will be installed:
aiksaurus gaiksaurus libaiksaurus-1.2-0c2a libaiksaurus-1.2-data
libaiksaurus-1.2-dev libaiksaurusgtk-1.2-0c2a libaiksaurusgtk-1.2-dev
libaio-dev libaio1 libaio1-dbg
0 upgraded, 10 newly installed, 0 to remove and 1 not upgraded.
Need to get 513kB of archives.
After this operation, 1642kB of additional disk space will be used.
Do you want to continue [Y/n]? y
Get:1 http://tw.archive.ubuntu.com hardy/main libaiksaurus-1.2-data 1.2.1+dev-0.12-5 [318kB]
Get:2 http://tw.archive.ubuntu.com hardy/main libaiksaurus-1.2-0c2a 1.2.1+dev-0.12-5 [25.2kB]
Get:3 http://tw.archive.ubuntu.com hardy/universe aiksaurus 1.2.1+dev-0.12-5 [11.9kB]
Get:4 http://tw.archive.ubuntu.com hardy/main libaiksaurusgtk-1.2-0c2a 1.2.1+dev-0.12-5 [33.8kB]
Get:5 http://tw.archive.ubuntu.com hardy/universe gaiksaurus 1.2.1+dev-0.12-5 [9524B]
Get:6 http://tw.archive.ubuntu.com hardy/main libaiksaurus-1.2-dev 1.2.1+dev-0.12-5 [28.1kB]
Get:7 http://tw.archive.ubuntu.com hardy/main libaiksaurusgtk-1.2-dev 1.2.1+dev-0.12-5 [35.7kB]
Get:8 http://tw.archive.ubuntu.com hardy/main libaio1 0.3.106-8ubuntu1 [8430B]
Get:9 http://tw.archive.ubuntu.com hardy/main libaio-dev 0.3.106-8ubuntu1 [31.4kB]
Get:10 http://tw.archive.ubuntu.com hardy/main libaio1-dbg 0.3.106-8ubuntu1 [11.2kB]
Fetched 513kB in 4s (112kB/s)
Selecting previously deselected package libaiksaurus-1.2-data.
(Reading database ... 346502 files and directories currently installed.)
Unpacking libaiksaurus-1.2-data (from .../libaiksaurus-1.2-data_1.2.1+dev-0.12-5_all.deb) ...
Selecting previously deselected package libaiksaurus-1.2-0c2a.
Unpacking libaiksaurus-1.2-0c2a (from .../libaiksaurus-1.2-0c2a_1.2.1+dev-0.12-5_amd64.deb) ...
Selecting previously deselected package aiksaurus.
Unpacking aiksaurus (from .../aiksaurus_1.2.1+dev-0.12-5_amd64.deb) ...
Selecting previously deselected package libaiksaurusgtk-1.2-0c2a.
Unpacking libaiksaurusgtk-1.2-0c2a (from .../libaiksaurusgtk-1.2-0c2a_1.2.1+dev-0.12-5_amd64.deb) ...
Selecting previously deselected package gaiksaurus.
Unpacking gaiksaurus (from .../gaiksaurus_1.2.1+dev-0.12-5_amd64.deb) ...
Selecting previously deselected package libaiksaurus-1.2-dev.
Unpacking libaiksaurus-1.2-dev (from .../libaiksaurus-1.2-dev_1.2.1+dev-0.12-5_amd64.deb) ...
Selecting previously deselected package libaiksaurusgtk-1.2-dev.
Unpacking libaiksaurusgtk-1.2-dev (from .../libaiksaurusgtk-1.2-dev_1.2.1+dev-0.12-5_amd64.deb) ...
Selecting previously deselected package libaio1.
Unpacking libaio1 (from .../libaio1_0.3.106-8ubuntu1_amd64.deb) ...
Selecting previously deselected package libaio-dev.
Unpacking libaio-dev (from .../libaio-dev_0.3.106-8ubuntu1_amd64.deb) ...
Selecting previously deselected package libaio1-dbg.
Unpacking libaio1-dbg (from .../libaio1-dbg_0.3.106-8ubuntu1_amd64.deb) ...
Setting up libaiksaurus-1.2-data (1.2.1+dev-0.12-5) ...
Setting up libaiksaurus-1.2-0c2a (1.2.1+dev-0.12-5) ...

Setting up aiksaurus (1.2.1+dev-0.12-5) ...
Setting up libaiksaurusgtk-1.2-0c2a (1.2.1+dev-0.12-5) ...

Setting up gaiksaurus (1.2.1+dev-0.12-5) ...

Setting up libaiksaurus-1.2-dev (1.2.1+dev-0.12-5) ...
Setting up libaiksaurusgtk-1.2-dev (1.2.1+dev-0.12-5) ...
Setting up libaio1 (0.3.106-8ubuntu1) ...

Setting up libaio-dev (0.3.106-8ubuntu1) ...
Setting up libaio1-dbg (0.3.106-8ubuntu1) ...
Processing triggers for libc6 ...
ldconfig deferred processing now taking place

5. Try again

hmc@hmc-desktop:~/Desktop$ sudo dpkg -i --force-architecture oracle-xe_10.2.0.1-1.0_i386.deb
dpkg - warning, overriding problem because --force enabled:
package architecture (i386) does not match system (amd64)
(Reading database ... 346600 files and directories currently installed.)
Preparing to replace oracle-xe 10.2.0.1-1.0 (using oracle-xe_10.2.0.1-1.0_i386.deb) ...
Unpacking replacement oracle-xe ...
Setting up oracle-xe (10.2.0.1-1.0) ...
Executing Post-install steps...
-e You must run '/etc/init.d/oracle-xe configure' as the root user to configure the database.


hmc@hmc-desktop:~/Desktop$ sudo /etc/init.d/oracle-xe configure

Oracle Database 10g Express Edition Configuration
-------------------------------------------------
This will configure on-boot properties of Oracle Database 10g Express
Edition. The following questions will determine whether the database should
be starting upon system boot, the ports it will use, and the passwords that
will be used for database accounts. Press <Enter> to accept the defaults.
Ctrl-C will abort.

Specify the HTTP port that will be used for Oracle Application Express [8080]:

Specify a port that will be used for the database listener [1521]:

Specify a password to be used for database accounts. Note that the same
password will be used for SYS and SYSTEM. Oracle recommends the use of
different passwords for each database account. This can be done after
initial configuration:
Confirm the password:

Do you want Oracle Database 10g Express Edition to be started on boot (y/n) [y]:

Starting Oracle Net Listener...Done
Configuring Database...Done
Starting Oracle Database 10g Express Edition Instance...Done
Installation Completed Successfully.
To access the Database Home Page go to "http://127.0.0.1:8080/apex"

6. Then edit the ~/.bashrc of the oracle user:

ORACLE_HOME=/usr/lib/oracle/xe/app/oracle/product/10.2.0/server
PATH=$PATH:$ORACLE_HOME/bin
export ORACLE_HOME
export ORACLE_SID=XE

export PATH


7. If you like rlwrap, define an alias for sqlplus in .bashrc:

alias sqlplus='rlwrap sqlplus'

hmc@hmc-desktop:~$ sudo su - oracle
oracle@hmc-desktop:~$ pwd
/usr/lib/oracle/xe
oracle@hmc-desktop:~$ sqlplus "/as sysdba"
-su: sqlplus: command not found
oracle@hmc-desktop:~$ . .bashrc
oracle@hmc-desktop:~$ sqlplus "/as sysdba"

SQL*Plus: Release 10.2.0.1.0 - Production on Mon Sep 15 21:31:23 2008

Copyright (c) 1982, 2005, Oracle. All rights reserved.


Connected to:
Oracle Database 10g Express Edition Release 10.2.0.1.0 - Production

SQL> select * from v$instance;

INSTANCE_NUMBER INSTANCE_NAME
--------------- ----------------
HOST_NAME
----------------------------------------------------------------
VERSION STARTUP_T STATUS PAR THREAD# ARCHIVE LOG_SWITCH_WAIT
----------------- --------- ------------ --- ---------- ------- ---------------
LOGINS SHU DATABASE_STATUS INSTANCE_ROLE ACTIVE_ST BLO EDITION
---------- --- ----------------- ------------------ --------- --- -------
1 XE
hmc-desktop
10.2.0.1.0 15-SEP-08 OPEN NO 1 STOPPED
ALLOWED NO ACTIVE PRIMARY_INSTANCE NORMAL NO XE



Cheers!

One-time scheduling of tasks with 'at'

Scheduling services with cron and similar tools is great for repetitive tasks but inconvenient for one-time jobs that only need to be accomplished once. For those tasks, use the 'at' command.

At will execute a command at a particular time and is extremely useful for one-time jobs. For instance, if you are running a script or program and want to receive an e-mail when it's done, or need to schedule a job or task for a particular time or date, at will take care of it for you.

For instance, if you wanted to use at to schedule a job to run at 10:00 p.m., you would use:

$ at 10pm -f script

At uses a very useful time specification that will permit you great flexibility with scheduling. To schedule a job for 2:00 a.m. tomorrow, use:

$ at 2am tomorrow -f script

It can also schedule jobs relational to the current time. For instance, to run a job in exactly three hours, use:

$ at now + 3 hours -f script

When scheduling a command with at, it will print out what the job number is and when it will be executed. To view currently scheduled jobs, use the atq command:

$ atq
154    2008-08-04 20:31 a vdanen

To delete a currently scheduled job, use atrm and provide it the job number:

$ atrm 154

Commands can also be provided to at using standard input, or they can be specified by using the -f option, which takes the script or command to run as the argument.

Another way of scheduling jobs with at can be done using the batch command. This will execute a command when the system load average drops below a certain value, usually 0.8 by default. The batch command takes most of the same arguments as at (less a time specification), and is useful for telling the system to execute a new command after currently running processes--that are using large amounts of processing power--are complete.
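
For example, a sketch that queues a script to run once the load average allows:

$ echo "./script" | batch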

Finally, at can be restricted to specific users by using the /etc/at.allow and /etc/at.deny files. If at.allow exists, only users listed in the file may use it. If at.deny exists, but at.allow does not, then all users may use at other than those listed in at.deny. By default, any user may use at.

The at command probably isn't something that will be used on a daily basis, but it is useful when certain tasks need to be executed at certain times, regardless of whether you are in front of the computer or not. If nothing else, it makes a great poor man's TO-DO reminder in that you can use it to send an e-mail or SMS message at specific times, as a reminder for certain events.

Sunday, September 14, 2008

I/O performance of hard disks vs flash

The defining performance characteristic of flash is that random reads are as fast as sequential reads, while random writes are very slow. The author gives a typical flash-vs-disk comparison: flash random read is 0.4 ms and random write is 48 ms, whereas hard disk random read is 9.6 ms and random write is 4.9 ms. In other words, flash is 24 times faster than disk for random reads, but disk is 10 times faster than flash for random writes.

Wednesday, September 10, 2008

timing of shrink database vs shrink datafile

I have a SQL Server 2005 database with a single data file and transaction log. The data file size is 40GB and there is 23GB of free space.
So I wanted to shrink it by clicking "shrink database". With the help of the SQL below, I could see it had completed only 50% even after running for 3.5 hours.
select percent_complete, estimated_completion_time, cpu_time, total_elapsed_time, * from sys.dm_exec_requests;

I decided to stop it and switched to "shrink file". Surprisingly, it completed in less than 1 minute and returned 7GB to disk.

Funny.

Monday, September 08, 2008

check current redo log file usage

-- Percentage full of the current redo log file.
SELECT ROUND((cpodr_bno/lesiz),2)*100||'%' PctCurLogFull
FROM X$kcccp a, X$kccle b
WHERE a.cpodr_seq = b.leseq;

PCTCURLOGFULL
---------------
35%

Tuesday, September 02, 2008

7 Interview Questions

How to diagnose whether the PGA setting is appropriate

A good article from ITPUB about PGA diagnosis

using sysdba for exp/imp

notice that there is one space after the slash inside the single quotes.
imp userid='/ as sysdba' full=y log=d:\sysfull.txt file=d:\back\sysfull.dmp

Friday, August 29, 2008

very slow when importing tables having timestamp columns.

FYI, learned during a test CMS data migration with 5 tables having timestamp columns.
The symptom is the top wait event "log file sync".
Top 5 Timed Events Avg %Total
~~~~~~~~~~~~~~~~~~ wait Call
Event Waits Time (s) (ms) Time Wait Class
------------------------------ ------------ ----------- ------ ------ ----------
CPU time 3,164 95.2
log file sync 1,409,922 3,105 2 93.4 Commit
log file parallel write 1,409,992 3,036 2 91.3 System I/O

db file parallel write 1,463 17 11 0.5 System I/O
control file parallel write 1,254 9 7 0.3 System I/O

Cause

For a number of data types, imp will insert one row at a time.

This is expected behavior if the table has any of the following datatypes:

LOBs, LONGs, ADTs (user-defined types), ROWID/UROWIDs, and timestamps.

"For tables containing LONG, LOB, BFILE, REF, ROWID, UROWID, or DATE columns, array inserts are not done. If COMMIT=y, Import commits these tables after each row."

Solution

This issue is the same as Bug 2797580, which was closed as not a bug;
subsequently an enhancement request, Bug 3452820, was logged.

The workaround for this issue is to use the COMMIT=N parameter during the import.

After I commented out "commit=y", the import completed within 6 minutes, vs 161 minutes with "commit=y".
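
For illustration, a sketch of such an import command (the file and schema names here are hypothetical):

imp userid='/ as sysdba' file=cms_tables.dmp log=imp_cms.log fromuser=cmsopr touser=cmsopr commit=n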

Wednesday, August 27, 2008

Use import statistics to save database migration downtime

drop table oracms.stat_table;
exec dbms_stats.create_stat_table('ORACMS','STAT_TABLE');

-- backup existing stats
exec dbms_stats.export_schema_stats(ownname=>'DBACMS',stattab=>'STAT_TABLE',statid=>'run_init',statown=>'ORACMS');
-- gather new stats
exec dbms_stats.gather_schema_stats(ownname=>'DBACMS');

-- export newly gathered stats
exec dbms_stats.export_schema_stats(ownname=>'DBACMS',stattab=>'STAT_TABLE',statid=>'run_new',statown=>'ORACMS');

select statid, d1, count(*) from oracms.STAT_TABLE where c5 ='DBACMS' group by statid, d1;

SQL> select last_analyzed, avg_row_len, num_rows from dba_tables where table_name='VAS_INFO';

LAST_ANAL AVG_ROW_LEN NUM_ROWS
--------- ----------- ----------
26-AUG-08 57 12763481

SQL> select last_analyzed, avg_row_len, num_rows,owner from dba_tables where table_name='VAS_INFO';

LAST_ANAL AVG_ROW_LEN NUM_ROWS OWNER
--------- ----------- ---------- ------------------------------
26-AUG-08 57 12763481 DBACMS

SQL> set timing on echo on time on
09:02:36 SQL> EXEC DBMS_STATS.DELETE_SCHEMA_STATS('DBACMS');

PL/SQL procedure successfully completed.

Elapsed: 00:00:09.57
09:02:51 SQL> select last_analyzed, avg_row_len, num_rows from dba_tables where table_name='VAS_INFO';

LAST_ANAL AVG_ROW_LEN NUM_ROWS
--------- ----------- ----------


Elapsed: 00:00:00.01
09:03:08 SQL> EXEC DBMS_STATS.IMPORT_SCHEMA_STATS('DBACMS','STAT_TABLE',statid=>'run_new',statown=>'ORACMS');

PL/SQL procedure successfully completed.

Elapsed: 00:00:07.53
09:03:25 SQL> select last_analyzed, avg_row_len, num_rows from dba_tables where table_name='VAS_INFO';

LAST_ANAL AVG_ROW_LEN NUM_ROWS
--------- ----------- ----------
26-AUG-08 57 12740519

Elapsed: 00:00:00.01

Typical features not in Standard Edition 10g

FAST_START_MTTR_TARGET
PARALLEL SERVERS
PARTITIONING

v$option shows more info
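
For example, a quick sketch (the exact parameter strings can vary by version):

SQL> select parameter, value from v$option where parameter in ('Partitioning', 'Parallel execution');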

Tuesday, August 26, 2008

If corrupt blocks are found in a table, how do you retrieve the remaining good data?

Answer: first find the ID of the corrupt block (you can run dbverify to get it); assume it is 1234. Run the query below to find the segment name:

select segment_name, segment_type, block_id, blocks from dba_extents where (1234 between block_id and (block_id + blocks - 1));

Once the corrupt segment is found: if the segment is a table, it is best to create a temporary table to hold the good data; if it is an index, drop it and re-create it.

create table good_table as select * from bad_table where rowid not in (select /*+ index(bad_table, any_index) */ rowid from bad_table where substr(rowid, 1, 8) = 1234);
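
For reference, a sketch of a dbverify run against a suspect datafile (the path and block size here are placeholders):

$ dbv file=/apps/oracle/oradata/OCP10G/users01.dbf blocksize=8192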

Listener: Static and Dynamic Registration

Prior to Oracle 8i, a listener was statically configured (listener.ora) to
service a given set of SIDs. From 8i, PMON dynamically registers a database
service with the listener.

Further, if the listener is running on the default TCP port of 1521, then
there is no need to configure a listener.ora at all.

From Oracle 8i onwards, database instances can register themselves with
the Listener when started. Prior to this release, information about the
instance was required to be manually configured in the "listener.ora" file
in the form of a STATIC SID_LIST.

Database instance registration is comprised of two elements:

- Service registration provides the Listener with instance
information, such as database service names and instance
names.

- MTS (Shared Server) dispatcher registration provides dispatcher
information to the Listener.

By default, an instance background process registers instance information
to a Listener on the local machine. If a Listener is started after the
instance, there may be a delay before the instance and dispatchers are
registered (normally 30 to 35 seconds). As such, if a Listener receives an
incoming connection request before an instance is registered, the Listener
may reject the request with an ORA-12514 error. The instance will attempt to
register its services to the Listener periodically.

To disable auto registration, add the LOCAL_LISTENER parameter to the init<>.ora file or the spfile, specifying a dummy address.

PMON will reject the invalid address and will not attempt to register with port
1521.


It will be necessary to statically configure the SID_LIST_
for your instance if you disable dynamic registration.
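
A sketch of such a static entry in listener.ora (the listener name, SID and path are placeholders):

SID_LIST_LISTENER =
  (SID_LIST =
    (SID_DESC =
      (SID_NAME = ORCL)
      (ORACLE_HOME = /u01/app/oracle/product/10.2.0/db_1)
    )
  )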

lock statistics

[symptom]

Gathering statistics for a schema or the whole database completed within a few minutes, which is not supposed to happen, as the data size is more than 100GB. There was no error message prompted.

Checked LAST_ANALYZED in dba_tables; it is null.

SQL> ANALYZE TABLE SYS.T1 COMPUTE statistics;
ANALYZE TABLE SYS.T1 COMPUTE statistics
*
ERROR at line 1:
ORA-38029: object statistics are locked


Now I realized: the statistics are locked.


SQL> exec dbms_stats.unlock_table_stats('SYS','T1');

PL/SQL procedure successfully completed.

SQL> ANALYZE TABLE SYS.T1 COMPUTE statistics;

Table analyzed.



[More Info]
You may also lock statistics at the schema level using the LOCK_SCHEMA_STATS procedure.

You may determine if a table's statistics are locked by querying the STATTYPE_LOCKED column in the
{USER | ALL | DBA}_TAB_STATISTICS view
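
For example, a sketch listing any locked tables for one schema:

SQL> select table_name, stattype_locked from dba_tab_statistics
     where owner = 'SYS' and stattype_locked is not null;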

To unlock the table statistics for a specified table you may use the UNLOCK_TABLE_STATS procedure.
Error Message

sqlplus user/user@cmspt42g
Error 6 initializing SQL*Plus
Message file sp1.msb not found
SP2-0750: You may need to set ORACLE_HOME to your Oracle software directory


Description
Wrong permissions may be set on some files following installation
of the 10gR2 base release (10.2.0.1).

eg: Some directories under $ORACLE_HOME have no world read nor
execute permission following the install (eg: rwxr-x---)

This can stop local users not in the oracle code owner group
from connecting using the bequeath driver.

Workaround:
Manually set the required permissions.
chmod -R 755 $ORACLE_HOME/*
or
Apply 10.2.0.2 or higher and run changeperm.sh
or
Download the one off patch for this bug and run changeperm.sh

Note:
A similar problem also occurs after applying the 9.2.0.7
Patch Set - that issue is covered under Bug 4533592.

Saturday, August 23, 2008

EXP-00008: ORACLE error 1403 encountered

. . exporting table BS221
EXP-00008: ORACLE error 1403 encountered
ORA-01403: no data found


Encountered when I exported a dynamic table in production while the application was active.

Did some research, but nothing matched my case. The relevant URL is listed below.

http://space.itpub.net/?uid-33600-action-viewspace-itemid-266420

On second thought, since the data is dynamic, I decided to give it a second try. (CONSISTENT=Y is used in the PAR file.)

I succeeded when I exported the table again.

IMP-00020: long column too large for column buffer size (7)

Encountered this problem while importing data from a 9i to a 10g database. The table structure was already laid out.

. . importing table "TEMP_PRE_ACC_INFO"
IMP-00020: long column too large for column buffer size (7)


The table definition in the source database (9i):
SQL> desc cmsopr.TEMP_PRE_ACC_INFO;
Name Null? Type
----------------------------------------- -------- ----------------------------
MSISDN VARCHAR2(8)
SUB_ID NUMBER(30)
SUB_TYPE VARCHAR2(10)
PERSONNAME VARCHAR2(50)
PERSONADDRESS VARCHAR2(100)
EMAIL VARCHAR2(50)
CONTACTNO VARCHAR2(10)
COMPANYNAME VARCHAR2(50)
COMPANYADDRESS VARCHAR2(100)
LASTUPDATEDATE TIMESTAMP(3)
DATEOFCREATION TIMESTAMP(3)
LAST_UPD_ID VARCHAR2(8)


And the definition in the target database (10g):

SQL> desc cmsopr.TEMP_PRE_ACC_INFO;
Name Null? Type
----------------------------------------- -------- ----------------------------
MSISDN VARCHAR2(8)
SUB_ID NUMBER(30)
SUB_TYPE VARCHAR2(10)
PERSONNAME VARCHAR2(50)
PERSONADDRESS VARCHAR2(100)
EMAIL VARCHAR2(50)
CONTACTNO VARCHAR2(10)
COMPANYNAME VARCHAR2(50)
COMPANYADDRESS VARCHAR2(100)
LASTUPDATEDATE TIMESTAMP(0)
DATEOFCREATION TIMESTAMP(0)
LAST_UPD_ID VARCHAR2(8)


According to Metalink note 286597.1, this is a bug in 8i and 9i when the table is created over a DB link.

Below is the extract from Metalink.


Subject: Oracle9i: IMP-20 on Import of Table with TIMESTAMP Column that was Created via Database Link

Doc ID: Note:286597.1 Type: PROBLEM

Last Revision Date: 04-JUL-2008 Status: PUBLISHED

The information in this article applies to:
- Oracle Server - Enterprise Edition - Version: 8.1.7.0 to 9.2.0.5
- Oracle Server - Personal Edition - Version: 8.1.7.0 to 9.2.0.5
- Oracle Server - Standard Edition - Version: 8.1.7.0 to 9.2.0.5
This problem can occur on any platform.


ERRORS
------

IMP-00020: long column too large for column buffer size (7)


SYMPTOMS
--------

In a remote Oracle8i or higher release database, you have a table with
a column of the TIMESTAMP data type:

SQL> connect scott/tiger
SQL> alter session set nls_timestamp_format='YYYY-MM-DD HH24:MI:SSXFF';
SQL> create table my_original_tab (nr number, stamp_col timestamp(3));
SQL> insert into my_original_tab values (1, '2004-10-20 18:34:31.123456');
SQL> commit;
SQL> select * from my_original_tab;

NR STAMP_COL
---------- --------------------------------
1 2004-10-20 18:34:31.123


In your local Oracle8i or Oracle9i database you have created a database link:

SQL> connect system/manager
SQL> grant create database link to scott;

SQL> connect scott/tiger
SQL> alter session set nls_timestamp_format='MON DD, YYYY HH:MI:SSXFF AM';
SQL> create database link orcl.oracle.com
connect to scott identified by tiger using 'orcl';


Now you create a new table in this database with the CREATE TABLE AS SELECT
statement (CTAS), accessing the original table via the database link:

SQL> create table my_tab
as select * from my_original_tab@orcl.oracle.com;
SQL> select * from my_tab;

NR STAMP_COL
---------- --------------------------------
1 OCT 20, 2004 06:34:31.123 PM


You export this table:

% exp scott/tiger file=exp_tab.dmp log=exp_t.log tables=my_tab

About to export specified tables via Conventional Path ...
. . exporting table MY_TAB 1 rows exported
Export terminated successfully without warnings.


And you try to import from this export dump file, e.g. into a different user:

% imp system/manager file=exp_tab.dmp log=imp_t.log fromuser=scott touser=hugo

. importing SCOTT's objects into HUGO
. . importing table "MY_TAB"
IMP-00020: long column too large for column buffer size (7)
Import terminated successfully with warnings.


- Increasing the value for the Import BUFFER parameter does not solve the
IMP-20 error.
- Pre-creating the table in the target schema, and running import with
IGNORE=Y does not solve the IMP-20 error.
- Specifying Import parameter COMMIT=Y does not import any row for this table.


CAUSE
-----

If you create a table with the CREATE TABLE ... AS SELECT (CTAS) syntax,
and the original table is located on a remote database that you access
via database link, and the table has a column with the TIMESTAMP data type,
then the new created table has an incorrect precision in the data dictionary.
Example:

If you would query the view dba_tab_columns for the original table in the
remote database you would see:

SQL> connect system/manager
SQL> select substr(data_type,1,15) data_type, data_length, data_precision,
data_scale, column_id, substr(column_name,1,15) column_name
from dba_tab_cols where owner='SCOTT' and table_name='MY_ORIGINAL_TAB'
order by column_id;

DATA_TYPE DATA_LENGTH DATA_PRECISION DATA_SCALE COLUMN_ID COLUMN_NAME
--------------- ----------- -------------- ---------- ---------- ------------
NUMBER 22 1 NR
TIMESTAMP(3) 11 3 2 STAMP_COL


However, a similar query in the local database (where you ran the CTAS),
gives a different result:

SQL> connect system/manager
SQL> select substr(data_type,1,15) data_type, data_length, data_precision,
data_scale, column_id, substr(column_name,1,15) column_name
from dba_tab_cols where owner='SCOTT' and table_name='MY_TAB'
order by column_id;

DATA_TYPE DATA_LENGTH DATA_PRECISION DATA_SCALE COLUMN_ID COLUMN_NAME
--------------- ----------- -------------- ---------- ---------- ------------
NUMBER 22 1 NR
TIMESTAMP(0) 11 0 2 STAMP_COL


If you export this table, there is a mismatch in the metadata syntax of
the table in the export dumpfile. As a result, import will fail because
import expects a field of 7 bytes (used to store the value of a timestamp
without any precision), but actually encounters values of 11 bytes (used
to store timestamps with a precision). As a result, an error is produced:

IMP-00020: long column too large for column buffer size (7)


FIX
---

This defect has been fixed in Oracle10g. This means that if the table is
created in Oracle10g via a database link and exported afterwards, then
the import of that table with a TIMESTAMP column, will not result
in an IMP-20 error.

In Oracle8i and Oracle9i, use the workaround by correcting the precision
of the TIMESTAMP column in the data dictionary. Example:

1. In the local database, determine the correct value for the precision
of the column with the timestamp data type. You can determine the correct
value with a simple DESCRIBE command:

SQL> connect scott/tiger
SQL> desc my_tab

Name Null? Type
----------------------------------------- -------- -------------
NR NUMBER
STAMP_COL TIMESTAMP(3)

2. Modify the column:

SQL> connect scott/tiger
SQL> alter table my_tab modify (stamp_col timestamp(3));

If you receive the error:
ORA-01866: the datetime class is invalid
Please refer to:
Note 372638.1 "ORA-1866 on ALTER TABLE MODIFY when trying to change
precision of timestamp column"

3. Check for the correct output (scale = 3 in this case):

SQL> connect system/manager
SQL> select substr(data_type,1,15) data_type, data_length, data_precision,
data_scale, column_id, substr(column_name,1,15) column_name
from dba_tab_cols where owner='SCOTT' and table_name='MY_TAB'
order by column_id;

DATA_TYPE DATA_LENGTH DATA_PRECISION DATA_SCALE COLUMN_ID COLUMN_NAME
--------------- ----------- -------------- ---------- ---------- ------------
NUMBER 22 1 NR
TIMESTAMP(3) 11 3 2 STAMP_COL

4. Afterwards, re-create the export dumpfile:

% exp scott/tiger file=exp_tab.dmp log=exp_t.log tables=my_tab

About to export specified tables via Conventional Path ...
. . exporting table MY_TAB 1 rows exported
Export terminated successfully without warnings.

5. And re-run the import from the new export dump file:

% imp system/manager file=exp_tab.dmp log=imp_t.log fromuser=scott touser=hugo

. importing SCOTT's objects into HUGO
. . importing table "MY_TAB" 1 rows imported
Import terminated successfully without warnings.

REFERENCE
---------
Bug 2417643 "CTAS VIA DBLINK WITH TIMESTAMP COLUMN CHANGES PRECISION TO 0
IN DATA DICTIONARY".
Note 372638.1 "ORA-1866 on ALTER TABLE MODIFY when trying to change
precision of timestamp column"



PLS-00371

157/6 PL/SQL: SQL Statement ignored
157/93 PLS-00371: at most one declaration for 'RP_CMS_OFFER_RETRIEVAL.LCAMPAIGNCODE' is permitted


[ROOT CAUSE]
lcampaigncode VARCHAR2(10) := '';
lsegtable VARCHAR2(30) := '';
lidentification_no VARCHAR2(30) := '';
idnocounter INTEGER := 1;
idnostart INTEGER := 0;
idnoend INTEGER := -1;
lcampaigncode VARCHAR2(10) := '';
...


As you can see above, there are two declarations of lcampaigncode.

After I removed one, the procedure recompiled successfully.


This problem occurred when I imported objects from a 9i database into 10g. The object was valid in 9i, but invalid in 10g. I think 10g has stricter checking.


Thursday, August 14, 2008

How to rename an instance?

Method to rename an instance:
1. shutdown the DB
2. prepare the following settings
prx06:DASP2:/software/oradas/product/9.2.0/dbs> ls -l
lrwxrwxr-x 1 oradas dba 46 Sep 22 17:21 initDASP.ora -> /software/oradas/admin/DASP/pfile/initDASP.ora
lrwxr-xr-x 1 oradas dba 46 Oct 13 11:51 initDASP2.ora -> /software/oradas/admin/DASP/pfile/initDASP.ora
-rwSr----- 1 oradas dba 3072 Oct 11 15:53 orapwDASP
-rwSr----- 1 oradas dba 3072 Oct 11 15:53 orapwDASP2
3. export ORACLE_SID=DASP2
4. change listener.ora and tnsnames.ora accordingly
5. change service_names inside init.ora

6. bring up and verify v$instance and v$database
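
A sketch of the verification in step 6:

SQL> select instance_name from v$instance;
SQL> select name, db_unique_name from v$database;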



db_name
DB_NAME specifies a database identifier of up to 8 characters. This parameter must be specified and must correspond to the name specified in the CREATE DATABASE statement.

db_unique_name
DB_UNIQUE_NAME specifies a globally unique name for the database. Databases with the same DB_NAME within the same DB_DOMAIN (for example, copies of a database created for reporting or a physical standby) must have a unique DB_UNIQUE_NAME. Every database's DB_UNIQUE_NAME must be unique within the enterprise.

instance_name
In a Real Application Clusters environment, multiple instances can be associated with a single database service. Clients can override Oracle's connection load balancing by specifying a particular instance by which to connect to the database. INSTANCE_NAME specifies the unique name of this instance.

In a single-instance database system, the instance name is usually the same as the database name.


service_names
SERVICE_NAMES specifies one or more names by which clients can connect to the instance. The instance registers its service names with the listener. When a client requests a service, the listener determines which instances offer the requested service and routes the client to the appropriate instance.

--before

SQL> show parameter name

NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
db_file_name_convert string
db_name string CMSPT42g
db_unique_name string CMSPT42g
global_names boolean FALSE
instance_name string CMSPT42g
lock_name_space string
log_file_name_convert string
service_names string CMSPT42g

--Followed the above steps to rename the current instance CMSPT42g to CMSPT42G2
--after

SQL> show parameter _name

NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
db_file_name_convert string
db_name string CMSPT42g
db_unique_name string CMSPT42g
global_names boolean FALSE
instance_name string CMSPT42G2
lock_name_space string
log_file_name_convert string
service_names string CMSPT42G2


psdev03:CMSPT42G2:/software/oracle/admin/CMSPT42g/pfile> ps -ef |grep CMSPT42G
oracle 25062 1 0 14:24:58 ? 0:00 ora_lgwr_CMSPT42G2
oracle 25058 1 0 14:24:58 ? 0:00 ora_mman_CMSPT42G2
oracle 25068 1 0 14:24:58 ? 0:00 ora_reco_CMSPT42G2
oracle 25094 1 0 14:25:19 ? 0:00 ora_q000_CMSPT42G2
oracle 25070 1 0 14:24:59 ? 0:00 ora_cjq0_CMSPT42G2
oracle 25064 1 0 14:24:58 ? 0:00 ora_ckpt_CMSPT42G2
oracle 25054 1 0 14:24:58 ? 0:00 ora_pmon_CMSPT42G2
oracle 25074 1 0 14:24:59 ? 0:00 ora_mmnl_CMSPT42G2
oracle 25141 24354 0 14:27:02 pts/2 0:00 grep CMSPT42G
oracle 25072 1 0 14:24:59 ? 0:00 ora_mmon_CMSPT42G2
oracle 25078 1 0 14:24:59 ? 0:00 ora_s000_CMSPT42G2
oracle 25120 1 0 14:25:59 ? 0:00 ora_q001_CMSPT42G2
oracle 25086 1 0 14:25:09 ? 0:00 ora_qmnc_CMSPT42G2
oracle 25056 1 0 14:24:58 ? 0:00 ora_psp0_CMSPT42G2
oracle 25066 1 0 14:24:58 ? 0:00 ora_smon_CMSPT42G2
oracle 25060 1 0 14:24:58 ? 0:00 ora_dbw0_CMSPT42G2
oracle 25076 1 0 14:24:59 ? 0:00 ora_d000_CMSPT42G2

Thursday, August 07, 2008

two good AWR articles

--to use AWR
http://www.dbapool.com/articles/061906.html

--to check AWR retention
select * from DBA_HIST_WR_CONTROL;


--to estimate AWR size
http://www.dba-oracle.com/t_awr_retention_period.htm
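
To change the retention or interval, DBMS_WORKLOAD_REPOSITORY can be used; a minimal sketch (both values are in minutes, so 43200 = 30 days):

SQL> exec dbms_workload_repository.modify_snapshot_settings(retention => 43200, interval => 30);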

ORA-12537: TNS:connection closed

Restarted the listener; it looked fine and users could connect.

But after a while, I was told it had happened again.

Funny. Googled the internet and followed the article below.

http://www.dba-oracle.com/t_ora_12537_tns_error.htm


Nothing matched my case.

No choice; tried my luck restarting the database. While shutting it down, it hung on dispatcher d000.


shutdown abort
startup

It looks to have been working for more than 2 hours now.

Will keep monitoring it further.

Monday, June 23, 2008

Is there a big difference between a full cold backup and a full hot backup for later data recovery?

Use fsck to fix "grub error 21"

-- Last night when I shut down Windows XP, it hung, so as usual I pressed the power button to turn it off by force.

Oops, today when I powered it on, "Grub Error 21" was thrown at me.

Googled it: "Grub Error 21" means the disk can't be found.


-- I installed SUSE, CentOS and Windows XP on this PC.
-- The GRUB loader is the boot loader.
-- The partition for XP is FAT32. Another FAT32 partition holds my data, nearly 100GB (created using Magic Partition).
-- The disk is about two years old.
-- It is a SATA2 hard disk, but the motherboard only supports SATA-1, so the jumper is set to run in SATA-1 mode.
-- Never had such a problem before from pressing the power button by force.

Goto BIOS setup, there is no such 320GB SATA hard disk detected by defaul. When Pressed Enter to search it manually. The size detected is 136Gb.

--Uninstalled the HD, and connect to my another PC , which is running 3 other linuxs: SUSE 10.3, CentOS(RHEL) 5.2, and Ubuntu 8.4 .

-- Attach the HD to it, luckily that BIOS can detect it . But when booting failed with all three linuxs. I am disapointed. Why ?!

Warned my girlfriend that all data might lost, including mine. The most important thing to her is she can't IPTV while lying on bed and may lose many digital photoes.

She is sad. :(

Actually, I have more concerns as I have 100Gb data there for ten years. Not all has backup.



Watching TV, I thought again: what else haven't I tried?


Finally, I had a clue. The motherboard is new and supports SATA-2. Why had I still set the jumper to SATA-1?


Trying again ...
-- Booting failed with CentOS
-- Booting failed with SUSE
-- Ubuntu dropped into rescue mode.

Did a fdisk -l /dev/sda

All partitions are there. Good!

-- Mounted my data partition and moved the data to another 500 GB hard disk without error (this partition's filesystem looks OK).
-- Now I suspect some logical corruption (the C:\ filesystem) rather than hardware corruption.
-- fsck came to mind.
-- Unmounted my data partition /dev/sda8 and ensured /dev/sda1 was not mounted.
-- fsck /dev/sda1. Wow, there were many illegal file names there. I just dropped or auto-renamed them one by one.
-- About 15 minutes later, fsck finished.
-- Then verified with CentOS, which booted successfully.


Set the jumper back to SATA-1 and attached the disk to the original PC.
-- The BIOS was able to detect it properly.
-- Continued to boot: no more grub error 21, and I saw the XP flag flying again.

Cheers! Thanks to Ubuntu fsck.



Lessons learned, and doubts:

-- Press the power button with caution; this may corrupt the boot sector (or was it caused by a virus?).
-- What is enhanced inside Ubuntu's fsck, since it worked where SUSE/RHEL couldn't?
-- Does fsck support NTFS? It looks like FAT32 still supports partitions larger than 32 GB well (created with Partition Magic).


Friday, May 30, 2008

Merge Join caused Cartesian Product

--Merge Join caused Cartesian Product

1591 RPTOPR CREATE TABLE tmp_aio_acct_tp1 as SELECT distinct(a.customer_id),
1591 RPTOPR decode((c.bld_stat_id),1,'Blacklisted') blist_status, d.bdr_evs
1591 RPTOPR dt start_blist, d.bdr_blist_resn_type_id blist_reason FROM tmp_a
1591 RPTOPR io_acct_tb a, blist_cust b, blist_dtls c, blist_dtls_resn_code d
1591 RPTOPR -- WHERE rtrim(a.customer_id) = rtrim(b.blc_cust_id) and WHERE
1591 RPTOPR rtrim(a.customer_id) = b.blc_cust_id and b.bld_blist_id=c.bld_bl
1591 RPTOPR ist_id and b.bld_blist_id=d.bld_blist_id and d.bdr_evedt is NULL
1591 RPTOPR


--This query couldn't finish even after 10 hours.
--Expected 16 hours to complete.
--Notice the number of executions is extremely high.

SQL ordered by Executions  DB/Inst: PRXP/PRXRP  Snaps: 581-587
-> Total Executions: 96,444,779
-> Captured SQL account for 100.0% of Total

CPU per Elap per
Executions Rows Processed Rows per Exec Exec (s) Exec (s) SQL Id
------------ --------------- -------------- ---------- ----------- -------------
96,183,306 0 0.0 0.00 0.00 b9ag273wzuhbx
Module: oracleODSP@ods01 (TNS V1-V3)
SELECT "BLC_CUST_ID","BLD_BLIST_ID" FROM "DBO"."BLIST_CUST" "B" WHERE "BLC_CUST_
ID"=:1 AND "BLD_BLIST_ID"=:2


Elapsed CPU Elap per % Total
Time (s) Time (s) Executions Exec (s) DB Time SQL Id
---------- ---------- ------------ ---------- ------- -------------
2,900 2,898 96,183,306 0.0 14.4 b9ag273wzuhbx
Module: oracleODSP@ods01 (TNS V1-V3)
SELECT "BLC_CUST_ID","BLD_BLIST_ID" FROM "DBO"."BLIST_CUST" "B" WHERE "BLC_CUST_
ID"=:1 AND "BLD_BLIST_ID"=:2


SQL ordered by Gets  DB/Inst: PRXP/PRXRP  Snaps: 581-587
-> Resources reported for PL/SQL code includes the resources used by all SQL
statements called by the code.
-> Total Buffer Gets: 719,230,530
-> Captured SQL account for 99.9% of Total

Gets CPU Elapsed
Buffer Gets Executions per Exec %Total Time (s) Time (s) SQL Id
-------------- ------------ ------------ ------ -------- --------- -------------
319,369,588 96,183,306 3.3 44.4 2897.99 2900.45 b9ag273wzuhbx
Module: oracleODSP@ods01 (TNS V1-V3)
SELECT "BLC_CUST_ID","BLD_BLIST_ID" FROM "DBO"."BLIST_CUST" "B" WHERE "BLC_CUST_ID"=:1 AND "BLD_BLIST_ID"=:2



1591 RPTOPR 1 0 1 LOAD AS SELECT
1591 RPTOPR 2 1 1 SORT
1591 RPTOPR 3 2 1 NESTED LOOPS
1591 RPTOPR 4 3 1 NESTED LOOPS
1591 RPTOPR 5 4 1 MERGE JOIN
1591 RPTOPR 6 5 1 REMOTE BLIST_DTLS_RESN_CODE
1591 RPTOPR 7 5 2 BUFFER
1591 RPTOPR 8 7 1 TABLE ACCESS TMP_AIO_ACCT_TB
1591 RPTOPR 9 4 2 REMOTE BLIST_CUST
1591 RPTOPR 10 3 2 REMOTE BLIST_DTLS


-- The row counts of these two tables explain the high number of executions.
-- 3 million rows in BLIST_DTLS_RESN_CODE, of which about a quarter meet "d.bdr_evedt is NULL"; 7k rows in TMP_AIO_ACCT_TB.
-- 3,200k / 4 * 7k = 800k * 7k = 5,600 million (5.6 billion) row combinations.



SELECT distinct(a.customer_id),
decode((c.bld_stat_id),1,'Blacklisted') blist_status, d.bdr_evsdt start_blist, d.bdr_blist_resn_type_id blist_reason
FROM t1 a, blist_cust b, blist_dtls c, blist_dtls_resn_code d
WHERE rtrim(a.customer_id) = b.blc_cust_id and b.bld_blist_id=c.bld_blist_id and b.bld_blist_id=d.bld_blist_id and d.bdr_evedt is NULL;

SELECT /*+ ordered use_nl(b c d) */
distinct(a.customer_id),
decode((c.bld_stat_id),1,'Blacklisted') blist_status, d.bdr_evsdt start_blist, d.bdr_blist_resn_type_id blist_reason
FROM t1 a, blist_cust b, blist_dtls c, blist_dtls_resn_code d
WHERE rtrim(a.customer_id) = b.blc_cust_id and b.bld_blist_id=c.bld_blist_id
and b.bld_blist_id=d.bld_blist_id and d.bdr_evedt is NULL;

1601 RPTOPR 0 17168 SELECT STATEMENT
1601 RPTOPR 1 0 1 SORT
1601 RPTOPR 2 1 1 NESTED LOOPS
1601 RPTOPR 3 2 1 NESTED LOOPS
1601 RPTOPR 4 3 1 NESTED LOOPS
1601 RPTOPR 5 4 1 TABLE ACCESS T1
1601 RPTOPR 6 4 2 REMOTE BLIST_CUST
1601 RPTOPR 7 3 2 REMOTE BLIST_DTLS
1601 RPTOPR 8 2 2 REMOTE BLIST_DTLS_RESN_CODE



-- finished within 1 minute



--try this hint (bad plan, same as the original)
SELECT /*+ use_nl(b c d) */
distinct(a.customer_id),
decode((c.bld_stat_id),1,'Blacklisted') blist_status, d.bdr_evsdt start_blist, d.bdr_blist_resn_type_id blist_reason
FROM t1 a, blist_cust b, blist_dtls c, blist_dtls_resn_code d
WHERE rtrim(a.customer_id) = b.blc_cust_id and b.bld_blist_id=c.bld_blist_id
and b.bld_blist_id=d.bld_blist_id and d.bdr_evedt is NULL;


1601 RPTOPR 0 4506 SELECT STATEMENT
1601 RPTOPR 1 0 1 SORT
1601 RPTOPR 2 1 1 NESTED LOOPS
1601 RPTOPR 3 2 1 NESTED LOOPS
1601 RPTOPR 4 3 1 MERGE JOIN
1601 RPTOPR 5 4 1 REMOTE BLIST_DTLS_RESN_CODE
1601 RPTOPR 6 4 2 BUFFER
1601 RPTOPR 7 6 1 TABLE ACCESS T1
1601 RPTOPR 8 3 2 REMOTE BLIST_CUST
1601 RPTOPR 9 2 2 REMOTE BLIST_DTLS

--tried the ORDERED hint only

SELECT /*+ ordered */
distinct(a.customer_id),
decode((c.bld_stat_id),1,'Blacklisted') blist_status, d.bdr_evsdt start_blist, d.bdr_blist_resn_type_id blist_reason
FROM t1 a, blist_cust b, blist_dtls c, blist_dtls_resn_code d
WHERE rtrim(a.customer_id) = b.blc_cust_id and b.bld_blist_id=c.bld_blist_id
and b.bld_blist_id=d.bld_blist_id and d.bdr_evedt is NULL;

1601 RPTOPR 0 9458 SELECT STATEMENT
1601 RPTOPR 1 0 1 SORT
1601 RPTOPR 2 1 1 HASH JOIN
1601 RPTOPR 3 2 1 REMOTE BLIST_DTLS_RESN_CODE
1601 RPTOPR 4 2 2 HASH JOIN
1601 RPTOPR 5 4 1 TABLE ACCESS T1
1601 RPTOPR 6 4 2 REMOTE


--okay



--I think the DRIVING_SITE hint should also work, but I had no chance to test it again.


The join-operations group of hints controls how joined tables merge data together. A join
hint may direct the optimizer to choose the best path for retrieving all rows for a query
(throughput) or for retrieving the first row (response time).
ORDERED tells the optimizer to join the tables based on their
order in the FROM clause, using the first table listed as the driving table (accessed first).

--Tuned queries from hours to seconds using this method.


However, as to the root cause, the optimizer is still a black box to us!

structure of redo.log is exactly the same as its archived log

What happens when you issue the command below in the wrong database

recover database using backup controlfile until cancel;

This could happen on the Windows platform if you unfortunately have the following settings:

1. The setting below on both the client & server node in sqlnet.ora
SQLNET.AUTHENTICATION_SERVICES = (NTS) #allow local login as sysdba
2. A tnsnames.ora entry pointing to the remote database.

REMOTE_OS_AUTHENT has no effect on Windows Native Authentication; the same information is passed
regardless of this setting.
-- lax control caused the disaster


Hence, after pressing Enter, the database likely enters recovery mode (you may not know immediately ...).

--I think the control file status changed

Finally you will know ...


When you try to recover it:

--You can't use the real backed-up control file without restoring all the backed-up datafiles
--startup mount
--RECOVER database using backup controlfile until cancel; (this control file is the current one, which was just corrupted by the command)


It complains that it can't find the last archived log file, e.g. TEST10P_001_0627748383_17328.ARC.
Actually, that is the CURRENT redo log.


So what I did was copy the redo log to the archive directory and rename it to TEST10P_001_0627748383_17328.ARC,


then tried the command again:


ORA-279 signalled during: ALTER DATABASE RECOVER database using backup controlfile until cancel ...
Fri May 23 16:16:58 2008
ALTER DATABASE RECOVER CONTINUE DEFAULT
Fri May 23 16:16:58 2008
Media Recovery Log G:\TESTDB\ORAARCH2\TEST10P\TEST10P_001_0627748383_17328.ARC


RECOVER database using backup controlfile until cancel ;

Cheers! I saw the message: Media Recovered.

Finally, "alter database open resetlogs;"


We are saved!

My heart rate dropped from 200 to 65.



From this case, I guess the file structure of an online redo log is exactly the same as its archived log.

DBMS package for tracing

--using dbms_system (the arguments are sid, serial#, and the trace on/off flag)
execute dbms_system.set_sql_trace_in_session(9,190,TRUE);

--You can also initiate a TRACE for the session that you are in using the DBMS_SESSION package.

execute DBMS_SESSION.SET_SQL_TRACE (TRUE);

--In 10g, use the DBMS_MONITOR package instead.
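For example, a minimal sketch (the sid/serial# values are placeholders, matching the dbms_system call above):

--enable SQL trace with wait events for another session
execute DBMS_MONITOR.SESSION_TRACE_ENABLE(session_id => 9, serial_num => 190, waits => TRUE, binds => FALSE);

--and switch it off when done
execute DBMS_MONITOR.SESSION_TRACE_DISABLE(session_id => 9, serial_num => 190);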

Sunday, May 25, 2008

白狐 (White Fox)

Having once known the vast sea, no other water will do;
except the clouds over Mount Wu, no clouds are worth the name.
I pass through the flowers without a backward glance,
half because of the Way I follow, half because of you.

Friday, May 23, 2008

the after images

Today my boss asked me whether the log file holds the before image or the after image.

I was confused by the term "after image" ...

Googled it, got the following, ..., and rang the boss back:

Oracle Log Buffer

Oracle creates redo logs for all update operations. In case of disk failure, the redo logs are used to roll forward, since they contain the after image of all row changes.

First, the after images are written to the log buffer area of RAM. The LGWR background process then transfers the images to Oracle online redo log files. In the last step, the ARCH background process writes the online redo log files to the archived redo log file system, where they are available to recover the database in the event of disk failure.

However, some shops avoid this overhead by using triple-mirrored disks and running their databases in NOARCHIVELOG mode. These shops believe the high redundancy is sufficient protection from a disk crash, and they deliberately reject the ability to roll the database forward, in return for faster performance.
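A quick sketch to check which mode a database is actually running in:

SQL> select log_mode from v$database;

or, as SYSDBA in SQL*Plus:

SQL> archive log list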

Thursday, May 22, 2008

redo log size

Check V$MTTR_TARGET_ADVICE and V$LOG_HISTORY for the log switch frequency
and the optimal log size, which is influenced by the MTTR target.

The view V$INSTANCE_RECOVERY contains a new column, OPTIMAL_LOGFILE_SIZE,
which recommends a minimum size for the redo log files:
SQL> select optimal_logfile_size from v$instance_recovery;
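A sketch to see how often log switches actually happen (a common rule of thumb is to size the logs so switches occur at most every 15-20 minutes; treat that as a guideline, not gospel):

--log switches per day, from v$log_history
select trunc(first_time) day, count(*) switches
from v$log_history
group by trunc(first_time)
order by 1;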

Wednesday, May 21, 2008

Determining the Bad Source of statistics

Basic Analysis:
1. Is the rows estimate from the explain plan correct?
2. Is the rows estimate correct in all_[tab|part]_col_statistics? (see the sketch below)
3. Is the sample size large enough to give accurate row estimates?
4. Obtain accurate stats and recheck the plan.
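A sketch for steps 2 and 3 (the owner and table name are placeholders):

select column_name, num_distinct, num_nulls, sample_size, last_analyzed
from all_tab_col_statistics
where owner = 'RPTOPR'
and table_name = 'BLIST_CUST';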

Monday, May 19, 2008

About Histogram

Histograms generally help with skewed data.
Column-level statistics include index statistics and histograms.
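A sketch of gathering a histogram on a skewed column with DBMS_STATS (the table and column names are placeholders):

BEGIN
  --up to 254 buckets on the skewed column; cascade refreshes index statistics too
  DBMS_STATS.GATHER_TABLE_STATS(
    ownname    => user,
    tabname    => 'BLIST_CUST',
    method_opt => 'FOR COLUMNS BLC_CUST_ID SIZE 254',
    cascade    => TRUE);
END;
/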

Saturday, May 17, 2008

Abandon Notepad++

Finally I uninstalled it, after it was with me for more than 3 years.

The reason is that it is boycotting the Beijing 2008 Olympics. I don't have any reason to appreciate such a programmer.

There are quite a few replacements, listed here:

--gvim
--Textpad
--Programmer's Notepad

Cheers. I no longer feel disgusted whenever I use a text editor.

block's life of Full Table Scan

When a block needs to be read into the buffer cache, Oracle must first find a free buffer in
which to store the block. The process searches the LRU list, beginning at the least recently used
end. It will search until it finds a free buffer. If it cannot find any, it will signal the DBW0 process
to flush any dirty blocks back to disk in order to make room. If no dirty blocks exist, the least
recently used block will be aged out to make room. The block will then be written to the buffer,
and the buffer moves to the most recently used end of the LRU list.

The exception to this rule is when a full table scan operation is performed. Blocks retrieved
from a full table scan are added to the least recently used end of the LRU list, so they will be aged
out quickly. This is because full table scan blocks are generally scanned quickly and are no longer
needed. This functionality can cause problems for small tables that are accessed frequently, such
as lookup tables. Small tables are meant to be accessed via full table scans. This is because a full
table scan will outperform an index lookup on a very small table.
Because Oracle 10g will put these blocks at the least recently used end of the LRU list, they
will age out quickly. As a result, the next time that the table is accessed, Oracle 10g may likely
have to perform PIO to retrieve the blocks again. In this situation, you can add the CACHE clause
to the table (either via ALTER TABLE or in the CREATE TABLE statements) to circumvent this
behavior. You can also choose to pin the table in memory.
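For example (a sketch on a hypothetical small lookup table):

--keep full-scan blocks at the MRU end of the LRU list
ALTER TABLE country_lookup CACHE;

--or pin it in a dedicated KEEP pool (requires DB_KEEP_CACHE_SIZE > 0)
ALTER TABLE country_lookup STORAGE (BUFFER_POOL KEEP);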

remove external procedures

By default, Oracle creates a service for external procedures in the listener. For the majority
of users, this service is never used. Therefore, the service should be removed from the listener.
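On a default install this usually means deleting the extproc entries from listener.ora and reloading the listener. A sketch of the stanza to look for (the listener name and paths vary by install):

# in listener.ora: remove (or comment out) the external-procedure entry, then run: lsnrctl reload
SID_LIST_LISTENER =
  (SID_LIST =
    (SID_DESC =
      (SID_NAME = PLSExtProc)
      (ORACLE_HOME = /apps/oracle/product/10.2.0)
      (PROGRAM = extproc)
    )
  )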

Database checkpoint

Database checkpoints are closely tied to redo log file switches. A checkpoint is an event that
flushes the modified data from the buffer cache to the disk and updates the control file and datafiles.
The CKPT process updates the headers of datafiles and control files; the actual blocks are
written to the file by the DBWn process. A checkpoint is initiated:
-- When the redo log file is filled and a log switch occurs.
-- When the instance is shut down with NORMAL, TRANSACTIONAL, or IMMEDIATE.
-- When a tablespace status is changed to read-only or put into BACKUP mode.
-- When other values specified by certain parameters (discussed later in this section) are reached.
You can force a checkpoint if needed, as shown here:
ALTER SYSTEM CHECKPOINT;
Forcing a checkpoint ensures that all changes to the database buffers are written to the datafiles
on disk.
Another way to force a checkpoint is by forcing a log file switch:
ALTER SYSTEM SWITCH LOGFILE;

Automatic Checkpoint Tuning
===========================
Oracle Database 10g supports automatic checkpoint tuning. It is enabled if
FAST_START_MTTR_TARGET is explicitly set to a non-zero value, or
if FAST_START_MTTR_TARGET is not set at all.
It is an advancement over the MTTR related parameter introduced in
earlier versions. The idea is to use the periods of low I/O usage
to advance checkpoints and therefore improve availability.

How it works
============
Enabling fast-start checkpointing increases the average number of writes
per transaction that DBWn issues for a given workload. However, if the
system is not already near or at its maximum I/O capacity, then
fast-start checkpointing has a negligible impact on performance

How to monitor
==============
The view V$MTTR_TARGET_ADVICE gives information on the additional I/Os
at various values of FAST_START_MTTR_TARGET.
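For example (a sketch):

--estimated extra I/O at candidate MTTR settings
select mttr_target_for_estimate, estd_total_ios, estd_total_io_factor
from v$mttr_target_advice
order by mttr_target_for_estimate;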

PROs and CONs:
=============
- FAST_START_MTTR_TARGET is set to a low value :
Fast-start checkpointing is more aggressive.
The average number of writes per transaction that DBWn issues
is higher in order to keep the thread checkpoint sufficiently
advanced to meet the requested MTTR.
- FAST_START_MTTR_TARGET is set to a high value:
Fast-start checkpointing is less aggressive, and the average
number of writes per transaction that DBWn issues is lower.

rows returned order

In heap-organized tables and traditional hash clusters, the order in which
rows are returned is not under user control and depends on internal algorithms and the relative
physical location of data blocks on disk.
In a sorted hash cluster, by contrast, Oracle maintains for each hash cluster
key a list of rows sorted on one or more sort columns.
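A sketch of a 10g sorted hash cluster, modeled on the documented call-detail example (all names are illustrative):

CREATE CLUSTER call_cluster (
  telephone_number NUMBER,
  call_timestamp   NUMBER SORT )
  HASHKEYS 10000
  HASH IS telephone_number
  SIZE 256;

CREATE TABLE call_detail (
  telephone_number NUMBER,
  call_timestamp   NUMBER SORT,   --rows come back ordered by this column per key
  other_info       VARCHAR2(30) )
  CLUSTER call_cluster (telephone_number, call_timestamp);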

Thursday, May 15, 2008

object's precedence

There is an order of precedence with regard to the use of synonyms and local objects. This is:

1. Local objects will always be accessed first.

2. If a local object does not exist, the object with a private synonym will be accessed.

3. If a private synonym does not exist or the object does not exist, then the public synonym will be used.

Note that a synonym can be created for an object that does not exist, and an object with an associated synonym can be dropped without removing the synonym. This can cause all sorts of interesting problems for DBAs, so be careful.
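A quick sketch demonstrating the precedence (all names are illustrative, and the needed grants are assumed):

--as user SCOTT, with no local object called emp_data yet:
CREATE PUBLIC SYNONYM emp_data FOR hr.employees;

SELECT count(*) FROM emp_data;   --resolves via the public synonym

CREATE TABLE emp_data (id NUMBER);

SELECT count(*) FROM emp_data;   --now resolves to the local table instead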

Monday, May 12, 2008

Object Namespace

Tables and views share one namespace, and the database has a separate
namespace for each of the following:
Indexes
Constraints
Clusters
Database triggers
Private database links
Dimensions
Roles
Public synonyms
Public database links
Tablespaces
Profiles
Parameter files (PFILEs)

reconfig DB Console HTTP port number

cat $ORACLE_HOME/install/portlist.ini

Then run emca with the arguments below to reconfigure the DB Control and Agent port numbers:

emca -reconfig ports -DBCONTROL_HTTP_PORT 5504 -AGENT_PORT 1834

STARTED EMCA at May 12, 2008 2:08:16 PM
EM Configuration Assistant, Version 10.2.0.1.0 Production
Copyright (c) 2003, 2005, Oracle. All rights reserved.

Enter the following information:
Database SID: MNPP

Do you wish to continue? [yes(Y)/no(N)]: Y
May 12, 2008 2:08:24 PM oracle.sysman.emcp.EMConfig perform
INFO: This operation is being logged at /software/oramnp/product/10.2.0/cfgtoollogs/emca/MNPP/emca_2008-05-12_02-08-16-PM.log.
May 12, 2008 2:08:24 PM oracle.sysman.emcp.util.DBControlUtil stopOMS
INFO: Stopping Database Control (this may take a while) ...
May 12, 2008 2:08:38 PM oracle.sysman.emcp.util.DBControlUtil startOMS
INFO: Starting Database Control (this may take a while) ...
May 12, 2008 2:10:20 PM oracle.sysman.emcp.EMDBPostConfig performReconfiguration
INFO: >>>>>>>>>>> The Database Control URL is http://mnp:5504/em <<<<<<<<<<<
Enterprise Manager configuration completed successfully
FINISHED EMCA at May 12, 2008 2:10:20 PM