Oracle SQL
DB Admin
SQL*Plus
-- Connect to Oracle via SQL*Plus: either with a full EZConnect string, or by
-- setting ORACLE_HOME/ORACLE_SID and using OS authentication ("/ as sysdba").
-- NOTE(review): line breaks were lost in this notes file — each "--" below
-- comments out the remainder of the physical line; split before executing.
sqlplus sys/oracle@evdsmgw01.evd.int:1521/smgwa as sysdba --or export ORACLE_HOME=/app/oracle/product/12.1.0.2/ee export ORACLE_SID=emtectn /app/oracle/product/12.1.0.2/ee/bin/sqlplus / as sysdba -- on which DB am I? select instance_name from v$instance; --DB Start and Stop --connect sqlplus / as sysdba -- shutdown shutdown immediate; -- start startup;
DB objects
-- Data-dictionary lookups: parameters, version, packages, triggers, grants,
-- and object search. Useful DBA views referenced here: dba_segments,
-- dba_tables, dba_lobs, all_tab_comments, all_indexes.
-- NOTE(review): collapsed line — each "--" comments out the rest of the line;
-- split into separate statements before executing.
-- Oracle Parameter SELECT * FROM v$parameter t ORDER BY 2 ASC; SELECT * FROM v$parameter t WHERE t.name = 'db_block_size'; -- Oracle Versionsnummer SELECT * FROM v$version; -- nice dba views dba_segments dba_tables -- all_tables ist Nutzerabhängig und zeigt weniger Daten dba_lobs all_tab_comments all_indexes -- All Functions select * from dba_objects where object_type = 'FUNCTION' --and lower(object_name) like 'fnc_get_initial_c%' order by object_name; -- Package Versions SELECT 'PKG_IMPORT_VD' AS "Package", PKG_IMPORT_VD.fnc_get_header_version AS "Header", PKG_IMPORT_VD.fnc_get_body_version AS "Body" FROM dual; -- deactivated triggers SELECT * FROM dba_triggers WHERE status != 'ENABLED'; -- List all grants to a Table SELECT * FROM DBA_TAB_PRIVS WHERE TABLE_NAME = 'GTMP_FARZ_GERAETE_STATUS'; -- search for a table SELECT object_name, owner FROM dba_objects WHERE object_type LIKE ('TABLE%') AND LOWER(object_name) LIKE ('%hist%') -- AND owner LIKE ('t%') ORDER BY 1 ASC;
Invalid Objects
-- List invalid objects, then revalidate each one via dbms_utility.validate
-- (printing the status afterwards), or — variant 2 — recompile a whole
-- schema at once with dbms_utility.compile_schema.
-- NOTE(review): collapsed line — the PL/SQL block must be reformatted onto
-- multiple lines before it can run (the inline "--" comments swallow the rest).
-- This script will display a list of Oracle invalid objects: SELECT object_name, owner, object_type, status FROM dba_objects WHERE status != 'VALID' ORDER BY owner, object_type; -- Fix Invalid Objects BEGIN FOR r IN (SELECT x.owner, x.object_name, x.object_type, x.object_id FROM dba_objects x WHERE x.status = 'INVALID' -- AND x.object_type != 'SYNONYM' ORDER BY x.owner, x.object_name) LOOP dbms_output.put('Validiere ' || r.owner || '.' || r.object_name || ' (' || r.object_type || ', ' || r.object_id || ') ... '); dbms_utility.validate(object_id => r.object_id); FOR r_erg IN (SELECT x.status FROM dba_objects x WHERE x.object_id = r.object_id) LOOP dbms_output.put_line(r_erg.status); END LOOP; --EXIT; END LOOP; END; / -- Var 2 All Objects of OWNER begin dbms_utility.compile_schema('EC_TSI',false); end; /
Size
see Oracle Basic Compression for Table
-- Update optimizer statistics (feeds DBA_TABLES.num_rows, avg_row_len etc.)
EXEC DBMS_STATS.GATHER_TABLE_STATS('SCHEMA1','COMPANY');
-- or per schema
EXEC DBMS_STATS.GATHER_SCHEMA_STATS('SCHEMA1');
-- or whole database
EXEC DBMS_STATS.GATHER_DATABASE_STATS;

-- More detail, but split by partitioning / sub-tables
SELECT segment_name, segment_type, bytes
FROM dba_segments
ORDER BY bytes DESC;

-- Determine max size per datafile
SELECT file_name, maxbytes / 1024 / 1024 AS max_mb
FROM dba_data_files;

-- Generate "autoextend on" commands for all datafiles (adjust sizes as needed).
-- BUGFIX: in the original the whole ||name|| was inside the string literal,
-- so the generated command contained the literal text "||name||" instead of
-- the datafile path; the name must be concatenated in and single-quoted.
SELECT 'alter database datafile ''' || name
       || ''' autoextend on next 100M maxsize unlimited;'
FROM v$datafile;

-- Free (wasted) space per tablespace, joined to its datafiles
WITH wasted AS (
    SELECT tablespace_name,
           ROUND(SUM(bytes) / 1024 / 1024, 0) AS "MB wasted"
    FROM dba_free_space
    GROUP BY tablespace_name
)
SELECT df.tablespace_name,
       df.file_name,
       df.file_id,
       ROUND(df.bytes / 1024 / 1024, 0) AS "DF MB",
       w."MB wasted"
FROM dba_data_files df
JOIN wasted w
    ON w.tablespace_name = df.tablespace_name
ORDER BY w."MB wasted" DESC;
Size Analyse
-- Size-analysis query collection (each "---Title:" marks one report):
-- overview (net table bytes vs. segments vs. datafiles), segment types,
-- segment/tablespace/datafile rankings, tablespace-vs-datafile diff,
-- index and LOB sizes, duplicate table names, and row distribution over
-- table/index partitions. "EC#_%' ESCAPE '#'" matches tablespaces named
-- EC_* with a literal underscore.
-- NOTE(review): collapsed lines — inline "--" comments swallow the rest of
-- each physical line; reformat before executing.
---Title: Overview SELECT t.*, s.*, f.* FROM ( SELECT ROUND(SUM(NUM_ROWS * AVG_ROW_LEN)/1024/1024/1024,0) AS "GB Tables netto" FROM DBA_TABLES ) t , ( SELECT ROUND(SUM(bytes)/1024/1024/1024,0) AS "GB Segments" FROM DBA_SEGMENTS s ) s , ( SELECT ROUND(SUM(bytes)/1024/1024/1024,0) AS "GB DataFiles" FROM DBA_DATA_FILES f ) f ; ---Title: SegmentTypes SELECT s.segment_type AS "Type" , ROUND(SUM (s.BYTES)/1024/1024/1024, 2) AS GB , COUNT(*) AS "Anz" FROM dba_segments s GROUP BY s.segment_type order by 2 desc ; ---Title: SegmentTypes TBS EC_ SELECT s.segment_type AS "Type" , ROUND(SUM (s.BYTES)/1024/1024/1024, 2) AS GB , COUNT(*) AS "Anz" FROM dba_segments s WHERE (s.TABLESPACE_NAME LIKE 'EC#_%' ESCAPE '#') GROUP BY s.segment_type order by 2 desc ; ---Title: SegmentTypes TBS EC_ + NULL SELECT s.segment_type AS "Type" , ROUND(SUM (s.BYTES)/1024/1024/1024, 2) AS GB , COUNT(*) AS "Anz" FROM dba_segments s WHERE (s.TABLESPACE_NAME LIKE 'EC#_%' ESCAPE '#' OR s.TABLESPACE_NAME IS NULL) -- null für partitionierte GROUP BY s.segment_type order by 2 desc ; ---Title: Ranking Segments SELECT s.SEGMENT_NAME AS "SEGMENT_NAME" , ROUND (SUM(s.BYTES) /1024/1024/1024, 2) AS GB FROM DBA_SEGMENTS s WHERE (s.TABLESPACE_NAME LIKE 'EC#_%' ESCAPE '#' OR s.TABLESPACE_NAME IS NULL) -- null für partitionierte GROUP BY (SEGMENT_NAME) HAVING SUM(s.BYTES)>1*1024*1024 --1 MB Grenze ORDER BY 2 DESC ; ---Title: Ranking Tablespaces SELECT t.tablespace_name , ROUND(SUM(t.BYTES/1024/1024/1024),0) AS GB FROM DBA_SEGMENTS t GROUP BY t.tablespace_name ORDER BY 2 DESC ; ---Title: Ranking DataFiles SELECT t.TABLESPACE_NAME, ROUND(SUM(t.BYTES/1024/1024/1024),0) AS GB FROM DBA_DATA_FILES t GROUP BY t.TABLESPACE_NAME ORDER BY 2 DESC ; ---Title: TSP-vs-DataFiles -- joined Table Space size and Data Files size SELECT u1.tablespace_name , u1."GB_TSP", u2."GB_DF", u2."GB_DF"-u1."GB_TSP" AS "Diff DF-TSP" FROM ( SELECT t.tablespace_name , ROUND(SUM(t.BYTES/1024/1024/1024),1) AS "GB_TSP" FROM DBA_SEGMENTS t GROUP BY 
t.tablespace_name) u1 JOIN ( SELECT t.tablespace_name , ROUND(SUM(t.BYTES/1024/1024/1024),1) AS "GB_DF" FROM DBA_DATA_FILES t GROUP BY t.tablespace_name) u2 ON u1.tablespace_name = u2.tablespace_name ORDER BY 4 DESC ; ---Title: Indizes SELECT s.segment_name "Index", ROUND(s.BYTES/1024/1024/1024,2) "GB" , i.table_name FROM dba_segments s LEFT OUTER JOIN all_indexes i ON i.index_name = s.segment_name WHERE segment_type LIKE 'INDEX%' AND s.BYTES> 1024*1024*1024 -- 1GB ORDER BY 2 DESC ; ---Title: IndizesPerTab SELECT i.table_name, ROUND(SUM(s.BYTES/1024/1024/1024),2) "GB" FROM dba_segments s LEFT OUTER JOIN all_indexes i ON i.index_name = s.segment_name WHERE segment_type LIKE 'INDEX%' GROUP BY i.TABLE_NAME HAVING SUM(s.BYTES) > 1024*1024*1024 --1GB ORDER BY 2 DESC ; ---Title: Lobs SELECT l.segment_name, l.table_name, l.column_name ,ROUND(s.bytes/1024/1024/1024, 2) AS "GB" FROM dba_lobs l JOIN dba_segments s ON (l.segment_name = s.segment_name) WHERE s.bytes > 0.1* 1024*1024*1024 ORDER BY 4 DESC ; ---Title: DoppelteTabNamen SELECT t.table_name, count(*) FROM dba_tables t group by t.table_name having count(*)>=2 ORDER BY 1 ; ---Title: Verteilung auf Tabellenpartitionen with DatenFilter as ( select t.table_owner, t.table_name, sum (t.num_rows) FROM DBA_TAB_PARTITIONS t where t.tablespace_name like 'EC#_%' escape '#' group by t.table_owner, t.table_name having sum (t.num_rows) > 0 ) SELECT table_owner, table_name, partition_name, partition_position, num_rows, blocks FROM DatenFilter t1 join DBA_TAB_PARTITIONS t using (table_owner, table_name) order by table_owner, table_name, partition_position ; ---Title: Verteilung auf Indexpartitionen with DatenFilter as ( select t.index_owner, t.index_name, sum (t.num_rows) FROM dba_ind_partitions t where t.tablespace_name like 'EC#_%' escape '#' group by t.index_owner, t.index_name having sum (t.num_rows) > 0 ) SELECT i.owner, i.TABLE_NAME, i.index_name, ip.partition_name, ip.partition_position, ip.num_rows FROM DatenFilter f join 
dba_indexes i on i.owner = f.index_owner and i.index_name = f.index_name join dba_ind_partitions ip on ip.index_owner = f.index_owner and ip.index_name = f.index_name order by i.owner, i.TABLE_NAME, i.index_name, ip.partition_position ; ---Title: Tablespaces mit Platz im Datafile (fragmentierte) SELECT TABLESPACE_NAME, round(SUM(BYTES) / 1024 / 1024 / 1024, 2) "GB wasted" FROM DBA_FREE_SPACE GROUP BY TABLESPACE_NAME ORDER BY 2 DESC;
Table Size Analysis
-- Per-table size ranking: CTEs collect segment, index and LOB bytes per
-- table; qInteressanteTabs pre-filters "interesting" tables (>= 1M rows or
-- >= 100 MB in any category); the final SELECT reports net size, segment
-- size, wasted space, index/LOB sizes and their sum ("GB S+I+L", sort key).
-- NOTE(review): collapsed lines — inline "--" comments swallow the rest of
-- each physical line; reformat before executing.
---Title: Ranking Tables V2 via qInteressanteTabs WITH qSegments AS (SELECT SEGMENT_NAME AS table_name, owner, SUM(bytes) AS bytes FROM DBA_SEGMENTS WHERE segment_type LIKE 'TABLE%' --TABLE and TABLE PARTITION GROUP BY (SEGMENT_NAME, owner)), qIndizes AS (SELECT i.table_name, i.owner, SUM(s.bytes) AS bytes FROM dba_indexes i JOIN dba_segments s ON (i.index_name = s.segment_name AND i.owner = s.owner) GROUP BY i.table_name, i.owner), qLobs AS (SELECT l.table_name, l.owner, SUM(s.bytes) AS bytes FROM dba_lobs l JOIN dba_segments s ON (l.segment_name = s.segment_name AND l.owner = s.owner) -- ja, l.segment_name hier! GROUP BY l.table_name, l.owner), qInteressanteTabs AS (SELECT t.TABLE_NAME, t.OWNER FROM dba_tables t WHERE t.num_rows >= 1000000 UNION SELECT t.TABLE_NAME, t.OWNER FROM qSegments t WHERE t.bytes >= 100 * 1024 * 1024 UNION SELECT t.TABLE_NAME, t.OWNER FROM qIndizes t WHERE t.bytes >= 100 * 1024 * 1024 UNION SELECT t.TABLE_NAME, t.OWNER FROM qLobs t WHERE t.bytes >= 100 * 1024 * 1024) SELECT TABLE_NAME AS "Table" -- identifier "t." ungueltig bei nat join using , owner AS "Owner", t.TABLESPACE_NAME AS "TablesSpace", ROUND(t.NUM_ROWS / 1000000, 2) AS "MillRows", ROUND(t.NUM_ROWS * t.AVG_ROW_LEN / 1024 / 1024 / 1024, 2) AS "GB Netto" --, ROUND (t.blocks * 8 * 1024 /1024/1024/1024, 2) AS "GB Brutto" , ROUND(s.BYTES / 1024 / 1024 / 1024, 2) AS "GB Segments", ROUND((s.BYTES - (t.NUM_ROWS * t.AVG_ROW_LEN)) / 1024 / 1024 / 1024, 2) AS "GB Wasted" -- t.NUM_ROWS * t.AVG_ROW_LEN , ROUND(Indizes.BYTES / 1024 / 1024 / 1024, 2) AS "GB Indizes", ROUND(Lobs.BYTES / 1024 / 1024 / 1024, 2) AS "GB Lobs", ROUND((s.BYTES + nvl(Indizes.bytes, 0) + nvl(Lobs.BYTES, 0)) / 1024 / 1024 / 1024, 2) AS "GB S+I+L" , c.comments AS "TabComments" , t.last_analyzed AS "last_analyzed" FROM qInteressanteTabs JOIN dba_tables t USING (table_name, owner) JOIN ( -- hier kein outer join! 
SELECT TABLE_NAME, owner, bytes FROM qSegments) s USING (TABLE_NAME, owner) -- LEFT OUTER JOIN ( --Comments, hier kein "with", da so gefiltert wird SELECT owner, table_name, comments FROM dba_tab_comments) c USING (TABLE_NAME, owner) LEFT OUTER JOIN ( --Indizes SELECT table_name, owner, bytes FROM qIndizes) Indizes USING (TABLE_NAME, owner) LEFT OUTER JOIN ( --Lobs SELECT table_name, owner, bytes FROM qLobs) Lobs USING (TABLE_NAME, owner) --WHERE (t.TABLESPACE_NAME LIKE /*EC,SC,CC_*/ '_C#_%' ESCAPE '#' OR t.TABLESPACE_NAME IS NULL) -- null fuer partitionierte, dieser Filter erst hier am Ende! WHERE owner NOT IN ('SYS', 'XDB') ORDER BY 10 DESC -- 10 = S+I+L ;
Growth
-- Growth monitoring: two history tables (GROWTH1 = segment bytes, GROWTH2 =
-- table rows/net/gross/segment bytes), the procedure prc_mon_table_growth
-- that purges entries older than 2 years and inserts a fresh snapshot, and
-- evaluation queries (per-date counts, REGR_SLOPE-based growth per year,
-- 2-week delta, and PIVOT views per table).
-- NOTE(review): collapsed lines — inline "--" comments swallow the rest of
-- each physical line; reformat before executing. The grants at the top must
-- be run as SYS; tables/procedure live in schema ec_mon.
/* Create two tables to monitor the growth of large tables vs. time. prc_mon_table_growth is used to insert new data */ -- als sys/oracle grant select on DBA_SEGMENTS to ec_mon; --ec_ewe grant select on DBA_TABLES to ec_mon; --ec_ewe grant select on DBA_SEGMENTS to ec_sys; grant select on DBA_TABLES to ec_sys; -- als ec_mon -- Creates CREATE TABLE RDS$TOMENKE$GROWTH1 ( SEGMENT_NAME VARCHAR2(81) NOT NULL, OWNER VARCHAR2(30) NOT NULL, DATE_INSERTED DATE NOT NULL, BYTES NUMBER NOT NULL, IGNORE VARCHAR2(1) NULL ); ALTER TABLE RDS$TOMENKE$GROWTH1 ADD CONSTRAINT RDS$ToMenke$Growth1 UNIQUE (SEGMENT_NAME, OWNER, DATE_INSERTED); COMMENT ON TABLE RDS$TOMENKE$GROWTH1 IS 'by Menke@RDS: Monitoring des Tab. Wachstums'; CREATE TABLE RDS$TOMENKE$GROWTH2 ( TABLE_NAME VARCHAR2(30) NOT NULL, OWNER VARCHAR2(30) NOT NULL, DATE_INSERTED DATE NOT NULL, DATE_ANALYZED DATE NOT NULL, NUM_ROWS NUMBER NOT NULL, BYTESNETTO NUMBER NOT NULL, BYTESBRUTTO NUMBER NOT NULL, BYTESSEGMENTS NUMBER NOT NULL, IGNORE VARCHAR2(1) NULL ); ALTER TABLE RDS$TOMENKE$GROWTH2 ADD CONSTRAINT RDS$ToMenke$Growth2 UNIQUE (TABLE_NAME, OWNER, DATE_INSERTED); COMMENT ON TABLE RDS$TOMENKE$GROWTH2 IS 'by Menke@RDS: Monitoring des Tab. 
Wachstums'; COMMIT; create or replace PROCEDURE prc_mon_table_growth AS BEGIN /* By Torben Menke, RDS */ DELETE FROM RDS$TOMENKE$GROWTH1 t WHERE t.date_inserted < SYSDATE - 2*365; -- älter als 2 Jahre DELETE from RDS$TOMENKE$GROWTH2 t WHERE t.date_inserted < SYSDATE - 2*365; -- älter als 2 Jahre --1 INSERT INTO RDS$TOMENKE$GROWTH1 SELECT s.SEGMENT_NAME AS "SEGMENT_NAME" , s.OWNER , (SELECT SYSDATE FROM dual) AS "date_inserted" , SUM(s.BYTES) AS "BYTES" , NULL FROM DBA_SEGMENTS s WHERE (s.TABLESPACE_NAME LIKE 'EC#_%' ESCAPE '#' OR s.TABLESPACE_NAME IS NULL) -- null für partitionierte GROUP BY (s.SEGMENT_NAME, s.OWNER) HAVING SUM(s.BYTES)>1*1024*1024 --1 MB Grenze ORDER BY 1 ASC ; --2 INSERT INTO RDS$TOMENKE$GROWTH2 SELECT t.TABLE_NAME , t.OWNER , (SELECT SYSDATE FROM dual) AS "date_inserted" , t.last_analyzed , t.NUM_ROWS , t.NUM_ROWS * t.AVG_ROW_LEN AS "BYTESNETTO" , t.blocks * 8 * 1024 AS "BYTESBRUTTO" , s.BYTES AS "BYTESSEGMENTS" , NULL FROM DBA_TABLES t -- all Tables ist nutzerabhängig und zeigt weniger Daten! 
JOIN ( SELECT SEGMENT_NAME "TABLE_NAME", OWNER, SUM(BYTES) AS "BYTES" FROM DBA_SEGMENTS s WHERE segment_type LIKE 'TABLE%' AND (s.TABLESPACE_NAME LIKE 'EC#_%' ESCAPE '#' OR s.TABLESPACE_NAME IS NULL) -- null für partitionierte GROUP BY (SEGMENT_NAME, OWNER) ) s ON (t.TABLE_NAME = s."TABLE_NAME" and t.OWNER = s.OWNER) WHERE (t.TABLESPACE_NAME LIKE 'EC#_%' ESCAPE '#' OR t.TABLESPACE_NAME IS NULL) -- null für partitionierte AND t.NUM_ROWS * t.AVG_ROW_LEN >1*1024*1024 --1 MB Grenze AND t.last_analyzed > nvl((SELECT MAX (m.date_inserted) FROM RDS$TOMENKE$GROWTH2 m ),sysdate-365) -- nur neue --AND t.date_analyzed >= SYSDATE -7 ORDER BY 1 ASC ; COMMIT; END; / -- als ec-mon grant execute on ec_mon.prc_mon_table_growth to ec_sys; --Test BEGIN prc_mon_table_growth; END; / -- Scheduler -> Kundenjob --'BEGIN ec_mon.prc_mon_table_growth; end;' /* DELETE FROM RDS$TOMENKE$GROWTH1 t WHERE t.date_inserted > SYSDATE - 1 ; DELETE from RDS$TOMENKE$GROWTH2 t WHERE t.date_inserted > SYSDATE - 1 ; commit; */ SELECT t.DATE_INSERTED, COUNT(*) FROM RDS$TOMENKE$GROWTH1 t GROUP BY t.DATE_INSERTED ORDER BY 1 DESC; SELECT t.DATE_INSERTED, COUNT(*) FROM RDS$TOMENKE$GROWTH2 t GROUP BY t.DATE_INSERTED ORDER BY 1 DESC; -- Auswertung -- 0 select t.date_inserted, count(*) from ec_mon.RDS$TOMENKE$GROWTH1 t group by t.date_inserted order by 1 desc ; select t.date_inserted, count(*) from ec_mon.RDS$TOMENKE$GROWTH2 t group by t.date_inserted order by 1 desc ; -- 1 SELECT t.segment_name, t.owner ,ROUND(last.bytes/1024/1024,1) AS "MB last" ,ROUND(MAX (t.bytes)/1024/1024,1) AS "max" --,ROUND(AVG (t.bytes)/1024/1024,1) -- AS "avg" ,ROUND(MIN (t.bytes)/1024/1024,1) AS "min" ,ROUND(REGR_SLOPE(t.bytes , t.date_inserted-SYSDATE)/1024/1024*365,1) AS "MB/Year" ,ROUND(100*REGR_SLOPE(t.bytes , t.date_inserted-SYSDATE)/AVG(t.bytes)*365,1) AS "Pct/Year" , round(max(t.date_inserted)-MIN(t.date_inserted),0) "days" , COUNT(*) AS "count" FROM ec_mon.RDS$TOMENKE$GROWTH1 t JOIN ( --Sub 1, die zu jedem table_name das 
Maxdate_inserted sucht, sollte durch max(t.date_inserted) ersetzt werden können SELECT segment_name, owner , MAX(date_inserted) AS MaxDate FROM ec_mon.RDS$TOMENKE$GROWTH1 GROUP BY segment_name, owner ) sub1 ON (t.segment_name = sub1.segment_name and t.owner = sub1.owner) -- end sub1 JOIN ( --Sub 2, die jedem table_name und MaxDate die entsprechende Zeile liefert SELECT * FROM ec_mon.RDS$TOMENKE$GROWTH1 ) "LAST" ON ( t.segment_name = last.segment_name AND t.owner = last.owner AND last.date_inserted = sub1.MaxDate ) -- end sub2 GROUP BY t.segment_name, t.owner ,ROUND(last.bytes/1024/1024,1) HAVING ( COUNT (*) > 1 AND AVG(t.bytes) != 0 -- AND MIN (t.bytes)!= MAX (t.bytes) -- nur Segmente mit Änderungen ) ORDER BY 2 DESC ; -- 2 /* TODO: Wachstum an NumRows seit letztem Eintrag in absoluten Werten/Tag */ SELECT t.table_name, t.owner ,ROUND(last.num_rows/1E3,1) AS "TRows last" ,ROUND(MAX(t.num_rows)/1E3,1) AS "max" --,ROUND(REGR_AVGY (t.num_rows , t.date_analyzed-SYSDATE)/1E3,1) --,ROUND(AVG(t.num_rows)/1E3,1) -- AS "avg" ,ROUND(MIN(t.num_rows)/1E3,1) AS "min" ,ROUND(REGR_SLOPE(t.num_rows , t.date_analyzed-SYSDATE)/1E3*365,1) AS "TRows/Year" ,ROUND(100*REGR_SLOPE(t.num_rows , t.date_analyzed-SYSDATE)/AVG(t.num_rows)*365,1) AS "Pct/Year" ,ROUND(last.bytesnetto/1024/1024,1) AS "MB netto last" ,ROUND(MAX(t.bytesnetto)/1024/1024,1) AS "max" --,ROUND(AVG(t.bytesnetto)/1024/1024,1) -- AS "avg" ,ROUND(MIN(t.bytesnetto)/1024/1024,1) AS "min" ,ROUND(REGR_SLOPE(t.bytesnetto , t.date_analyzed-SYSDATE)/1024/1024*365,1) AS "MB/Year" ,ROUND(100*REGR_SLOPE(t.bytesnetto , t.date_analyzed-SYSDATE)/AVG(t.bytesnetto)*365,1) AS "Pct/Year" ,ROUND(last.bytesbrutto/1024/1024,1) AS "MB brutto last" ,ROUND(MAX(t.bytesbrutto)/1024/1024,1) AS "max" --,ROUND(AVG(t.bytesbrutto)/1024/1024,1) -- AS "avg" ,ROUND(MIN(t.bytesbrutto)/1024/1024,1) AS "min" ,ROUND(REGR_SLOPE(t.bytesbrutto , t.date_analyzed-SYSDATE)/1024/1024*365,1) AS "MB/Year" ,ROUND(100*REGR_SLOPE(t.bytesbrutto , 
t.date_analyzed-SYSDATE)/AVG(t.bytesbrutto)*365,1) AS "Pct/Year" ,ROUND((last.bytesbrutto - last.bytesnetto)/1024/1024,1) AS "MB wasted" ,ROUND((last.bytesbrutto - last.bytesnetto)/last.bytesnetto,1) AS "% wasted" , round(max(t.date_analyzed) - MIN(t.date_analyzed),0) "days" , max(t.date_analyzed) AS "date_analyzed" --, sub3.LastDate14 , ROUND(100*(last.num_rows-last14.num_rows)/last14.num_rows,1) AS "2week%" , COUNT(*) AS "count" FROM ec_mon.RDS$TOMENKE$GROWTH2 t left outer JOIN ( --Sub 1, die zu jedem table_name das MaxDate sucht SELECT table_name , MAX(date_analyzed) AS MaxDate FROM ec_mon.RDS$TOMENKE$GROWTH2 GROUP BY table_name ) sub1 ON t.table_name = sub1.table_name -- end sub1 left outer JOIN ( --Sub 2, die jedem table_name und MaxDate die entsprechende Zeile liefert SELECT * FROM ec_mon.RDS$TOMENKE$GROWTH2 ) "LAST" ON ( t.table_name = last.table_name AND last.date_analyzed = sub1.MaxDate ) -- end sub2 left outer JOIN ( --Sub 3, die zu jedem table_name das max(datum) von >14 Tagen vor maxdate sucht = LastDate14 SELECT table_name , MAX(date_analyzed) AS LastDate14 FROM ec_mon.RDS$TOMENKE$GROWTH2 t21 WHERE date_analyzed + 14 < (SELECT MAX(date_analyzed) FROM ec_mon.RDS$TOMENKE$GROWTH2 WHERE table_name = t21.table_name) GROUP BY table_name ) sub3 ON t.table_name = sub3.table_name -- end sub3 left outer JOIN ( --Sub 4, die jedem table_name und LastDate14 die entsprechende Zeile liefert SELECT * FROM ec_mon.RDS$TOMENKE$GROWTH2 ) "LAST14" ON ( t.table_name = last14.table_name AND last14.date_analyzed = sub3.LastDate14 ) -- end sub4 WHERE last.bytesnetto != 0 AND t.ignore IS NULL AND last.bytesnetto != 0 AND last14.num_rows != 0 GROUP BY t.table_name, t.owner ,ROUND(last.num_rows/1E3,1) ,ROUND(last.bytesnetto/1024/1024,1) ,ROUND(last.bytesbrutto/1024/1024,1) ,ROUND((last.bytesbrutto - last.bytesnetto)/1024/1024,1) ,ROUND((last.bytesbrutto - last.bytesnetto)/last.bytesnetto,1) --,ROUND(last14.num_rows/1E3,1) --,sub3.LastDate14 , 
ROUND(100*(last.num_rows-last14.num_rows)/last14.num_rows,1) HAVING ( COUNT (*) > 1 AND AVG(t.num_rows) != 0 AND AVG(t.bytesnetto) != 0 ) ORDER BY 6 desc --11 DESC ; -- Auswertung in Excel-Form -- Auswertung via Pivot (etwas falsch, da date_inserted statt date_analyzed WITH pivot_data AS ( SELECT trunc(t.date_inserted) AS "Rows" , t.table_name, ROUND(t.num_rows/1E3,0) AS "TRows" FROM ec_mon.RDS$TOMENKE$GROWTH2 t WHERE t.ignore IS NULL ) SELECT * FROM pivot_data PIVOT ( AVG("TRows") FOR table_name IN ( 'DATENPOOL_ZEITREIHEN_LOG' ,'LINEARE_PROTOKOLLE_EXT' ,'TAGEBUECHER' ,'PULL_SYNC_READ' ,'EXPORT_LG' ,'FARZ_AUFTRAGSTRANSAKTIONEN' ,'FARZ_FNF_DETAILS' ,'ZR_DETAILS_HISTORIE' ) ) ORDER BY 1 DESC ; -- in MB -- Auswertung via Pivot (etwas falsch, da date_inserted statt date_analyzed WITH pivot_data AS ( SELECT trunc(t.date_inserted) AS "MByte" , t.table_name, ROUND(t.bytesnetto/1024/1024,0) AS "MB" FROM ec_mon.RDS$TOMENKE$GROWTH2 t WHERE t.ignore IS NULL ) SELECT * FROM pivot_data PIVOT ( AVG("MB") FOR table_name IN ( 'LINEARE_PROTOKOLLE_EXT' ,'EXPORT_LG' ,'IMP_EXP_TMP_DATA' ,'FARZ_AUFTRAGSTRANSAKTIONEN' ,'FARZ_FNF_DETAILS' ,'FARZ_MELDUNGEN' ,'ZR_DETAILS_VBOL_WERTE' ,'ZR_DETAILS_HISTORIE' ,'DATENPOOL_ZEITREIHEN_LOG' ,'TAGEBUECHER' ,'PULL_SYNC_READ' ) ) ORDER BY 1 DESC ; select * from ec_mon.RDS$TOMENKE$GROWTH2 t where t.table_name like 'BER_LIS%' ; /* delete from ec_mon.RDS$TOMENKE$GROWTH2 t where t.date_inserted > sysdate -1 ; delete from ec_mon.RDS$TOMENKE$GROWTH1 t where t.date_inserted > sysdate -1 ; commit; update ec_mon.RDS$TOMENKE$GROWTH2 t set t.ignore = NULL where 1=2; */
Tablespace Shrinking online via row movement
-- Free unused space in a table online (requires row movement)
ALTER TABLE zr_details_historie ENABLE ROW MOVEMENT;
-- observed: ~1200 s / 20 min per 1 million rows (0.2 GB)
ALTER TABLE zr_details_historie SHRINK SPACE CASCADE;
ALTER TABLE zr_details_historie DISABLE ROW MOVEMENT;
Tablespace Shrinking offline via copying
by moving all tables to new tablespace
-- One-time walkthrough: shrink a datafile that cannot be resized directly
-- because segments sit at its end. Steps: analyze wasted space, stop DB
-- jobs, move the blocking tables to a temporary tablespace, resize the
-- datafile stepwise, move the tables back, drop the temp tablespace,
-- rebuild the indexes made UNUSABLE by the moves, restart the jobs.
-- NOTE(review): collapsed lines — inline "--" comments swallow the rest of
-- each physical line; reformat before executing.
-- Analize wasted size WITH wasted AS (SELECT TABLESPACE_NAME, round(SUM(BYTES) / 1024 / 1024, 0) "MB wasted" FROM DBA_FREE_SPACE GROUP BY TABLESPACE_NAME) SELECT df.tablespace_name, df.file_name, df.file_id, round(df.bytes / 1024 / 1024, 0) AS "DF MB", w."MB wasted" FROM dba_data_files df JOIN wasted w ON w.TABLESPACE_NAME = df.TABLESPACE_NAME ORDER BY 5 DESC; -- try to reduce size, usually doesn't work ALTER DATABASE datafile 27 resize 800M; -- Tablespaces mit viel freiem Platz im Datafile suchen SELECT TABLESPACE_NAME, round(SUM(BYTES) / 1024 / 1024 / 1024, 2) "GB wasted" FROM DBA_FREE_SPACE GROUP BY TABLESPACE_NAME ORDER BY 2 DESC; --EC_TBS_LOG hat viel Luft select * from dba_tablespaces where tablespace_name='EC_TBS_LOG'; select * from dba_data_files where tablespace_name='EC_TBS_LOG'; -- /app/oradata/emtectn/ec_tbs_log01.dbf -- Test Online-Verkleinerung alter database datafile '/app/oradata/emtectn/ec_tbs_log01.dbf' resize 1500M; -- geht nicht weil belegt am Ende -> -- DB Jobs deaktivieren select value from v$parameter where name='job_queue_processes'; --10 alter system set job_queue_processes=0; -- Tabellen und Indizes auf Tablespace suchen select * from dba_tables where tablespace_name='EC_TBS_LOG'; --EC_PRT.LINEARE_PROTOKOLLE --EC_PRT.LINEARE_PROTOKOLLE_EXT select * from DBA_INDEXES where tablespace_name='EC_TBS_LOG'; -- neuen Tabelspace anlegen create tablespace test datafile '/app/oradata/emtectn/test.dbf' size 100M autoextend on next 10M; alter user EC_PRT quota unlimited on test; -- Tabellen verschieben alter table EC_PRT.LINEARE_PROTOKOLLE move tablespace test; alter table EC_PRT.LINEARE_PROTOKOLLE_EXT move tablespace test; -- Tablespace verkleinern alter database datafile '/app/oradata/emtectn/ec_tbs_log01.dbf' resize 1000M; alter database datafile '/app/oradata/emtectn/ec_tbs_log01.dbf' resize 100M; alter database datafile '/app/oradata/emtectn/ec_tbs_log01.dbf' resize 10M; -- Tabellen zurück verschieben alter table EC_PRT.LINEARE_PROTOKOLLE move 
tablespace EC_TBS_LOG; alter table EC_PRT.LINEARE_PROTOKOLLE_EXT move tablespace EC_TBS_LOG; -- Temp Tablespace löschen drop tablespace test including contents and datafiles; -- Indexe wieder reparieren, da durch verschieben UNSUEABLE geworden: select * from dba_indexes where status not in ('VALID','N/A'); --reparieren via alter index EC_PRT.LPL_SCHEDJOBS_AUDSID_I rebuild tablespace EC_TBS_PRTIDX; alter index EC_PRT.LPL_ERRORLEVEL_I rebuild tablespace EC_TBS_PRTIDX; alter index EC_PRT.LPL_PK rebuild tablespace EC_TBS_PRTIDX; alter index EC_PRT.LPL_DATUM_I rebuild tablespace EC_TBS_PRTIDX; alter index EC_PRT.LPL_BESCHR_I rebuild tablespace EC_TBS_PRTIDX; alter index EC_PRT.LPE_LPL_LFDNR_I rebuild tablespace EC_TBS_PRTIDX; -- Jobs wieder starten alter system set job_queue_processes=10;
Index Rebuild
-- Rebuild an index from scratch
ALTER INDEX EC_ZRV.ZRDH_PK REBUILD;
-- online variant — for tables with little DML activity
ALTER INDEX EC_ZRV.ZRDH_PK REBUILD ONLINE;
User
-- Info about DB users
SELECT username, account_status, created, profile
FROM dba_users
ORDER BY username;

-- Generate "alter user" commands for password changes of many users.
-- BUGFIX: upper(username) LIKE '%menke%' could never match (upper() yields
-- upper case only); compare against an upper-case pattern. The unused
-- ESCAPE '#' was dropped (the pattern contains no '#').
SELECT 'alter user ' || username || ' identified by PASSWORD;'
       || ' --Status = ' || account_status
FROM dba_users
WHERE upper(username) LIKE '%MENKE%'
   OR oracle_maintained = 'Y' -- Oracle-supplied users
ORDER BY username;

-- Only Oracle-supplied users, checking whether they are active.
-- The "_" wildcard matches the "&" in the status 'EXPIRED & LOCKED'.
SELECT 'alter user ' || username || ' identified by PASSWORD;'
       || ' --Status = ' || account_status
FROM dba_users
WHERE oracle_maintained = 'Y'
  AND account_status NOT LIKE 'EXPIRED _ LOCKED'
ORDER BY username;

-- Oracle users still using a standard/default password
SELECT * FROM sys.dba_users_with_defpwd ORDER BY username;

-- Generate commands to change the password of all users
SELECT 'alter user ' || username || ' identified by <new_pwd>;'
FROM dba_users
ORDER BY username;

-- List all non-Oracle users.
-- BUGFIX: original read "where ORACLE_MAINTAINED = 'N' by 1" — the ORDER
-- keyword was missing, which is a syntax error.
SELECT * FROM dba_users WHERE oracle_maintained = 'N' ORDER BY 1;

-- Create a user
CREATE USER tmenke IDENTIFIED BY passwort;
GRANT CREATE SESSION TO tmenke;

-- Lock / unlock login
ALTER USER tmenke ACCOUNT LOCK;
ALTER USER tmenke ACCOUNT UNLOCK;

-- Change password and expire it (user must set a new one at next login)
ALTER USER tmenke IDENTIFIED BY "Start123" PASSWORD EXPIRE;

-- Generate statements to lock all non-Oracle users
SELECT 'ALTER USER ' || username || ' ACCOUNT LOCK ;'
FROM dba_users
WHERE oracle_maintained = 'N'
ORDER BY username;

-- Drop a user including all of its objects
DROP USER tmenke CASCADE;

-- Generate statements to drop all non-Oracle users
-- (e.g. before a Data Pump import of a backup)
SELECT 'DROP USER ' || username || ' cascade ;'
FROM dba_users
WHERE oracle_maintained = 'N'
ORDER BY username;

-- Roles of the current user — direct permissions
SELECT * FROM user_role_privs WHERE username = USER;
SELECT * FROM user_tab_privs WHERE grantee = USER;
SELECT * FROM user_sys_privs WHERE username = USER;

-- Permissions granted via roles
SELECT * FROM role_role_privs
WHERE role IN (SELECT granted_role FROM user_role_privs WHERE username = USER);
SELECT * FROM role_tab_privs
WHERE role IN (SELECT granted_role FROM user_role_privs WHERE username = USER);
SELECT * FROM role_sys_privs
WHERE role IN (SELECT granted_role FROM user_role_privs WHERE username = USER);

-- Check for locked user accounts
SELECT username, account_status FROM dba_users ORDER BY username;

-- Lock / unlock a specific DB user
ALTER USER ec_admin ACCOUNT LOCK;
ALTER USER ec_admin ACCOUNT UNLOCK;

-- Change password and unlock in one statement
ALTER USER menke IDENTIFIED BY pwdofmenke ACCOUNT UNLOCK;

-- Drop user including all of its objects
DROP USER menke CASCADE;
Sessions
-- Show running (legacy DBMS_JOB) jobs
SELECT * FROM dba_jobs_running;

-- Configure job_queue_processes
SELECT value FROM v$parameter WHERE name = 'job_queue_processes';
ALTER SYSTEM SET job_queue_processes = 8 SCOPE = BOTH;
-- Optimistic recommendation: CPU count * 2 - 2 (hyper-threading, minus a
-- buffer for interactive sessions). CPU count e.g. via:
SELECT * FROM v$parameter WHERE name LIKE '%cpu_count%';

-- DBMS Scheduler jobs — DBA view / current-user view
SELECT * FROM dba_scheduler_running_jobs;
SELECT * FROM user_scheduler_running_jobs;

-- Create and start a scheduler job.
-- BUGFIX: in the original snippet "job_name => 'DELETE_WF_LOG'," stood
-- OUTSIDE the create_job() parameter list, which is invalid PL/SQL.
BEGIN
    dbms_scheduler.create_job(
        job_name   => 'DELETE_WF_LOG',
        job_type   => 'plsql_block',
        job_action => 'begin ec_wfb.prc_del_wf_log; end;',
        enabled    => TRUE);
END;
/
-- Stop a scheduler job
EXEC DBMS_SCHEDULER.STOP_JOB(job_name => '<NAME>');
-- ... or drop it forcefully
BEGIN
    dbms_scheduler.drop_job(job_name => 'DELETE_WF_LOG', force => TRUE);
END;
/

-- Long runners: currently running operations with progress (%) and ETA
SELECT t.opname,
       t.target,
       t.username AS "user",
       ROUND(t.sofar / t.totalwork, 2) * 100 AS "pct_done",
       ROUND(t.elapsed_seconds / (t.sofar / t.totalwork) / 60, 0) AS "Min, tot",
       ROUND((t.start_time
              + (t.elapsed_seconds / (t.sofar / t.totalwork) / 24 / 3600)
              - SYSDATE) * 24 * 60, 0) AS "Min to go",
       t.start_time
           + (t.elapsed_seconds / (t.sofar / t.totalwork) / 24 / 3600) AS "eta",
       t.start_time AS "start"
FROM v$session_longops t
WHERE t.sofar != t.totalwork
  AND t.totalwork != 0
  AND t.sofar != 0
ORDER BY 5 DESC;

-- Get sid and serial# for all sessions of a user
SELECT sid, serial# FROM v$session WHERE username = 'EC_TSI';

-- ... for all sessions of all non-Oracle users
SELECT username, sid, serial#
FROM v$session
WHERE username IN (SELECT username FROM dba_users WHERE oracle_maintained = 'N');

-- Kill a session
ALTER SYSTEM KILL SESSION '439,22749';
-- ... more brutal:
ALTER SYSTEM KILL SESSION '439,22749' IMMEDIATE;

-- Generate kill commands for all sessions of a user.
-- BUGFIX: the original put sid/serial# inside the string literal; they must
-- be concatenated in, and the 'sid,serial#' argument must be wrapped in
-- (escaped) single quotes. Also v$session.username is stored upper case,
-- so 'ec_admin' could never match.
SELECT 'alter system kill session ''' || sid || ',' || serial# || ''';'
FROM v$session
WHERE username = 'EC_ADMIN';

-- Generate kill commands for all sessions of all non-Oracle users
SELECT 'alter system kill session ''' || sid || ',' || serial# || ''';'
       || ' -- ' || username
FROM v$session
WHERE username IN (SELECT username FROM dba_users WHERE oracle_maintained = 'N')
ORDER BY username;

-- Sessions joined with their SQL text and CPU time, to find long sessions
SELECT sesion.sid,
       sesion.serial#,
       sesion.username,
       optimizer_mode,
       hash_value,
       address,
       cpu_time,
       elapsed_time,
       sql_text
FROM v$sqlarea sqlarea
JOIN v$session sesion
    ON sesion.sql_hash_value = sqlarea.hash_value
   AND sesion.sql_address = sqlarea.address
WHERE sesion.username IS NOT NULL
ORDER BY cpu_time DESC;

-- Summary of long operations kept in v$session_longops
SELECT opname,
       target,
       COUNT(*) AS "COUNT",
       SUM(t.elapsed_seconds) AS "SUM",
       ROUND(AVG(t.elapsed_seconds), 1) AS "AVG"
FROM v$session_longops t
GROUP BY opname, target
ORDER BY 3 DESC;

-- List sessions per OS user
SELECT osuser, COUNT(*) FROM v$session t GROUP BY osuser ORDER BY 1;
SELECT * FROM v$session t WHERE osuser = 'albrosel';
AWR Snapshot
-- Set the AWR snapshot interval (minutes)
EXEC dbms_workload_repository.modify_snapshot_settings(interval=>10);

-- Show current snapshot interval and retention, both converted to minutes
SELECT EXTRACT(DAY FROM snap_interval) * 24 * 60
       + EXTRACT(HOUR FROM snap_interval) * 60
       + EXTRACT(MINUTE FROM snap_interval) AS "Snapshot Interval",
       EXTRACT(DAY FROM retention) * 24 * 60
       + EXTRACT(HOUR FROM retention) * 60
       + EXTRACT(MINUTE FROM retention) AS "Retention Interval"
FROM dba_hist_wr_control;

-- Take an AWR snapshot manually
EXECUTE DBMS_WORKLOAD_REPOSITORY.CREATE_SNAPSHOT();
DB_RECOVERY_FILE
-- Configure the fast recovery area: size limit and destination
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST_SIZE=20G SCOPE=BOTH;
ALTER SYSTEM SET DB_RECOVERY_FILE_DEST='E:\ORACLE\ORADATA\CEMTEC\arch' SCOPE=BOTH;
Tracing
-- Start tracing a single session (waits=TRUE, binds=TRUE)
EXEC DBMS_MONITOR.SESSION_TRACE_ENABLE(session_id, serial_num, TRUE, TRUE);

-- Tracing the entire database — in memory only (lost on restart)
ALTER SYSTEM SET sql_trace = true SCOPE=MEMORY;   -- start
ALTER SYSTEM SET sql_trace = false SCOPE=MEMORY;  -- stop

-- Tracing the entire database — persistent via spfile (requires restart)
ALTER SYSTEM SET sql_trace = true SCOPE=spfile;
ALTER SYSTEM SET max_dump_file_size = UNLIMITED SCOPE=spfile;
startup force;
-- stop:
ALTER SYSTEM SET sql_trace = false SCOPE=spfile;
ALTER SYSTEM SET max_dump_file_size = <alter Wert> SCOPE=spfile;  -- restore old value
startup force;

-- Find the session to trace.
-- BUGFIX: the original statement ended in a dangling "ORDER BY;" without a
-- sort column, which is a syntax error.
SELECT s.sid, s.serial#, s.client_identifier, s.service_name, s.module,
       s.action, s.username, s.logon_time, s.status, s.osuser, s.server,
       s.process, s.machine, s.terminal, s.program, s.type, s.client_info,
       p.terminal AS p_terminal,
       p.program AS p_program
FROM v$session s
JOIN v$process p ON s.paddr = p.addr
WHERE s.type NOT IN ('BACKGROUND')
  AND s.status = 'ACTIVE'
ORDER BY s.sid;
-- typical filters: service_name='SYS$USERS';
-- Module='pkg_ewe_kabs_endur' und/oder Module='eCOUNTprognose.exe'
Apex
-- APEX version number
-- 1: directly from the APEX release view
SELECT * FROM apex_release;
-- 2: determine the current APEX schema via all_users/dba_users.
-- Schema names look like APEX_050000, so position 6 holds the first digit.
-- BUGFIX: substr(..., '6', 1) passed the position as a string and relied on
-- implicit string-to-number conversion; pass the number 6 directly.
SELECT MAX(u1.username) AS current_apex_user
FROM all_users u1
WHERE u1.username LIKE 'APEX%'
  AND REGEXP_LIKE(SUBSTR(u1.username, 6, 1), '^[0-9]*$');
Hacks
-- Flashback query: read a past state of a table via the undo segments.
-- Requires: GRANT EXECUTE ON DBMS_FLASHBACK TO username;
SELECT *
FROM email_abos
AS OF TIMESTAMP to_timestamp('08-01-2015 07:15', 'dd-mm-yyyy hh24:mi:ss');
Restore Point
= einfacher und schneller als RMAN Backup einspielen. bei Creation (in Open Mode): create restore point DB_23012018_1120 guarantee flashback database; Test: select * from v$restore_point; Flashback to the guaranteed restore point: SQL> shutdown immediate; SQL> startup mount; SQL> flashback database to restore point DB_23012018_1120; SQL> alter database open resetlogs;
Datapump Export Import
-- Directory object used by Data Pump export/import
CREATE OR REPLACE DIRECTORY "DATA_PUMP_DIR" AS '/app/oracle/backup/datapump-EMTECTN';
-- verify
select * from dba_directories;
-- allow SYSTEM to read/write through it
GRANT READ, WRITE ON DIRECTORY DATA_PUMP_DIR TO SYSTEM;
Export
-- Stop DB jobs before the export
select value from v$parameter where name='job_queue_processes'; --10
alter system set job_queue_processes = 0 scope = both;

-- Update dictionary/system statistics for better performance
exec dbms_stats.gather_dictionary_stats;
exec dbms_stats.lock_table_stats (null, 'X$KCCLH');
exec dbms_stats.gather_fixed_objects_stats;
exec dbms_stats.gather_system_stats;

-- Export via shell + expdp:
--   . oraenv mydatabase
--   expdp system/xxx full=Y parallel=6 consistent=Y directory=DATA_PUMP_DIR dumpfile=DumpV1_%u.dmp logfile=DumpV1.log
-- alternative via parameter file:
--   expdp system/emtectn @string parfile=parfile.par

-- Restart the jobs afterwards
alter system set job_queue_processes = 10 scope = both;
Import
-- Data Pump import runbook: clean up the target DB, import, recompile.
-- 1. Lock all non-Oracle users (generates ALTER USER statements)
SELECT 'ALTER USER ' || username || ' ACCOUNT LOCK ;'
FROM dba_users
WHERE ORACLE_MAINTAINED = 'N'
ORDER BY username;
-- 2. Kill sessions of all non-Oracle users.
--    Fix: the original string concatenation was broken (quotes misplaced);
--    ALTER SYSTEM KILL SESSION needs the 'sid,serial#' pair inside quotes.
SELECT 'alter system kill session ''' || sid || ',' || serial# || ''' immediate; -- ' || username
FROM v$session
WHERE 1 = 1
  AND username IN (SELECT username FROM dba_users WHERE ORACLE_MAINTAINED = 'N')
ORDER BY username;
-- 3. Drop non-Oracle users (preferably in several parallel sessions)
SELECT 'DROP USER ' || username || ' cascade ;'
FROM dba_users
WHERE ORACLE_MAINTAINED = 'N'
ORDER BY 1;
purge recyclebin;
-- NOTE(review): optimizer_mode=choose is deprecated on modern releases -- confirm it is still wanted.
alter session set optimizer_mode=choose;
-- update stats for better import performance
exec dbms_stats.gather_dictionary_stats;
exec dbms_stats.lock_table_stats (null, 'X$KCCLH');
exec dbms_stats.gather_fixed_objects_stats;
exec dbms_stats.gather_system_stats;
-- check that the DB is empty
. oraenv mydatabase
sqlplus / as sysdba
select username from dba_users where ORACLE_MAINTAINED='N'; -- must be empty!
-- Import, first pass: exclude functional indexes
impdp directory=EXPIMP parallel=6 dumpfile=Dump_17-12-14_15-03-%u.dmp logfile=imp_17-12-14_15-03_EFI.log exclude=FUNCTIONAL_INDEX
username: / as sysdba
-- system grants
sqlplus / as sysdba
-- @/app/dump/emtectn/sys_grants-emtecp-171127.sql
-- Import, second pass: only the functional indexes
impdp directory=EXPIMP parallel=6 dumpfile=Dump_17-12-14_15-03-%u.dmp logfile=imp_17-12-14_15-03_FI.log include=FUNCTIONAL_INDEX
username: / as sysdba
-- recompile all invalid objects:
sqlplus / as sysdba
@/app/oracle/product/12.1.0.2/ee/rdbms/admin/utlrp.sql
SQL
INSERT
-- INSERT with explicit column list (preferred)
INSERT INTO departments (department_id, department_name)
VALUES (30, 'purchasing');
-- or without a column list (fragile: breaks if the table changes)
INSERT INTO departments
VALUES (100, 'finance', NULL, NULL);
-- INSERT from a SELECT
INSERT INTO sales_reps (id, NAME, salary, commission_pct)
SELECT employee_id, last_name, salary, commission_pct
FROM employees
WHERE job_id LIKE '%rep%';
-- INSERT combined with a scalar subquery.
-- Fix: the original had an empty value between two commas (syntax error);
-- an explicit NULL is required for the DESCRIPTION column.
INSERT INTO CERT_KEY_KEYS (NAME, DESCRIPTION, RCKA_ID, CKE_ID)
VALUES ('JKS-Key', NULL, -2,
        (SELECT id FROM CERT_KEY_ENTITIES WHERE NAME = 'JKS Entity'));
-- many inserts in one statement / transaction
INSERT all
  INTO rds$tmenke$ecounttabellen (owner, table_name) VALUES ('EC_ANF', 'ANFRAGEN')
  INTO rds$tmenke$ecounttabellen (owner, table_name) VALUES ('EC_ANF', 'ANFRAGE_PARAMETER')
  INTO rds$tmenke$ecounttabellen (owner, table_name) VALUES ('EC_ANF', 'ANFRAGEVERMERKE')
  INTO rds$tmenke$ecounttabellen (owner, table_name) VALUES ('EC_ANF', 'KUNDEN')
SELECT 1 FROM DUAL; -- the SELECT is mandatory with INSERT ALL
-- INSERT combined with SELECT (same NULL fix as above)
INSERT INTO CERT_KEY_KEYS (NAME, DESCRIPTION, RCKA_ID, CKE_ID)
VALUES ('JKS-Key', NULL, -2,
        (SELECT id FROM CERT_KEY_ENTITIES WHERE NAME = 'JKS Entity'));
CREATE TABLE
-- Copy a table (CTAS)
CREATE table zvw_zaehler_bak as select * from zvw_zaehler;
COMMENT ON table zvw_zaehler_bak IS 'Kopie zvw_zaehler vom 26.04.2016 by Torben Menke';
-- structure only, no data (WHERE 1=2 never matches)
create table employees2 as
select * from employees where 1=2;
Constraints
-- Add a named UNIQUE constraint on (TABLE_NAME, LAST_ANALYZED)
ALTER TABLE RDS$ToMenke$Monitoring2
    ADD CONSTRAINT RDS$ToMenke$Mon2$name_date
    UNIQUE (TABLE_NAME, LAST_ANALYZED);
Grants
GRANT SELECT ON myTable TO otheruser;
-- Change a table's owner by copying it across schemas.
-- Fix: the CTAS statement was missing its terminating semicolon
-- ("... FROM user1.myTable COMMIT;" is a syntax error).
-- Note: GRANT/CREATE/DROP are DDL and auto-commit; the COMMITs are kept
-- from the original runbook but are effectively no-ops.
--user1
GRANT ALL PRIVILEGES ON myTable TO user2;
COMMIT;
--user2
CREATE TABLE myTable AS SELECT * FROM user1.myTable;
COMMIT;
--user1
DROP TABLE user1.myTable;
COMMIT;
UPDATE
-- Fix: Oracle's parenthesized SET form "SET (col) = (...)" is only valid
-- with a subquery on the right-hand side; for a literal use "SET col = value".
UPDATE employees
SET salary = 10000
WHERE employee_id = 113;
-- update ALL rows (deliberately no WHERE clause)
UPDATE employees
SET salary = 10000;
-- UPDATE with a subquery (the parenthesized form is correct here)
UPDATE employees
SET (salary) = (SELECT salary FROM employees WHERE employee_id = 205)
WHERE employee_id = 113;
--Replace: only affects rows of the column that actually match
update EMAIL_ABOS set ABO_BEZEICHNUNG = replace(ABO_BEZEICHNUNG,'24T1','24-T1') ;
update EMAIL_ABOS set Anhangdateiname = replace(Anhangdateiname,'24T1','24-T1') ;
-- rows can also be locked from a SELECT
select employee_id, salary, commission_pct, job_id
from employees
where job_id = 'SA_REP'
for update -- this acquires the row locks
order by employee_id;
-- UPDATE combined with a subquery
UPDATE employees
SET (salary) = (SELECT salary FROM employees WHERE employee_id = 205)
WHERE employee_id = 113;
-- update ALL rows (deliberately no WHERE clause).
-- Fix: "SET (salary) = (10000)" is invalid Oracle syntax for a literal;
-- the parenthesized form requires a subquery.
UPDATE employees
SET salary = 10000;
DELETE
DELETE FROM departments WHERE department_name = 'Finance';
-- delete ALL rows (faster alternative: TRUNCATE TABLE departments;)
DELETE FROM departments;
-- date helpers
ALTER SESSION SET NLS_DATE_FORMAT='DD.MM.YYYY HH24:MI:SS';
TO_CHAR (lp.datum,'YYYY-MM') -- slow / non-sargable
TRUNC(t.von,'mon') -- -> 1.2.2015
-- day of week (1 = Monday, 7 = Sunday)
1 + TRUNC(t.von) - TRUNC(t.von, 'IW') "WT"
-- generate a list of dates
SELECT TRUNC (SYSDATE - ROWNUM) dt
FROM DUAL
CONNECT BY ROWNUM <= 31
-- delete all rows
delete from copy_emp;
-- or truncate (=faster, but no rollback possible)
truncate table copy_emp;
-- DELETE combined with a subquery
DELETE FROM tab2
WHERE tab2.id IN (SELECT id FROM tab1 WHERE id < 100);
NLS_Session_parameters / Date Format
-- show the session's language settings
select * from NLS_Session_parameters ;
-- or: SQL Developer -> Tools -> Preferences -> Database -> NLS
alter session set nls_language = 'german';   -- language only
alter session set nls_territory = 'germany'; -- all localizations
-- back to the defaults
alter session set nls_language = 'american'; -- language only
alter session set nls_territory = 'america'; -- all localizations
ALTER SESSION SET NLS_DATE_FORMAT='DD.MM.YYYY HH24:MI:SS';
String Zeichenketten Functions
-- string functions http://docs.oracle.com/cd/E17952_01/refman-5.1-en/string-functions.html
-- concatenation: col1 || col2 is the same as concat(col1, col2)
-- case conversion: upper, lower, initcap
-- wildcards only work inside LIKE:
--   %  any sequence of zero or more characters
--   _  exactly one arbitrary character
-- a literal wildcard needs an escape character (here: !)
select employee_id, last_name, job_id
from employees
where job_id like '%SA!_%' escape '!' ;
Substitution Variables / Austauschvariablen
-- prompt dialog -> variable via &variable (SQL*Plus / SQL Developer only)
select employee_id, last_name, salary, department_id
from employees
where employee_id = &employee_num -- e.g. 143
;
select employee_id, last_name, salary, department_id
from employees
where last_name = '&last_name' -- e.g. Matos
;
-- use && when the value should be reused without prompting the user again
accept enumber number prompt 'Bitte Employee ID eingeben :'
select first_name, last_name, salary
from employees
where employee_id=&enumber ;
synonym
-- a synonym saves typing
create synonym e for employees ;
select last_name, first_name from e ;
select * from user_synonyms ;
drop synonym e;
DUAL
-- DUAL is a public one-row table used to display results of functions and calculations.
select round(45.923,2), round(45.923,0), round(45.923,-1) from dual ;
select 'asdfghj' as zeichenkette from dual ;
select substr('asdfghj',1,3) from dual ;
select trim ('a' from 'asdffdsa') as both from dual ;
select replace('asdfghj','a','b') from dual ;
Date Functions
-- date arithmetic: date - date = days; divide by 7 for weeks
select last_name, (sysdate-hire_date)/7 as weeks
from employees ;
-- Fix: always pass TO_DATE an explicit format mask; the original relied on
-- the session NLS_DATE_FORMAT, which breaks on other clients.
select to_date ('12-jan-1981', 'DD-mon-YYYY') + 10000 from dual ;
-- format elements: http://docs.oracle.com/cd/E16655_01/server.121/e17209/sql_elements004.htm#SQLRF51079
-- FM = no leading ' ' or 0
select to_char(to_date ('12-jan-1981', 'DD-mon-YYYY') + 10000, 'DD.MM.YYYY') from dual ;
-- caution: 31-jan + 1 month = 29-feb (clamped to month end) :-(
select add_months (to_date('29-jan-1981', 'DD-mon-YYYY'), 1) from dual ;
-- last day of the month
select last_day (to_date('29-jan-1981', 'DD-mon-YYYY')) from dual ;
CASE
-- A) simple CASE (compares one expression against values)
select last_name, job_id, salary,
       case job_id
           when 'IT_PROG'  then 1.10*salary
           when 'ST_CLERK' then 1.15*salary
           when 'SA_REP'   then 1.20*salary
           else salary
       end as "Revised_Salary"
from employees;
-- B) searched CASE (each WHEN carries its own condition, no leading expression)
select last_name, job_id, salary,
       case
           when salary < 5000  then 'Low'
           when salary < 10000 then 'Med'
           when salary < 15000 then 'High'
           else 'Excellent'
       end as "Gehaltseinteilung"
from employees;
-- Alternative: DECODE (Oracle-specific, simple-case only, no searched form)
-- prefer CASE
select last_name, job_id, salary,
       decode ( job_id
              ,'IT_PROG' , 1.10*salary
              ,'ST_CLERK', 1.15*salary
              ,'SA_REP'  , 1.20*salary
              ,salary) as revised_salary
from employees ;
select last_name, salary,
       decode (trunc(salary/2000, 0), -- sal/2000, fraction truncated
               0, 0.00,
               1, 0.09, -- if trunc(sal/2000,0) == 1 -> return 0.09
               2, 0.20,
               3, 0.30,
               4, 0.40,
               5, 0.42,
               6, 0.44,
                  0.45) -- else
       as tax_rate
from employees ;
Gruppenfunktionen
/* avg count max min sum listagg stddev variance */
-- number of (non-null) salaries
select count (salary) as gehalt from employees ;
-- number of distinct salaries
select count (distinct salary) as gehalt from employees ;
-- NVL forces group functions to include null rows (here by mapping null -> 0)
select avg(nvl(commission_pct, 0)) from employees ;
select department_id, round(avg(salary))
from employees
group by department_id -- need not appear in the SELECT list
order by 1 ;
-- grouping by several columns
select department_id, job_id, sum(salary)
from employees
group by department_id, job_id
order by job_id ;
-- groups with a filter
select department_id, max(salary)
from employees
where job_id not like '%REP%' -- row filter runs before grouping
group by department_id
having max(salary)>10000 -- group filter
;
-- nesting group functions
select max(avg(salary))
from employees
group by department_id ;
-- CountIF.
-- Fix: compare the TO_CHAR result with a string, not the number 2005
-- (the original forced an implicit conversion).
select count (*) as "Total"
     , sum( case when to_char(hire_date,'YYYY') = '2005' then 1 else 0 end ) as "2005"
from employees ;
-- SumIf
select sum( case when department_id=20 then salary else NULL end ) as "Dep20"
from employees
group by job_id
order by job_id ;
-- LISTAGG -> concatenate group elements into one string
select department_id,
       listagg (last_name, ',') within group (order by last_name) as empl
from employees
group by department_id ;
JOINs
/* Join types:
   INNER JOIN      returns matching rows only
   (self) JOIN     Tab1.col1 joined with Tab1.col2
   three OUTER join types:
     LEFT OUTER    also returns non-matching rows of the 1st table (NULL-filled)
     RIGHT OUTER   ... of the 2nd table
     FULL OUTER    ... of both
   CROSS JOIN      cartesian product = every combination of Tab1 with Tab2
                   -> result gets very large...
*/
-- NATURAL JOIN: location_id exists in both tables (fragile if columns are added)
select department_name, city
from departments natural join locations ;
-- same, using aliases
select d.department_name, l.city
from departments d natural join locations l ;
-- same via "join using"
select department_name, city
from departments join locations using (location_id) ;
-- same via "join on"
select d.department_name, l.city
from departments d
join locations l on (d.location_id = l.location_id) ;
-- joining 3 tables
select e.employee_id, l.city, d.department_name
from employees e
join departments d on d.department_id = e.department_id
join locations   l on d.location_id   = l.location_id ;
-- joining over 2 keys
select j.employee_id, j.start_date, j.job_id
from employees e
join job_history j
  on (j.employee_id = e.employee_id and e.hire_date = j.start_date)
order by j.employee_id ;
-- legacy Oracle syntax (comma join)
select e.last_name, d.department_name
from employees e, departments d
where e.department_id = d.department_id -- appending (+) here makes it an outer join
;
-- self-join
select e.last_name as "Malocher", b.last_name as "Boss"
from employees e
join employees b -- alias names
  on (e.manager_id = b.employee_id) ;
-- non-equi join: number -> range -> key
select e.last_name, e.salary, j.grade_level
from employees e
join job_grades j on e.salary between j.lowest_sal and j.highest_sal ;
-- outer join
select e.last_name, e.department_id, d.department_name
from employees e
left outer join departments d on (e.department_id = d.department_id) ;
-- cross join = cartesian product
select e.last_name, d.department_name
from employees e cross join departments d;
-- legacy Oracle syntax:
select e.last_name , d.department_name
from employees e , departments d
-- where e.department_id = d.department_id
;
-- WHERE as an alternative to JOIN ON:
-- returns dep id, name, city, region etc.
select d.department_id, d.department_name, l.city, c.country_name, r.region_name
from departments d
join locations l using (location_id)
join countries c using (country_id)
join regions   r using (region_id)
order by d.department_id
SubQueries / Unterabfragen
-- single-row subquery
select last_name, salary
from employees
where salary > (
    select salary from employees where last_name = 'Abel'
    -- problem if two people are named Abel: better "where salary > all ( ... )"
    -- faster: select max(salary) in the subquery
) ;
-- IN list subquery
select last_name, salary, department_id
from employees
where salary in (
    select min(salary) from employees group by department_id
) ;
-- < ANY: condition must hold for at least one element of the subquery result
select employee_id, last_name, job_id, salary
from employees
where job_id != 'IT_PROG'
  and salary < any (
      select salary from employees where job_id = 'IT_PROG'
) ;
-- < ALL: condition must hold for every element of the subquery result
select employee_id, last_name, job_id, salary
from employees
where job_id != 'IT_PROG'
  and salary < all (
      select salary from employees where job_id = 'IT_PROG'
) ;
-- equivalent via MIN
select employee_id, last_name, job_id, salary
from employees
where job_id != 'IT_PROG'
  and salary < (
      select min (salary) from employees where job_id = 'IT_PROG'
) ;
-- WHERE EXISTS -> TRUE if the subquery has at least one hit
select employee_id,salary,last_name
from employees m
where exists (
    select employee_id from employees w
    where (w.manager_id=m.employee_id) and w.salary > 10000
) ;
-- the subquery runs once per row of the outer query:
-- for EVERY row of employees the condition "a subordinate earning > $10,000 exists" is checked
-- could probably be written shorter
-- problem with "NOT IN": NULL values
-- all departments without employees
select d.department_name
from departments d
where not exists (
    select e.department_id from employees e
    where e.department_id = d.department_id
) ;
select d.department_name
from departments d
where d.department_id not in (
    select e.department_id from employees e
    -- IMPORTANT!!! NOT IN yields no rows when NULL is in the result set
    where e.department_id is not null
    -- alternative: fix the NULL value:
    -- select NVL(e.department_id,0)
) ;
-- alternative with NOT EXISTS
select d.department_name
from departments d
where not exists (
    select 'X' from employees e
    where d.department_id = e.department_id
) ;
-- nice alternative with set operators (lesson 9)
select d.department_name
from departments d
where d.department_id in (
    select d.department_id from departments d
    minus
    select distinct e.department_id from employees e
) ;
Mengen Operatoren
-- set operators (useful in subqueries)
-- UNION      (no duplicates)  CAUTION: sorts the results
-- UNION ALL  (with duplicates) CAUTION: no sorting
-- INTERSECT  (intersection)
-- MINUS      (TabA - TabB)
-- current and former jobs of all employees
select employee_id, job_id from employees
union
select employee_id, job_id from job_history ;
-- which employees hold the same job again
select employee_id, job_id from employees
intersect
select employee_id, job_id from job_history ;
-- employee ids of employees who never changed jobs
select employee_id from employees
minus
select employee_id from job_history ;
transaction controll
-- transaction control (syntax sketch, not directly executable)
-- SAVEPOINT <name>                 -- Oracle only
-- COMMIT                           -- changes become permanent; also removes savepoints
-- ROLLBACK [TO SAVEPOINT <name>]
WITH
-- CTE: departments whose total salary exceeds the average department total
with dept_costs as (
    -- department name | sum(salary)
    select d.department_name, sum(e.salary) as dept_total
    from employees e
    join departments d on e.department_id = d.department_id
    group by d.department_name
),
avg_cost as (
    -- a single number, computed from dept_costs above
    select sum(dept_total)/count(*) as dept_avg
    from dept_costs
)
select *
from dept_costs
where dept_total > (select dept_avg from avg_cost)
order by department_name ;
PIVOT
(ab Oracle 11g)
-- PIVOT (Oracle 11g+).
-- Fix: the CTE selected a garbled column "salvali"; the PIVOT clause
-- aggregates SUM(sal), so the CTE must provide "sal".
WITH pivot_data AS (
    SELECT deptno, job, sal FROM emp
)
SELECT *
FROM pivot_data
PIVOT (
    SUM(sal)            --<-- pivot_clause
    FOR deptno          --<-- pivot_for_clause
    IN (10,20,30,40)    --<-- pivot_in_clause
);
-- alternative syntax without WITH
SELECT *
FROM (SELECT deptno, job, sal FROM emp)
PIVOT (
    SUM(sal)            --<-- pivot_clause
    FOR deptno          --<-- pivot_for_clause
    IN (10,20,30,40)    --<-- pivot_in_clause
);
CUBE
select department_id, job_id, sum(salary), count(employee_id)
from employees e
where e.department_id < 60
-- simple form:
-- group by (department_id, job_id)
-- extended via CUBE:
group by cube (department_id, job_id)
/* CUBE produces:
   1. grouping over nothing   (*,*)
   2. grouping over both      (A,B)  -- like a normal GROUP BY
   3.+4. groupings over (A,*) and (*,B)
   the * positions appear as NULL in the output */
;
Regular Expressions
-- regular expressions
select first_name, last_name
from employees
where regexp_like(first_name,'^ste(ph|v)en$','i') -- i = case-insensitive
;
-- reformat phone numbers: a.b.c -> c-b-a
select last_name,
       phone_number as phone1,
       regexp_replace(phone_number, '(\d+)\.(\d+)\.(\d+)', '\3-\2-\1') as phone2
from employees ;
Views
-- basic view
create view empvu80 as
select employee_id, last_name, salary
from employees
where department_id = 80 ;
select * from empvu80;
select * from user_views;
select text from user_views where view_name = 'EMPVU80';
drop view empvu80;
-- CREATE OR REPLACE VIEW
-- "or replace" keeps other users' grants on the view when it is replaced!
-- update the view and rename its columns
create or replace view empvu80 (id_number, name, sal, department_id) as
select employee_id, first_name || ' ' || last_name, salary, department_id
from employees
where department_id = 80 ;
-- WITH CHECK OPTION CONSTRAINT = IMPORTANT!!!
-- ensures DML through the view keeps satisfying the predicate (here dep_id=20)
create or replace view empvu20 as
select * from employees
where department_id = 20
with check option constraint empvu20_ck ;