SQL - Structured Query Language

1. create database "database_name":

create database learning
with owner = postgres
encoding = 'UTF8'
connection limit = -1;

2. create table "table_name":

create table customers (
customer_id serial primary key,
first_name varchar(50),
last_name varchar(50),
email varchar(150),
age int
);

3. select * from "table_name":

select * from customers;

4. insert into "table_name" (column_name1, column_name2...) values ('value1', 'value2'):

insert into customers (first_name, last_name, email, age)
values ('Adnan', 'Waheed', 'a@b.com', 40);

5. insert into "table_name" (column_name1, column_name2...)
values
('value1', 'value2'),
('value1', 'value2'),
('value1', 'value2'):

insert into customers (first_name, last_name)
values
('Adnan', 'Oktar'),
('John', 'Adams'),
('Linda', 'Abe');

6. insert data containing quotes:

insert into customers (first_name)
values
('Bill''O Sullivan');

7. returning: returns the rows affected by the statement

insert into customers (first_name)
values
('Joseph') returning *;

8. update "table_name" set "column_name" = 'new_value' where "column_name" = 'value':

update customers
set email = 'a2@b.com'
where customer_id = 1;

update customers
set email = 'a4@b.com',
age = 30
where customer_id = 1;

update customers
set email = 'a@b.com'
where customer_id = 3
returning *;

update customers
set is_enable = 'Y';


9. delete from "table_name"
where column_name = 'value':

delete from customers
where customer_id = 9;

delete from customers; -- this deletes all the data in the table

10. upsert:

syntax: insert into table_name (column_list)
values (value_list)
on conflict target action;

insert into t_tags (tag)
values ('Pen')
on conflict (tag)
do nothing;

insert into t_tags (tag)
values ('Pen')
on conflict (tag)
do update set
tag = excluded.tag,
update_date = now();


11. Concatenating: ||

select first_name || ' ' || last_name as "Full Name" from actors;


12. Order by: asc, desc

select * from movies
order by release_date asc;

select * from movies
order by release_date desc;

select first_name name,
last_name surname
from actors
order by surname;


14. Length:

select first_name as name,
length(first_name) as uzunluq
from actors;

select first_name as name,
length(first_name) uzunluq
from actors
order by 1, 2;


15. Order by with Null values:

select * from demo_sorting
order by num nulls first;

select * from demo_sorting
order by num nulls last;


16. Distinct:

select distinct movie_lang
from movies;

select distinct * from movies
order by movie_id asc;

select count(distinct movie_lang)
from movies;


17.  And:

select * from movies
where movie_lang = 'English'
and age_certificate = '18';


select * from movies
where movie_lang = 'English'
and director_id = '8';

18. Or:

select * from movies
where movie_lang = 'English'
or movie_lang = 'Chinese'
order by movie_lang;

select * from movies
where (movie_lang = 'Chinese'
or movie_lang = 'English')
and age_certificate = '12';

19. ! We cannot use column aliases in the where clause; the query below fails:
select first_name,
last_name surname
from actors
where surname = 'Allen';

20. Comparison operators: >, >=, <, <=, <>, !=
select * from movies
where movie_length > 100;

select * from movies
where movie_length >= 100;

select * from movies
where movie_length < 100;

select * from movies
where movie_length <= 100;

select * from movies
where movie_lang <> 'English';

select * from movies
where movie_lang != 'English';

21. Limit
select * from movies
where movie_lang != 'English'
limit 10;

select * from actors
where gender = 'F'
order by date_of_birth desc
limit 10;

select * from movies_revenues
order by revenues_domestic desc
nulls last
limit 5;

22. Offset: specifies how many rows to skip before returning results
select * from movies
limit 5 offset 4;

23. Fetch
select * from movies
fetch first 1 row only;

select * from movies
fetch first row only;

select * from movies
fetch first 5 row only;

select * from movies
order by movie_length desc
fetch first 5 row only;

select * from actors
where gender = 'F'
order by date_of_birth desc
fetch first 10 row only;

select * from movies
fetch first 1 row only;

select * from movies
order by movie_length desc
fetch first 5 row only
offset 5;

24. In
instead of writing this:
select * from movies
where movie_lang = 'English'
or movie_lang = 'Chinese'
or movie_lang = 'Japanese'
order by movie_lang;
we should use this:
select * from movies
where movie_lang in ('English', 'Chinese', 'Japanese')
order by movie_lang;

select * from movies
where age_certificate in ('13', 'PG');

25. Not In
select * from movies
where director_id not in ('13', '10');

26. Between
select * from movies
where movie_length between 100 and 200
order by movie_length

27. Not Between
select * from movies
where movie_length not between 100 and 200
order by movie_length

*** like is case sensitive
*** ilike is not case sensitive
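
A quick illustration on the actors table used elsewhere in these notes (the pattern 'A%' is just an example):
-- like only matches first names starting with a capital 'A'
select first_name from actors
where first_name like 'A%';
-- ilike ignores case, so it also matches names starting with 'a'
select first_name from actors
where first_name ilike 'a%';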

28. is null, is not null
select * from actors
where date_of_birth is null
or first_name is null;
select * from movies_revenues
where revenues_domestic is null;
select * from movies_revenues
where revenues_domestic is null
or revenues_international is null
select * from movies_revenues
where revenues_domestic is not null;
select * from actors
where date_of_birth = null;
*** '= null' is not an error, but it never matches any row (comparison with null yields null); always use 'is null' / 'is not null'
29. Concatenation ||, concat, concat_ws:
select length(concat(first_name, last_name))
from actors;

select 'Hello' || 'World' as new_string;
select 'Hello' || ' ' || 'World' as new_string;
select concat(first_name, last_name) as "Actorf Full Name"
from actors;
select concat(first_name, ' ', last_name) as "Actorf Full Name"
from actors;
select concat_ws(',', first_name, last_name) as "Actorf Full Name"
from actors;
30. Boolean: has three values: true, false, null
true -> TRUE, 'true', 't', 'y', 'yes', '1'
false -> FALSE, 'false', 'f', 'n', 'no', '0'
insert into table_boolean (is_available)
values (TRUE);
insert into table_boolean (is_available)
values (TRUE), ('true'), ('t'), ('y'), ('yes'), ('1');

this automatically means is_available = true:
select * from table_boolean
where is_available;
and is_available = false:
select * from table_boolean
where not is_available;

31. Character: 
Three main types of Character data:
Character(n), Char(n) -> fixed length, blank padded
Character varying(n), varchar(n) -> variable-length with length limit
Text, varchar -> variable unlimited length
select cast('Adnan' as character(10)) as "Name1",
'Adnan'::char(10) as "Name2";
If we write plain char without a length, it defaults to char(1):
select 'Adnan'::char as "Name";
If we use varchar without a length, there is no length limit:
select 'Adnan'::varchar as "Name";
Text: unlimited length approx. 1GB.
32. Numeric:
Numeric columns hold numbers of various types (integers, fixed-point and floating-point values).
Serial: auto-increment integer data type: smallserial, serial, bigserial.
create table table_serial(
product_id serial,
product_name varchar(100)
);
33. Decimal: (precision, scale)
precision -> max number of digits to the left and right of the decimal point
scale -> number of digits allowable on the right of the decimal point
create table table_numbers (
col_numeric numeric (20, 5),
col_real real,
col_double double precision
);
real -> has 6 decimal digits precision;
double precision -> 15 decimal digits precision;
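A quick check of how the scale of col_numeric rounds values in the table_numbers table above (the input value is arbitrary):
insert into table_numbers (col_numeric, col_real, col_double)
values (1.123456789, 1.123456789, 1.123456789);
-- col_numeric comes back as 1.12346, rounded to the 5 decimal places allowed by its scale
select * from table_numbers;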
34. Date/Time: 
Date -> date only
Time -> time only
Timestamp -> date and time
Timestamptz -> date and time with time zone
Interval -> stores a period of time (the difference between two dates/times)

Date: uses 4 bytes to store date value, by default uses the format YYYY-MM-DD
create table table_dates(
id serial primary key ,
employee_name varchar(100) not null ,
hire_date date not null ,
add_date date default current_date
);

Time: uses 8 bytes to store a time value
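A minimal sketch of a table with time columns (the table and column names here are made up for illustration):
create table table_times (
id serial primary key,
class_name varchar(100),
start_time time not null,
add_time time default localtime
);

insert into table_times (class_name, start_time)
values ('Math', '09:30:00');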

35. to_char
-- Format a date value as a string
select release_date,
to_char(release_date, 'DD-MM-YYYY'),
to_char(release_date, 'Dy, MM, YYYY')
from movies;

36. Create domain
create domain addr varchar(100) not null
create table locations (
address addr
)

create domain positive_numeric int not null check ( value > 0)
create table sample (
sample_id serial primary key,
value_num positive_numeric
)

insert into sample (value_num) values(10)

create domain us_postal_code as text
check (
value ~'^\d{5}$'
or value ~'^\d{5}-\d{4}$'
)

create domain proper_email varchar(150)
check (value ~* '&[A-Za-z0-9._%-]+@[A-Za-z0-9.-]+[.][A-Za-z]+$')

create domain valid_color varchar(10)
check ( value in ('red', 'green', 'blue'))

37. Drop a Domain data type
Drop Domain name
drop domain positive_numeric cascade

drop domain valid_color
38. Create Type
-- Create a address composite data type
create type address as
(
city varchar(50),
country varchar(50)
)

create table companies (
comp_id serial primary key,
address address
)

insert into companies (address) values (row ('London', 'UK'))

select * from companies;

select address from companies;

select (companies.address).country from companies;

-- Create a composite 'inventory_item' data type
create type inventory_item as
(
product_name varchar(200),
supplier_id int,
price numeric
)

create table inventory
(
inventory_id serial primary key,
item inventory_item
)

insert into inventory (item) values (row('paper', 20, 10.99))

select (inventory.item).product_name from inventory where (item).price > 3.99

39. Create a currency ENUM data type with currency data
create type currency as enum ('usd', 'eur', 'gbp')
alter type currency add value 'chf' after 'eur'

create table stocks
(
stock_id serial primary key,
stock_currency currency
)

insert into stocks (stock_currency) values ('usd')

-- Drop type name
create type sample_type as enum ('abc', '123')

drop type sample_type

-- Alter data types
create type myaddress as
(
city varchar(50),
country varchar(20)
)
-- Rename a data type
alter type myaddress rename to my_address
-- Change the owner
alter type my_address owner to postgres
-- Change the schema
alter type my_address set schema test_scm
-- To add a new attribute
alter type test_scm.my_address add attribute street_address varchar(150)
40. Alter an ENUM data type
-- Update a value
create type mycolors as enum ('green', 'red', 'blue')
alter type mycolors rename value 'red' to 'orange'
-- List all enum values
select enum_range (null::mycolors)
-- To add a new value
alter type mycolors add value 'red' before 'green'
41. Update an ENUM data in a production server
create type status_enum as enum ('queued', 'waiting', 'running', 'done')

create table jobs(
job_id serial primary key,
job_status status_enum
)

insert into jobs (job_status) values ('queued')

update jobs set job_status = 'running' where job_status = 'waiting'

alter type status_enum rename to status_enum_old

create type status_enum as enum ('queued', 'running', 'done')

alter table jobs alter column job_status type status_enum using job_status::text::status_enum

drop type status_enum_old
42. An ENUM with a default value
create type status as enum ('pending', 'approved', 'declined')

create table cron_jobs(
cron_job_id int,
status status default 'pending'
)

insert into cron_jobs (cron_job_id) values ('1')

---------------------------------------------------------------------------------------------
                                Constraints
Types of constraints 
NOT NULL - field must have a value
UNIQUE - Only unique values are allowed
DEFAULT - Ability to set default values
PRIMARY KEY - Uniquely identifies each row/record in a database table
FOREIGN KEY - Constrains data based on columns in other tables
CHECK - Checks that all values meet specific criteria
1. Not Null constraints
create table table_nn (
id serial primary key,
tag text not null
)

insert into table_nn (tag) values ('Adam')

insert into table_nn (tag) values (null)

create table table_nn2 (
id serial primary key,
tag text
)

alter table table_nn2
alter column tag set not null;

2. Unique constraints
create table emails (
id serial primary key ,
email text unique
)

insert into emails (email) values ('a@b.com')
-- Create unique key on multiple columns
create table products (
id serial primary key ,
product_code varchar(10),
product_name text
)

alter table products
add constraint unique_product_code unique (product_code, product_name)

3. Default constraint
create table employees(
employee_id serial primary key ,
first_name varchar(50),
last_name varchar(50),
is_enable varchar(2) default 'Y'
)

insert into employees(first_name, last_name) VALUES ('John', 'Adam')
-- Set a default value to an existing table
alter table employees
alter column is_enable set default 'N'
-- Drop a default value
alter table employees
alter column is_enable drop default

4. Primary key constraint
A table can have multiple unique columns, but only one primary key
When multiple fields are used as a primary key, they are called a composite key
create table table_items(
item_id integer primary key,
item_name varchar(100) not null
)

insert into table_items (item_id, item_name)
values (1, 'Pen')
-- Drop a constraint
alter table table_items
drop constraint table_items_pkey;
-- Alter table and add Primary key
alter table table_items
add primary key (item_id, item_name)

5. Primary key constraints on multiple columns = composite primary key
create table t_grades (
course_id varchar(100) not null,
student_id varchar(100) not null,
grade int not null,
primary key (course_id, student_id)
);

insert into t_grades (course_id, student_id, grade) values
('Math', 's1', 70),
('Chemistry', 's1', 70),
('English', 's2', 70),
('Physics', 's1', 80);
-- drop a primary key
alter table t_grades
drop constraint t_grades_pkey;
-- add a primary key
alter table t_grades
add constraint t_grades_course_id_session_id_pkey
primary key (course_id, student_id);

6. Foreign key constraint
-- In PostgreSQL, the table that contains the foreign key is called the child table, and the table whose column the foreign key references is called the parent table.
create table t_suppliers
(
supplier_id int primary key,
supplier_name varchar(100) not null
);

create table t_products
(
product_id int primary key,
product_name varchar(100) not null,
supplier_id int not null,
foreign key (supplier_id) references t_suppliers (supplier_id)
)
-- To delete referenced data, first delete the rows from the child table (the one holding the foreign key), then delete from the parent table.

-- How to drop a constraint
alter table t_products
drop constraint t_products_supplier_id_fkey;

-- Add or update foreign key constraint on existing table
First drop the existing foreign key constraint, then add the new one.
alter table t_products
add constraint t_products_supplier_id_fkey foreign key
(supplier_id) references t_suppliers (supplier_id);
 
7. Check constraint
The Check constraint uses a Boolean expression to evaluate the values before they 
are inserted or updated to the column. 
create table staff
(
staff_id serial primary key,
first_name varchar(50),
last_name varchar(50),
birth_date date check ( birth_date > '1900-01-01' ),
joined_date date check ( joined_date > staff.birth_date ),
salary numeric check ( salary > 0 )
);

insert into staff (first_name, last_name, birth_date, joined_date, salary)
values ('Adam', 'King', '1999-01-01', '2002-01-01', 100);
-- Define Check constraint for existing table
create table prices (
price_id serial primary key,
product_id int not null,
price numeric not null,
discount numeric not null,
valid_from date not null
);
-- price > 0 and discount >= 0 and price > discount
alter table prices
add constraint price_check
check (
price > 0
and discount >= 0
and price > discount
)
-- rename a constraint on a table
alter table prices
rename constraint price_check to price_discount_check;
-- drop a constraint
alter table prices
drop constraint price_discount_check;


-------------------------------------------------------------------------------------
                                    Sequence
1. Create sequence
create sequence if not exists test_seq;
-- Advance sequence and return new value
select nextval('test_seq');
-- Select current value
select currval ('test_seq');
-- Set a sequence
select setval ('test_seq', 100);
-- Set a value and not skip over
select setval('test_seq', 200, false);
-- Control the sequence start value
create sequence if not exists test_seq2 start with 100;
2. Alter a sequence
-- Alter sequence name restart with value
-- Alter sequence name rename to new_name
alter sequence test_seq restart with 100;

alter sequence test_seq rename to my_seq4;
3. Use multiple sequence parameters to create a sequence
-- Create sequence name
-- Start with value
-- Increment value
-- Minvalue value
-- Maxvalue value
create sequence if not exists test_seq3
increment 50
minvalue 400
maxvalue 6000
start with 500;

select nextval('test_seq3');
4. Specify the data type of a sequence (Smallint | Int | Bigint)
-- Default is bigint
-- Create sequence if not exists name as data_type
-- Create sequence name as data_type
create sequence if not exists test_seq_smallint as smallint;
create sequence if not exists test_seq_smallint as int;
create sequence if not exists test_seq4;
5. Create a Descending sequence and cycle | no cycle
-- Create sequence seq_des
-- Increment -1
-- Minvalue 1
-- Maxvalue 3
-- Start 3
-- Cycle;
create sequence seq_des
increment -1
minvalue 1
maxvalue 3
start 3
cycle;

create sequence seq_des1
increment -1
minvalue 1
maxvalue 3
start 3
no cycle;
6. Delete/Drop a sequence
-- Drop sequence name
drop sequence test_seq2;
7. Attaching sequence to a table
create table users (
user_id serial primary key,
user_name varchar(50)
);

alter sequence users_user_id_seq
restart with 100;

insert into users (user_name) values ('Parvin');

select * from users;


--
create table users2 (
user_id int primary key,
user_name varchar(50)
);

create sequence users2_user_id_seq
start with 100 owned by users2.user_id;

alter table users2
alter column user_id set default nextval('users2_user_id_seq');

insert into users2 (user_name) values ('Parvin');
insert into users2 (user_name) values ('Parvin2');

select * from users2;
-- Share sequence among tables
create sequence common_fruits_seq start with 100;

create table apples (
fruit_id int default nextval('common_fruits_seq') not null,
fruit_name varchar(50)
);

create table mangoes (
fruit_id int default nextval('common_fruits_seq') not null,
fruit_name varchar(50)
);

insert into apples (fruit_name) values ('red');
insert into mangoes (fruit_name) values ('big');

select * from apples;
select * from mangoes;
-- Create an alphanumeric sequence
create sequence table_seq;

create table contacts(
contact_id text not null default ('ID' || nextval('table_seq')),
contact_name varchar(50)
);

alter sequence table_seq owned by contacts.contact_id;

insert into contacts (contact_name) values ('Parvin');

select * from contacts;

--------------------------------------------------------------------------------------
                                String functions
1. Upper, Lower, Initcap
select upper('parvin');

select upper (first_name) as first_name,
upper(last_name) as last_name
from directors;

select lower('PARVIN');
select initcap('hello from developers');

select initcap(concat(first_name, ' ', last_name))
as full_name
from directors
order by first_name;
2. Left and Right functions
-- Left('something', n) function returns the first n characters in the string
select left('alfa', 1);
-- Left('something', -n) function returns all characters except the last n characters
select left('parvin', -2);
-- Get initial for all directors name
select left(first_name, 1) as initial
from directors order by 1;

select left (first_name, 1) as initial,
count(*) as total_initials
from directors
group by 1
order by 1;
-- Right() function returns the last n characters in the string.
select right ('parvin', 1);
select right ('parvin', -1);
-- Find all directors where their last name ends with 'on'
select last_name, right(last_name, 2)
from directors
where right(last_name, 2) = 'on';
3. Reverse
select reverse('parvin');
4. Split_part
This function splits a string on a specific delimiter
split_part(string, delimiter, position);
select split_part('1, 2, 3', ',', 2);
select split_part('a|b|c|d', '|', 3);
-- Get the release year of all the movies
select movie_name, release_date,
split_part(release_date::text, '-', 1) as release_year
from movies;
5. Lpad Rpad
Lpad - pads a string on the left to a specified length with a sequence of characters.
Rpad - pads a string on the right to a specified length with a sequence of characters.
select lpad('Database', 15, '*');
select rpad('Database', 15, '*');
-- Lets draw a quick chart on movies total revenues
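A possible way to finish that idea with rpad, drawing one '*' per 20 units of domestic revenue (the scale factor of 20 is an arbitrary choice):
select movie_id,
revenues_domestic,
rpad('', (revenues_domestic / 20)::int, '*') as revenue_chart
from movies_revenues
where revenues_domestic is not null
order by revenues_domestic desc
limit 10;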
6. Length 
Returns the number of characters or the number of bytes of a string
select length('amazing postgres');

select length(cast(124589 as text));





---------------------------------------------------------------------------------------------
Grouping Data
1. Group by: group by eliminates duplicate values from the results, similar to distinct:
it keeps only one row per unique group. Group by can also be used without an aggregate function, as shown below.
select column1,
aggregate_function(column2)
from tablename
group by column1;
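
For example, grouping without an aggregate simply keeps one row per distinct value:
select movie_lang
from movies
group by movie_lang;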

-- get total count of all movies group by movie language
select movie_lang, count(movie_lang)
from movies
group by movie_lang;
-- get average movie length group by movie language
select movie_lang, avg(movie_length)
from movies
group by movie_lang
order by movie_lang;
-- get the sum total movie length per age certificate
select age_certificate, sum(movie_length)
from movies
group by age_certificate
order by age_certificate;
-- list minimum and maximum movie length group by movie language
select movie_lang, min(movie_length), max(movie_length)
from movies
group by movie_lang;

2. Using group by with multiple columns, order by
-- using more than 1 columns in select
-- get average movie length group by movie language and age certification
select movie_lang, age_certificate, avg(movie_length)
from movies
group by movie_lang, age_certificate
order by age_certificate;

select age_certificate, movie_lang, avg(movie_length)
from movies
group by movie_lang, age_certificate
order by age_certificate;
* Every non-aggregated column in the select list must be included in the group by clause

-- lets filter some records too
-- get average movie length grouped by movie language and age certificate where movie length is greater than 100
select movie_lang, age_certificate, avg(movie_length)
from movies
where movie_length > 100
group by movie_lang, age_certificate;

-- get average movie length group by movie age certificate where age certificate = 10
select age_certificate, avg(movie_length)
from movies
where age_certificate = '10'
group by age_certificate;

-- how many directors are there per each nationality
select nationality, count(*)
from directors
group by nationality
order by 2 desc;

-- get total sum movie length for each age certificate and movie language combination
select age_certificate, movie_lang, sum(movie_length)
from movies
group by age_certificate, movie_lang
order by 3;

*** We cannot use group by on an aggregate function column.
3. Using Having clause:
We use Having clause to specify a search condition for a group or an aggregate.
The Having clause is often used with the group by clause to filter rows based on filter
conditions.
You can't use column aliases in the Having clause, as Having clause is evaluated before the 
select clause.
Having clause calculates on aggregate function and not actual select columns.

select column1,
aggregate_function(column2)
from tablename
group by column1
having condition;

-- List movie languages where the sum total length of the movies is greater than 100
select movie_lang, sum(movie_length)
from movies
group by movie_lang
having sum(movie_length) > 100
order by 2;
select movie_lang, sum(movie_length)
from movies
group by movie_lang
having sum(movie_length) > 100
order by sum(movie_length);

-- List directors where their sum total movie length is greater than 200
select director_id, sum(movie_length)
from movies
group by director_id
having sum(movie_length) > 200;

-- Can we use column aliases with Having clause? Answer: NO
select director_id, sum(movie_length) as tml
from movies
group by director_id
having tml > 200;

4. Having vs Where
Having works on result group.
Where works on select columns and not on result group.
-- Get the movie languages where their sum total movie length is greater than 200.
select movie_lang, sum(movie_length)
from movies
group by movie_lang
having sum(movie_length) > 200;
-- Lets try using where
select movie_lang, sum(movie_length)
from movies
where sum(movie_length) > 200
group by movie_lang;
ERROR: aggregate functions are not allowed in where

5. Handling Null values with group by
create table employees_test (
employee_id serial primary key,
employee_name varchar,
department varchar,
salary int
);

-- How many employees are there for each group
select department, count(*)
from employees_test
group by department;
-- Lets handle null values
COALESCE(source, '')
select coalesce(department, '* no department *') as department, count(*)
from employees_test
group by department
order by department;

---------------------------------------------------------------------------------------------
Joining Multiple Tables
1. Inner Joins:
select * 
from table_a
join table_b
on table_a.key_column = table_b.foreign_key_column

-- Lets combine 'movies' and 'directors' tables
select m.movie_id, m.movie_name, m.director_id
from movies m
inner join directors d on m.director_id = d.director_id;

-- Lets filter some records
select m.movie_id, m.movie_name, m.director_id
from movies m
inner join directors d on m.director_id = d.director_id
where m.movie_lang = 'English'
and d.director_id = 3;

2. Inner joins with using:
-- We use USING only when joining tables have the same column names, rather than ON
-- Lets say the same column name is column1

select
table1.column1,
table2.column1
from table1
inner join table2 using (column1);

-- Lets connect 'movies' and 'directors' table with USING keyword
select * from movies
inner join directors using (director_id);
select * from movies
inner join movies_revenues using(movie_id);
-- We can connect more than two tables
-- Connect 'movies', 'directors' and 'movies_revenues' tables
select * from movies
inner join directors using(director_id)
inner join movies_revenues using (movie_id);

3. Inner joins with filter data:
-- Select movie name, director name, domestic revenues for all japanese movies
select m.movie_name,
d.first_name,
d.last_name,
r.revenues_domestic
from movies m
inner join directors d on m.director_id = d.director_id
inner join movies_revenues r on m.movie_id = r.movie_id
where m.movie_lang = 'Japanese';

-- Select movie name, director name for all English, Chinese and Japanese movies where domestic revenues are greater than 100
select m.movie_name,
d.first_name,
d.last_name,
r.revenues_domestic
from movies m
inner join directors d on m.director_id = d.director_id
inner join movies_revenues r on m.movie_id = r.movie_id
where m.movie_lang in('English', 'Japanese', 'Chinese')
and r.revenues_domestic > 100;

-- Select movie name, director name, movie language, total revenues for all top 5 movies  
select m.movie_name,
d.first_name,
d.last_name,
(r.revenues_domestic + r.revenues_international) total
from movies m
inner join directors d using (director_id)
inner join movies_revenues r using (movie_id)
order by 4 desc nulls last
limit 5;

-- What are the top 10 most profitable movies between 2005 and 2008? Print the movie name, director name, movie language, total revenue.
select m.movie_name, m.movie_lang,
m.release_date,
d.first_name, d.last_name,
mr.revenues_domestic, mr.revenues_international,
(mr.revenues_domestic + mr.revenues_international) as "Total Revenue"
from movies m
inner join directors d on m.director_id = d.director_id
inner join movies_revenues mr on m.movie_id = mr.movie_id
where m.release_date between '2005-01-01' and '2008-12-31'
order by 7 desc nulls last
limit 10;

4. How to Inner Join tables with different columns data types
-- Lets create a table with int data type
create table t1 (test int);

-- Lets create a table with varchar data type
create table t2 (test varchar(10));

-- Can we join them?
Yes, but only if the data types match.
-- If the column data types are different, how can we join them?
We use cast:
select * from t1
inner join t2 on t1.test = cast(t2.test as int);

5. Left Joins
-- * Returns every row from the left table plus the rows that match values in the joined column from the right table.
-- * When a left table row doesn't have a match in the right table, the result shows no values from the right table.
create table left_products (
product_id serial primary key,
product_name varchar(100)
);

create table right_products (
product_id serial primary key,
product_name varchar(100)
);

insert into left_products (product_id, product_name) values
(1, 'Computers'),
(2, 'Laptops'),
(3, 'Monitors'),
(5, 'Mics');

insert into right_products (product_id, product_name) values
(1, 'Computers'),
(2, 'Laptops'),
(3, 'Monitors'),
(4, 'Pen'),
(7, 'Papers');

-- Lets join the table with Left Join
-- common column is product_id
select * from left_products
left join right_products on left_products.product_id = right_products.product_id;

-- Using movies tables for joins
-- List all the movies with directors first and last names, and movie name
select d.first_name, d.last_name, m.movie_name from movies m
left join directors d on m.director_id = d.director_id;
-- Lets reverse the tables directors and movies
select d.first_name, d.last_name, m.movie_name from directors d
left join movies m on m.director_id = d.director_id;
-- Lets add a record in our directors table
insert into directors (first_name, last_name, date_of_birth, nationality) values
('James', 'David', '2010-01-01', 'American');

select d.first_name,
d.last_name,
m.movie_name
from directors d
left join movies m on d.director_id = m.director_id;

select d.first_name, d.last_name, m.movie_name
from movies m
left join directors d on m.director_id = d.director_id;

-- Can we add a where condition, say to get the list of English and Chinese movies only
select d.first_name,
d.last_name,
m.movie_name,
m.movie_lang
from directors d
left join movies m on d.director_id = m.director_id
where m.movie_lang in ('English', 'Chinese');

-- Count all movies for each directors
select d.first_name, d.last_name,
count(*) as "total_movies"
from directors d
left join movies m on d.director_id = m.director_id
group by d.first_name, d.last_name;

-- Get all the movies with age certification for all directors where nationalities are 'American',
'Chinese' and 'Japanese'
select *
from directors d
left join movies m on d.director_id = m.director_id
where d.nationality in ('American', 'Chinese', 'Japanese');

-- Get all the total revenues done by each films for each directors
select d.first_name, d.last_name,
sum (r.revenues_international + r.revenues_domestic) as "total revenue"
from directors d
left join movies m on d.director_id = m.director_id
left join movies_revenues r on r.movie_id = m.movie_id
group by d.first_name, d.last_name
order by 3 desc nulls last;

6. Right join
--* Returns every row from the right table plus the rows that match values in the joined column from the left table.
--* When a right table row doesn't have a match in the left table, the result shows no values from the left table.

-- Lets join left_products and right_products via right join.
select * from left_products
right join right_products rp on left_products.product_id = rp.product_id;

-- List all the movies with directors first and last names, and movie name
select d.first_name, d.last_name,
m.movie_name
from directors d
right join movies m on m.director_id = d.director_id;

-- Lets reverse the tables directors and movies
select d.first_name, d.last_name,
m.movie_name
from movies m
right join directors d on m.director_id = d.director_id;

-- Count all movies for each directors
select d.first_name,
d.last_name,
count(*) as "total movies"
from directors d
right join movies m on d.director_id = m.director_id
group by d.first_name, d.last_name
order by 3 desc;

-- Get all the total revenues done by each film for each director; a possible sketch follows
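A possible sketch, mirroring the left-join version above but keeping every director via a right join:
select d.first_name, d.last_name,
sum(r.revenues_domestic + r.revenues_international) as "total revenue"
from movies m
right join directors d on d.director_id = m.director_id
left join movies_revenues r on r.movie_id = m.movie_id
group by d.first_name, d.last_name
order by 3 desc nulls last;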

7. Full Join
-- Returns all the data from table1 and table2.
-- Lets join left_products and right_products via full join
select * from left_products lp
full join right_products rp on lp.product_id = rp.product_id;

-- List all the movies with directors first and last names, and movie name
select d.first_name,
d.last_name,
m.movie_name
from directors d
full join movies m on d.director_id = m.director_id
order by d.first_name;

-- Lets reverse the tables directors and movies
select d.first_name,
d.last_name,
m.movie_name
from movies m
full join directors d on d.director_id = m.director_id
order by d.first_name;
The result is the same, because a full join returns all rows from both tables.

-- Can we add a Where conditions, say get list of english and chinese movies only
select d.first_name,
d.last_name,
m.movie_name
from movies m
full join directors d on d.director_id = m.director_id
where m.movie_lang in ('English', 'Chinese')
order by d.first_name;

8. Joining multiple tables
-- Lets join movies, directors and movies revenues tables
select *
from movies m
join directors d on m.director_id = d.director_id
join movies_revenues mr on m.movie_id = mr.movie_id;

-- Does the order in which the tables are joined matter?
select *
from directors d
join movies m on m.director_id = d.director_id
join movies_revenues mr on m.movie_id = mr.movie_id;
The answer is: no
-- Lets join movies actors, directors, movies revenues together
select * from actors a
join movies_actors ma on a.actor_id = ma.actor_id
join movies m on ma.movie_id = m.movie_id
join directors d on m.director_id = d.director_id
join movies_revenues mr on m.movie_id = mr.movie_id;

-- Is join the same as inner join?
select * from actors a
inner join movies_actors ma on a.actor_id = ma.actor_id
inner join movies m on ma.movie_id = m.movie_id
inner join directors d on m.director_id = d.director_id
inner join movies_revenues mr on m.movie_id = mr.movie_id;
*** inner join = join
*** inner join is the default if you don't specify the type when you use the word join.
9. Self Join
-- Self Join allows you to compare rows within the same table.
-- Lets self join left_products table
select * from left_products t1
inner join left_products t2 on t1.product_id = t2.product_id;
-- Lets self join directors table
select * from directors d1
inner join directors d2 on d1.director_id = d2.director_id;
-- Lets self join finds all pair of movies that have the same movie length
select t1.movie_name,
t2.movie_name,
t1.movie_length
from movies t1
inner join movies t2
on t1.movie_length = t2.movie_length
and t1.movie_name <> t2.movie_name
order by t1.movie_length desc ;
-- Lets query hierarchical data like all directors and movies
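Since directors and movies live in two different tables, hierarchical data is easier to illustrate with a single self-referencing table; the t_employees table below is hypothetical (not part of the earlier schema) and only sketches the pattern:
create table t_employees (
employee_id int primary key,
employee_name varchar(100),
manager_id int references t_employees (employee_id)
);

insert into t_employees values (1, 'Alice', null);
insert into t_employees values (2, 'Bob', 1), (3, 'Carol', 1);

-- pair each employee with their manager via a self join
select e.employee_name as employee,
m.employee_name as manager
from t_employees e
left join t_employees m on e.manager_id = m.employee_id;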
10. Cross Join
-- In a Cross Join query, the result (also known as a Cartesian product) lines up each row in the left table with each row in the right table to present all possible combinations of rows.
-- Lets cross join left_products and right_products
select *
from left_products
cross join right_products;
-- Lets cross join actors with directors
select *
from actors
cross join directors;
11. Natural Join
-- A natural join creates an implicit join based on the columns that share the same name in the joined tables. A natural join can be an inner join, left join or right join.
-- If you do not specify the join type explicitly, PostgreSQL uses an inner join by default.

-- Lets join left_products and right_products
select * from left_products
natural join right_products;
equivalent:
select *
from left_products
inner join right_products using (product_id);
------------------------------------------------------------------------------------
                              Combining queries together
1. Union
The Union operator combines result sets from two or more select statements 
into a single result set.
To be able to use Union 
- The order and number of the columns in the select list of all queries must be the same.
- The data types must be compatible too.

-- Lets use Union of left_products and right_products tables
select product_id, product_name
from left_products
union
select product_id, product_name
from right_products;
- Union doesn't return duplicate records

-- What if we need duplicate records too?
select product_id, product_name
from left_products
union all
select product_id, product_name
from right_products;

-- Lets combine directors and actors tables together
select first_name,
last_name
from directors
union
select first_name,
last_name
from actors;

-- Can we use Union with Order by
select first_name,
last_name
from directors
union
select first_name,
last_name
from actors
order by first_name asc;

-- Lets combine all directors whose nationality is American, Chinese or Japanese with all female actors
select first_name,
last_name,
'directors' as "tablename"
from directors
where nationality in ('American', 'Chinese', 'Japanese')
union
select first_name,
last_name,
'actors' as "tablename"
from actors
where gender = 'F';

-- Select the first name and last name of all directors and actors who were born after 1970
select first_name,
last_name,
date_of_birth,
'directors' as "tablename"
from directors
where date_of_birth > '1970-12-31'
union
select first_name,
last_name,
date_of_birth,
'actors' as "tablename"
from actors
where date_of_birth > '1970-12-31';

-- Select the first name and last name of all directors and actors whose first name starts with 'A'
select first_name,
last_name,
'directors' as "tablename"
from directors
where first_name like 'A%'
union
select first_name,
last_name,
'actors' as "tablename"
from actors
where first_name like 'A%';

-- Can we combine different numbers of columns from each query?
NO. Each query in a Union must return the same number of columns.

2. Unioning tables with different number of columns
create table table1(
col1 int,
col2 int
);

create table table2(
col3 int
);

select col1, col2 from table1
union
select null as col1, col3 from table2;

3. Intersect
The Intersect operator returns only the rows that appear in both result sets.

To be able to use Intersect 
- The order and number of the columns in the select list of all queries must be the same.
- The data types must be compatible too.

-- Lets use Intersect on left_products and right_products tables
select product_id,
product_name
from left_products
intersect
select product_id,
product_name
from right_products;

-- Lets intersect first name and last name of directors and actors tables
select first_name,
last_name
from actors
intersect
select first_name,
last_name
from directors;

4. Except
The Except operator returns the rows from the first query that do not appear
in the output of the second query.

To be able to use Except 
- The order and number of the columns in the select list of all queries must be the same.
- The data types must be compatible too.

-- Lets use Except on left_products and right_products tables
select product_id,
product_name
from left_products
except
select product_id,
product_name
from right_products;

-- Lets Except first name and last name of directors and actors tables
select first_name,
last_name
from actors
except
select first_name,
last_name
from directors;

-- List all directors' first and last names, excluding any first/last name pair that also appears among female actors
select first_name,
last_name
from directors
except
select first_name,
last_name
from actors
where gender = 'F';
--------------------------------------------------------------------------------------
                                        Schema
1. Change schema
alter table humanresource.orders set schema public;
2. Schema search path
-- How to view the current schema?
select "current_schema"();
3. Schema Ownership
alter schema humanresources owner to pervin;
4. Grant Usage on Schema
grant usage on schema private to parvin;
grant select on all tables in schema private to parvin;
--------------------------------------------------------------------------------------
                                    Arrays
1. For ranges
select int4range(1, 4) @> int4range(2, 3)  as "contains",
daterange(current_date, current_date + 30) @> current_date + 15 as "contains value",
numrange(1.6, 5.2) && numrange(0,4);

2. Inclusion operators (@>, <@, &&)
select
array [1, 2, 3, 4] @> array [2, 3, 4] as "Contains",
array ['a', 'b'] <@ array ['a', 'b', 'c'] as "Contained by",
array [1, 2, 3, 4] && array [2, 3, 4] as "Is overlap";
3. Array constructions
-- With || concatenation
select
array [1, 2, 3] || array [4, 5, 6] as "Combine array";
select
array_cat(array [1, 2, 3], array [4, 5, 6]) as "Combine arrays via Array_cat";
-- Add an item to an array
select
4 || array [1, 2, 3] as "Adding to an array";
select
array_prepend(4, array [1, 2, 3]) as "Using prepend";

select
array [1, 2, 3] || 4 as "Adding to an array";

select
array_append(array [1, 2, 3], 4) as "Using append";
4. Array metadata
-- Array_ndims
select
array_ndims(array [[1], [2]]) as "Dimensions";

-- Array_dims
select
array_dims(array [[1], [2]]);

-- Array_length
select
array_length(array [1, 2, 3, 4], 1);
select
array_length(array []::integer[], 1);

-- Array_lower
select
array_lower(array [1, 2, 3, 4], 1);

-- Array_upper
select
array_upper(array [1, 2, 3, 4], 1);

-- Cardinality
select
cardinality(array[[1], [2], [3], [4]]),
cardinality(array[[1], [2], [3], [4], [5]]);

-- Array search function
Array_position
select
array_position(array ['Jan', 'Feb', 'March', 'April'], 'Feb');
select
array_position(array ['Jan', 'Feb', 'March', 'April', 'Feb'], 'Feb', 3);
select
array_positions(array ['Jan', 'Feb', 'March', 'April', 'Feb'], 'Feb');
-- Remove element
select
array_remove(array [1, 2, 3, 4], 4);
-- Array Replace
select
array_replace(array [1, 2, 3, 4, 2], 2, 5);
-- IN
select
20 in (1, 30 ,20) as "Result";

select
25 in (1, 30, 20) as "Result";
-- Not In
select
20 not in (1, 30 ,20) as "Result";

select
25 not in (1, 30, 20) as "Result";
-- All
select
25 = all(array [20, 25, 30, 35]) as "Result";

select
25 = all(array [25, 25]) as "Result";
-- Any/Some
select
25 = any(array [1, 2, 25]) as "Result";

select
20 = any(array [1, 2, 25, null]) as "Any with nulls";

select
25 <> any(array[1, 2, 25, null]) as "!= any with nulls";

select
25 = some(array [1, 2, 3, 4]) as "= some";

select
25 = some(array [1, 2, 3, 4, 25]) as "= some";

-- String to text array
select
string_to_array('1, 2, 3, 4', ',');
-- Setting a value to null
select
string_to_array('1,2,3,4,ABC', ',', 'ABC');

-- Setting an empty value to null
select
string_to_array('1,2,3,4,,6', ',', '');
-- Array to a string
select
array_to_string(array[1,2,3,4], '|');
5. Using arrays in tables
-- Lets create a table with array data
create table teachers (
teacher_id serial primary key,
name varchar,
phone text[]
);

create table teachers1 (
teacher_id serial primary key,
name varchar,
phone text array
);

-- Insert data into arrays
insert into teachers (name, phone)
values
(
'Adam', array ['(111)-222-3333', '(555)-666-7777']
);

insert into teachers (name, phone)
values
('Linda', '{"(111)-222-3333"}'),
('Jeff', '{"(222)-555-3333", "(444)-456-6868"}');
-- Query array data
select
name,
phone
from teachers;

select
name,
phone[1]
from teachers;

select
*
from teachers
where phone [1] = '(111)-222-3333';

select
*
from teachers
where '(111)-222-3333' = any (phone);

-- Modify array contents
update teachers
set phone [2] = '(800)-123-4567'
where teacher_id = 2;

-------------------------------------------------------------------------------------
                                JSON (JavaScript Object Notation)
1. How will you represent a JSON object in PostgreSQL
select '{"title":"The lord of the rings"}';
-- Cast data type to make it JSON
select '{"title":"The lord of the rings"}'::json;
-- Create a table called "books"
create table books (
book_id serial primary key,
book_info jsonb
);
-- Insert single record
insert into books (book_info)
values ('
{
"title": "Book title1",
"author": "author1"
}');
-- Insert multiple records
insert into books (book_info)
values ('
{
"title": "Book title1",
"author": "author1"
}'),
('{
"title": "Book title1",
"author": "author1"
}'),
('{
"title": "Book title1",
"author": "author1"
}');
-- The -> operator returns the JSON object field as JSON (the value appears in quotes)
select book_info from books;

select book_info -> 'title' from books;
-- The operator ->> returns the JSON object field as text
select book_info ->> 'title', 
book_info ->> 'author'
from books;
-- Select and filter data
select book_info ->> 'title',
book_info ->> 'author'
from books
where book_info ->> 'author' = 'author1';

-- Update JSON data
-- Lets insert a sample record
insert into books (book_info) values
('{"title":"Book title 10", "author":"author10"}');
-- Lets update a record
update books
set book_info = book_info || '{"author":"Adnan"}'
where book_info ->> 'author' = 'author10';

update books
set book_info = book_info || '{"title":"The future 1.0"}'
where book_info ->> 'author' = 'author10';

-- Add additional field like "Best Seller" with boolean value
update books
set book_info = book_info || '{"Best Seller":"true"}'
where book_info ->> 'author' = 'Adnan'
returning *;
-- Lets add multiple key/value category, pages etc.
update books
set book_info = book_info || '{"Best Seller":"true", "pages":250}'
where book_info ->> 'author' = 'Adnan'
returning *;
-- Delete Best Seller boolean key/value
update books
set book_info = book_info - 'Best Seller'
where book_info ->> 'author' = 'Adnan'
returning *;
-- Add a nested array data in JSON e.g. availability_locations like "New York"
update books
set book_info = book_info || '{"availability_locations": ["New York", "New Jersey"]}'
where book_info ->> 'author' = 'Adnan'
returning *;
-- Delete from arrays via path '#-'
update books
set book_info = book_info #- '{availability_locations, 1}'
where book_info ->> 'author' = 'Adnan'
returning *;
-- Create JSON from table
-- Lets output directors table into JSON format
select row_to_json(directors) from directors;
select row_to_json(t) from
(
select
director_id,
first_name,
last_name,
nationality
from directors
) as t;

-- Use json_agg() to aggregate data
-- Lets list movies for each directors
select *,
(select json_agg(x)
from (select movie_name
from movies
where director_id = directors.director_id) as x)
from directors;
-- Select director_id, first_name, last_name and all movies created by that director
select director_id,
first_name,
last_name,
(select json_agg(x)
from (select movie_name
from movies
where director_id = directors.director_id) as x)
from directors;

-- Build a JSON array
-- Lets build a sample array with numbers
select json_build_array(1,2,3,4,5);
-- Can we do strings and numbers together
select json_build_array(1,2,3,4,5, 'Hi');
-- Build an object from alternating keys and values
select json_build_object(1,2,3,4,5,6,7,'Hi');
-- Can we supply key/value style data?
select json_build_object('name','Adnan','email','a@b.com');
select json_object('{name,email}', '{"Adnan","a@b.com"}');
-- Create documents from data
create table directors_docs(
id serial primary key,
body jsonb
);
-- lets get all movies by each directors in JSON array format
select
director_id,
first_name,
last_name,
date_of_birth,
nationality,
(
select json_agg(x) as all_movies from
(
select
movie_name
from movies
where director_id = directors.director_id
) x
)
from directors;

select row_to_json(a)::jsonb 
from (select director_id,
first_name,
last_name,
date_of_birth,
nationality,
(select json_agg(x) as all_movies
from (select movie_name
from movies
where director_id = directors.director_id) as x)
from directors) as a;
-- Insert the data
insert into directors_docs (body)
select row_to_json(a)::jsonb
from (select director_id,
first_name,
last_name,
date_of_birth,
nationality,
(select json_agg(x) as all_movies
from (select movie_name
from movies
where director_id = directors.director_id) as x)
from directors) as a;

-- Null values in JSON documents
select jsonb_array_elements(body->'all_movies') from directors_docs
where (body->'all_movies') is not null;

select jsonb_array_elements(body->'all_movies') from directors_docs;

-- Count total movies for each director
select *,
jsonb_array_length(body->'all_movies')
as total_movies
from directors_docs
order by jsonb_array_length(body->'all_movies') desc;
-- List all the keys within each JSON row
jsonb_object_keys
select *,
jsonb_object_keys(body)
from directors_docs;
-- jsonb_each
select
j.key,
j.value
from directors_docs, jsonb_each(directors_docs.body) j;
-- Turning JSON document to table format
jsonb_to_record
select
j.*
from directors_docs, jsonb_to_record(directors_docs.body) j(
director_id int,
first_name varchar,
nationality varchar
);
-- The existence operator
-- Find all first name equal to 'John'
select * from directors_docs
where body->'first_name' ? 'John';
-- The Containment operator
select *
from directors_docs
where body -> 'all_movies' @> '[{"movie_name":"Toy Story"}]';
-- JSON search 
-- Find all records where first name starting with capital 'J'
select *
from directors_docs
where body ->> 'first_name' like 'J%';
-- Find all records where director_id is greater than 2
select *
from directors_docs
where (body ->> 'director_id')::integer > 2;
-- Find all records where director_id is in 1,2,3,4,5 and 10
select *
from directors_docs
where (body ->> 'director_id')::integer in (1,2,3,4,5,10);


--------------------------------------------------------------------------------------
                                Index
1. Create an Index
-- Lets create an index on order_date on order table
create index idx_orders_order_date on orders (order_date);
2. Create Unique indexes
-- Primary key and indexes
-- Lets create unique index on products table on product_id
create unique index idx_u_products_product_id on products (product_id);
-- Lets create a unique index on employees table on employee_id
create unique index idx_u_employees_employee_id on employees (employee_id);
-- How about unique on multiple columns
-- orders > order_id, customer_id
create unique index idx_u_orders_order_id_customer_id on orders (order_id, customer_id);
-- 
create table t1 (
id serial primary key,
tag text
);

create unique index idx_u_t1_tag on t1 (tag);

insert into t1 (tag) values ('a'), ('b');
-- all indices
select *
from pg_indexes
where schemaname = 'public';
-- Size of the table index
select
pg_indexes_size('orders');
select pg_size_pretty(pg_indexes_size('orders'));
-- all stats
select *
from pg_stat_all_indexes
where relname = 'orders'
order by relname, indexrelname;
-- Drop an Index
Cascade
If the index has dependent objects, you use the cascade option to automatically drop 
these objects and all objects that depends on those objects.
Restrict
The restrict option instructs PostgreSQL to refuse to drop the index if any objects 
depend on it. The drop index uses restrict by default.
Concurrently
By default, when you execute the drop index statement, PostgreSQL acquires an exclusive lock on
the table and blocks other accesses until the index removal completes; the concurrently option avoids this lock.
drop index idx_suppliers_region;
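-- Hedged sketches of these options, reusing indexes created earlier in this section
drop index concurrently if exists idx_orders_order_date; -- avoids the exclusive lock; cannot run inside a transaction block
drop index if exists idx_u_products_product_id cascade; -- also drops objects that depend on the index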
-- SQL Execution stages
Four stages
-----------
parser  - handles the textual form of the statement (the SQL text) and verifies whether
          it is correct or not
rewriter  - applying any syntactic rules to rewrite the original SQL statement
optimizer  - finding the very fastest path to the data that the statement needs
executor  - responsible for effectively going to the storage and retrieving (or inserting)
            the data; it gets physical access to the data
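
-- To see what plan the optimizer chose for a statement, you can run explain / explain analyze (a minimal sketch against the orders table used above; the date literal is arbitrary)
explain analyze
select * from orders
where order_date = '1996-07-04';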

--------------------------------------------------------------------------------------
                                    Views
1. Creating a view
-- Create a view to include all movies with directors first and last names
-- Lets look at normal approach
create or replace view v_movie_quick as
select movie_name,
movie_length,
release_date
from movies mv;
create or replace view v_movies_directors_all as
select
mv.movie_id,
mv.movie_name,
mv.movie_length,
mv.movie_lang,
mv.age_certificate,
mv.release_date,
mv.director_id,
d.first_name,
d.last_name,
d.date_of_birth,
d.nationality
from movies mv
inner join directors d on d.director_id = mv.director_id;
-- How to query data through a view?
select * from v_movie_quick;
select * from v_movies_directors_all;
2. Rename a view
alter view v_movie_quick rename to v_movie_quick2;
3. Delete a view
drop view v_movie_quick;
4. Using conditions / filters with views
-- Create a view to list all movies released after 1997
create or replace view v_movies_after_1997 as
select * from movies
where release_date >= '1997-12-31'
order by release_date desc;
-- Select all movies with english language only from the view
select *
from v_movies_after_1997
where movie_lang = 'English';
-- Select all movies with directors American and Japanese nationality
-- without views
select * from movies mv
inner join directors d on mv.director_id = d.director_id
where d.nationality in ('American', 'Japanese');
-- with views
*** We do not need to worry about giving join table aliases in views, 
instead we just reference the column name
select * from v_movies_directors_all
where nationality in ('American', 'Japanese');
-- A view using select and union with multiple tables
-- Lets have a view for all people in a movie, like actors and directors, with first and last name
create view v_all_actors_directors as
select
first_name,
last_name,
'actors' as people_type
from actors
union all
select
first_name,
last_name,
'directors' as people_type
from directors;
-- Connecting multiple tables with a single view
-- Lets connect movies, directors, movies revenues table with a single view
create view v_movies_directors_revenues as
select
mv.movie_id,
mv.movie_name,
mv.movie_length,
mv.movie_lang,
mv.age_certificate,
mv.release_date,
d.director_id,
d.first_name,
d.last_name,
d.nationality,
d.date_of_birth,
r.revenue_id,
r.revenues_domestic,
r.revenues_international
from movies mv
inner join directors d on d.director_id = mv.director_id
inner join movies_revenues r on r.movie_id = mv.movie_id;

select * from v_movies_directors_revenues;
5. Changing views
-- A regular view 
* does not store data physically
* always give updated data
-- Lets insert some data
insert into directors (first_name)
values ('test name1');

delete from directors where director_id = 39;
6. An updatable view with CRUD operations
-- Lets create an updatable view for directors table
create or replace view vu_directors as
select first_name, last_name
from directors;
-- Lets add some records via a view and not from the underlying table
insert into vu_directors (first_name)
values ('dir1'), ('dir2');
-- Lets check the contents of directors table via view
select * from vu_directors;
-- Lets delete some records via a view and not from the underlying table
delete from vu_directors
where first_name = 'dir1';
7. Update view using with check option
-- Lets create a table with countries
create table countries (
country_id serial primary key,
country_code varchar(4),
city_name varchar(100)
);
-- Lets insert some sample data to base table
insert into countries (country_code, city_name)
values ('US', 'New York'),
('US', 'New Jersey'),
('UK', 'London');
-- Now lets create a simple view called v_cities_us to list all US based cities
create or replace view v_cities_us as
select
country_id,
country_code,
city_name
from countries
where country_code = 'US';
-- Now lets insert some US based cities via our view
insert into v_cities_us(country_code, city_name)
values ('US', 'California');
-- What if a user added a UK based location in our view
insert into v_cities_us(country_code, city_name)
values ('UK', 'Manchester');
-- Lets update our view v_cities_us using with check option
create or replace view v_cities_us as
select country_id,
country_code, city_name
from countries
where country_code = 'US'
with check option;
-- Now, lets try to add a UK based city say 'Leeds'
insert into v_cities_us (country_code, city_name)
values ('UK', 'Leeds');
-- Lets try the update operation on view having with check option, can we add data
update v_cities_us
set country_code = 'UK'
where city_name = 'California';
8. Using Local and Cascaded in with check option
-- Lets create a view for all cities starting with letter 'C'
create or replace view v_cities_c as
select country_id,
country_code, city_name
from countries
where city_name like 'C%';
-- We will create a new simple view v_cities_c_us, to list:
-- all cities starting with 'C'
-- and within country code US
create or replace view v_cities_c_us as
select country_id,
country_code, city_name
from v_cities_c
where country_code = 'US'
with local check option;
-- Lets try to insert data for a US city
insert into v_cities_c(country_code, city_name)
values ('US', 'Connecticut');
insert into v_cities_c(country_code, city_name)
values ('US', 'Los Angeles');
-- Now lets try Cascaded and see what is the effect 
create or replace view v_cities_c_us as
select country_id,
country_code, city_name
from v_cities_c
where country_code = 'US'
with cascaded check option;
-- Now lets try to insert some US based city but with city name starting other than 'C'
insert into v_cities_c_us(country_code, city_name)
values ('US', 'Boston');
*** With the cascaded check option clause, PostgreSQL checks the condition not only for the
current view but also for all the underlying views
9. Materialized View
-- Create a materialized view
create materialized view if not exists
mv_directors as
select first_name, last_name
from directors
with data;
create materialized view if not exists
mv_directors_nodata as
select first_name, last_name
from directors
with no data;
** With no data, the view is flagged as unreadable. It means that you cannot query 
data from the view until you load data into it.
refresh materialized view mv_directors_nodata;

select * from mv_directors_nodata;
-- Drop a materialized view
drop materialized view mv_directors_nodata;
-- Changing materialized view data
** We cannot directly change data in materialized view
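-- A quick illustration (assuming mv_directors from above): writes are rejected,
-- the data only changes on refresh
insert into mv_directors (first_name) values ('test'); -- fails, materialized views are read-only
refresh materialized view mv_directors;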
-- How to check if a materialized view is populated or not?
create materialized view
mv_directors2 as select
first_name from directors
with no data;

select relispopulated
from pg_class where
relname = 'mv_directors2';
-- How to refresh data in a materialized view
-- Lets create a materialized view of table directors
create materialized view mv_directors_us as
select director_id, first_name, last_name,
date_of_birth, nationality
from directors
where nationality = 'American'
with no data;

refresh materialized view mv_directors_us;
-- Create a unique index, say on director_id, for view mv_directors_us as a requirement
-- for refreshing concurrently
create unique index idx_u_mv_directors_us_director_id
on mv_directors_us (director_id);
-- Now it is possible to use concurrently
refresh materialized view concurrently mv_directors_us;
10. Using materialized view for a website page clicks analytics
-- Lets create the page_clicks table
create table page_clicks
(
rec_id serial primary key,
page varchar(200),
click_time timestamp,
user_id bigint
);
-- Lets populate some sample data with 10,000 rows of fake data
insert into page_clicks(page, click_time, user_id)
select
(
case (random() * 2) :: int
when 0 then 'klickanalytics.com'
when 1 then 'clickapis.com'
when 2 then 'google.com'
end
) as page,
now() as click_time,
(floor(random() * (111111111-1000000 + 1) +1000000)) :: int as user_id
from generate_series(1, 10000) seq;
-- To analyse the daily trend, i.e. how many page clicks happened per day, lets create
-- a materialized view
create materialized view mv_page_clicks as
select date_trunc('day', click_time) as day,
page, count(*) as total_clicks
from page_clicks
group by day, page;
-- and to refresh the view we can use
refresh materialized view mv_page_clicks;
-- Now imagine the above materialized view growing bigger and bigger every single day
-- with the growing amount of data, so it will be better to split it into a daily
-- materialized view
create materialized view mv_page_clicks_daily as
select click_time as day,
page, count(*) as cnt
from page_clicks
where click_time >= date_trunc('day', now())
and click_time < timestamp 'tomorrow'
group by day, page;
-- Lets create a unique index
create unique index
idx_mv_page_clicks_daily_day_page
on mv_page_clicks_daily (day, page);
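-- With that unique index in place, the daily view can also be refreshed without blocking
-- readers (a hedged note: the view must already be populated)
refresh materialized view concurrently mv_page_clicks_daily;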

--------------------------------------------------------------------------------------
                                    Subqueries
1. Filtering with Subqueries in a where clause
Lets take a scenario:
a. Find the average movie length and then
b. use the result from the first query in a select to find the movies which are
longer than the average movie length

select movie_name, movie_length
from movies
where movie_length >
(select avg(movie_length)
from movies)
order by movie_length;
-- Can we filter the previous records for english movies only
select movie_name, movie_length
from movies
where movie_length >
(select avg(movie_length)
from movies
where movie_lang = 'English')
order by movie_length;
-- Get first and last name of all actors who are younger than Douglas Silva
select * from actors
where date_of_birth >
(select date_of_birth from actors
where first_name = 'Douglas');

2. Subquery with in operator
-- What if the inner query (subquery) returns multiple records?
-- Find all movies where domestic revenues are greater than 200
select movie_name, movie_lang
from movies
where movie_id in (select movie_id
from movies_revenues
where revenues_domestic > 200);
-- Find all movies where domestic revenues are higher than the international revenues
select movie_name, movie_lang
from movies
where movie_id in (select movie_id
from movies_revenues
where revenues_domestic > revenues_international);
3. Subquery with JOINs
-- List all the directors whose movies made more than the average total revenues
-- of all English movies
-- Lets understand outer and inner query
-- outerquery : List all the directors
-- innerquery : movies made more than the average total revenues of all english movies
select d.director_id,
sum(r.revenues_domestic + r.revenues_international) as
"total_revenues"
from directors d
inner join movies m on d.director_id = m.director_id
inner join movies_revenues r on m.movie_id = r.movie_id
where (r.revenues_domestic + r.revenues_international) >
(
select
avg(revenues_domestic + revenues_international)
as "avg_total_revenues"
from movies_revenues
)
group by d.director_id;
4. Order entries in a union without order by
select * from (select first_name, 0 myorder, 'actors' as "tablename"
from actors
union
select first_name, 1, 'directors' as "tablename"
from directors) t
order by myorder;
-- Subquery with an alias
select *
from
(select * from movies) t;

5. A select without from
select (
select max (revenues_domestic)
from movies_revenues
);
6. Correlated Subqueries
-- List movie name, movie language and age certificate for all movies with a movie
-- length above the minimum movie length for each age certificate
select movie_name, movie_lang,
movie_length,
age_certificate
from movies mv1
where mv1.movie_length >
(select min(movie_length)
from movies mv2
);
select movie_name, movie_lang,
movie_length,
age_certificate
from movies mv1
where mv1.movie_length >
(select min(movie_length)
from movies mv2
where mv1.age_certificate = mv2.age_certificate
);
-- List first name, last name and date of birth for all actors who are younger than the oldest actor of the same gender
select first_name, last_name,
date_of_birth, gender
from actors ac1
where date_of_birth >
(select min(date_of_birth)
from actors ac2
where ac1.gender = ac2.gender)
order by ac1.gender, ac1.date_of_birth;
7. Using In with subquery
-- Find suppliers that are in the same countries as customers
select * from suppliers
where country in (
select country from customers);
-- Find customers that are in the same cities as suppliers
select * from customers
where city in
(select city from suppliers);
8. Using Any with subquery
-- Find customers details where they ordered more than 20 items in a single order
select * from customers
where customer_id = any
(
select orders.customer_id from orders
inner join order_details on orders.order_id = order_details.order_id
where quantity > 20
);
9. Using All with subquery
-- Find all products where their order amount was lower than the average amount
-- of all the products
select * from products
inner join order_details on products.product_id = order_details.product_id
where
((order_details.unit_price * order_details.quantity) - order_details.discount) < all
(select
avg((unit_price * quantity) - order_details.discount)
from order_details);
10. Subquery using exists
-- Find suppliers with products that cost less than $100
select * from suppliers
where exists(
select * from products
where unit_price < 100
and products.supplier_id = suppliers.supplier_id
);

--------------------------------------------------------------------------------------
                           CTE (Common Table Expression)
-- Syntax
with cte_name (column_list) as (
cte_query_definition
)
statement;
1. Lets create a number series from 1 to 10
with num as
(
select * from generate_series(1, 10) as id
)
select * from num;
-- List all movies by director_id = 1
with cte_director_1 as
(
select *
from movies mv
inner join directors d on d.director_id = mv.director_id
where d.director_id = 1
)
select * from cte_director_1;
-- Lets view all long movies, where long movies are 120 minutes and more
with cte_long_movies as
(
select movie_name, movie_length,
(
case
when movie_length < 100 then 'short'
when movie_length < 120 then 'medium'
else 'long'
end
) as m_length
from movies
)
select * from cte_long_movies
where m_length = 'long';
2. Combine CTE with a table
-- Lets calculate total revenues for each director
with cte_movie_count as
(
select d.director_id,
sum(mr.revenues_domestic + mr.revenues_international) as total_revenues
from directors d
inner join movies m on d.director_id = m.director_id
inner join movies_revenues mr on m.movie_id = mr.movie_id
group by d.director_id
)
select d.director_id, d.first_name, d.last_name, cte.total_revenues
from cte_movie_count cte
inner join directors d on d.director_id = cte.director_id;

WITH cte_movie_count AS
(
SELECT
d.director_id,
SUM(COALESCE(r.revenues_domestic,0) + COALESCE(r.revenues_international,0)) AS total_revenues
FROM directors d
INNER JOIN movies mv ON mv.director_id = d.director_id
INNER JOIN movies_revenues r ON r.movie_id = mv.movie_id
GROUP BY d.director_id
)
SELECT
d.director_id,
d.first_name,
d.last_name,
cte.total_revenues
FROM cte_movie_count cte
INNER JOIN directors d ON d.director_id = cte.director_id;

3. Simultaneous delete, insert via cte
-- lets first create our tables
create table articles (
article_id serial primary key,
title varchar
);
-- create the articles_delete table
create table articles_delete as select * from articles limit 0; 

-- Lets insert some data in articles
insert into articles (title) values
('article1'),
('article2'),
('article3'),
('article4');
-- Lets create our cte
with cte_delete_articles as
(
delete from articles
where article_id = 1
returning *
)
insert into articles_delete
select * from cte_delete_articles;
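-- Optional check: the deleted row is gone from articles and now sits in articles_delete
select * from articles;
select * from articles_delete;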
-- Lets do similar operations with insert
-- suppose we want to move all data from articles to another table articles_insert
-- first create the table articles_insert
create table articles_insert as select * from articles limit 0;
-- Now lets do our cte
with cte_move_articles as
(
delete from articles
returning *
)
insert into articles_insert
select * from cte_move_articles;
4. Recursive cte
-- Creating a time series with recursive cte
with recursive series (list_num) as
(
select 10
union all
select list_num + 5 from series
where list_num + 5 <= 50
)
select list_num from series;
-- Lets create our sample table which will contains some hierarchical data
create table items (
pk serial primary key,
name text not null,
parent int
);
-- Lets insert some sample data
insert into items (pk, name, parent) values
(1, 'vegetables', 0),
(2, 'fruits', 0),
(3, 'apple', 2),
(4, 'banana', 2);
-- Lets show data in parent-child relationship
with recursive cte_tree as
(
select name, pk, 1 as tree_level
from items
where parent = 0
union
select tt.name || ' -> ' || ct.name,
ct.pk, tt.tree_level + 1
from items ct
join cte_tree tt on tt.pk = ct.parent

)
select tree_level, name from cte_tree
order by tree_level;

-------------------------------------------------------------------------------------
                                    Grouping sets
1. Lets create our test table in which we will be calculating the subtotals
create table courses (
course_id serial primary key,
course_name varchar not null,
course_level varchar not null,
sold_units int not null
);
-- Lets insert some test data
insert into courses(course_name, course_level, sold_units) values
('Machine Learning with Python', 'Premium', 100),
('Data Science Bootcamp', 'Premium', 50),
('Introduction to Python', 'Basic', 200),
('Understanding MongoDB', 'Premium', 100),
('Algorithm Design in Python', 'Premium', 200);
-- Lets find total unit sold by each course level
select course_level,
sum(sold_units)
from courses
group by course_level;
-- Lets organize a bit more via order by
select course_level,
course_name,
sum(sold_units) as "Total Sold"
from courses
group by course_level, course_name
order by course_level, course_name;
-- Lets use rollup
select course_level,
course_name,
sum(sold_units) as "Total Sold"
from courses
group by rollup (course_level, course_name)
order by course_level, course_name;
-- We can even do a partial rollup too
select course_level,
course_name,
sum(sold_units) as "Total Sold"
from courses
group by
course_level,
rollup (course_name)
order by course_level, course_name;

-- Adding Subtotals with rollup
-- Lets create a test table of inventory items
create table inventory (
inventory_id serial primary key,
category varchar not null,
sub_category varchar not null,
product varchar not null,
quantity int
);
-- Lets insert some sample data
insert into inventory(category, sub_category, product, quantity) values
('Furniture', 'Chair', 'Black', 10),
('Furniture', 'Chair', 'Brown', 10),
('Furniture', 'Desk', 'Blue', 10),
('Equipment', 'Computer', 'Mac', 5),
('Equipment', 'Computer', 'PC', 5),
('Equipment', 'Monitor', 'Dell', 5);
-- Lets group the data by category and sub_category, i.e. products broken down by
-- category and sub_category
select category, sub_category,
sum(quantity) as "Quantity"
from inventory
group by category, sub_category;
-- What if we want to see the subtotal of each category and a final total?
-- Lets do grouping sets on category and sub_category using rollup
select category, sub_category,
sum(quantity) as "Quantity"
from inventory
group by rollup (category, sub_category);
-- Using grouping with rollup
select category, sub_category,
sum(quantity) as "Quantity",
grouping(category) as "Category Grouping",
grouping(sub_category) as "SubCategory Grouping"
from inventory
group by rollup (category, sub_category)
order by category, sub_category;
-- We can make it even prettier by handling the 0 or 1 output of grouping, as sketched below
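-- A sketch of that idea: wrap grouping() in a case to label the subtotal and grand total rows
select
case when grouping(category) = 1 then 'All categories' else category end as category,
case when grouping(sub_category) = 1 then 'All sub categories' else sub_category end as sub_category,
sum(quantity) as "Quantity"
from inventory
group by rollup (category, sub_category)
order by 1, 2;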

--------------------------------------------------------------------------------------
                                    Crosstab
1. Creating table
create table scores(
score_id serial primary key ,
name varchar,
subject varchar,
score numeric(4,2),
score_date date
);

insert into scores(name, subject, score, score_date) values
('Adam', 'Math', 10, '2020-01-01'),
('Adam', 'English', 8, '2020-02-01'),
('Adam', 'History', 7, '2020-03-01'),
('Adam', 'Music', 9, '2020-04-01'),
('Linda', 'Math', 12, '2020-01-01'),
('Linda', 'English', 10, '2020-02-01'),
('Linda', 'History', 8, '2020-03-01'),
('Linda', 'Music', 6, '2020-04-01');
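-- crosstab() comes from the tablefunc extension; enable it once per database if needed
create extension if not exists tablefunc;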
select * from crosstab (
$$ select name, subject, score from scores order by 1, 2 $$
) as ct (
name varchar,
English numeric,
History numeric,
Math numeric,
Music numeric
);

select * from crosstab(
$$ select location, year, sum(raindays) from rainfalls group by location, year order by location, year $$
) as ct(
"Location" text,
"2012" bigint,
"2013" bigint,
"2014" bigint,
"2015" bigint,
"2016" bigint,
"2017" bigint
);
select * from crosstab(
$$ select location, month, sum(raindays) from rainfalls group by location, month order by location, month $$
) as ct(
Location text,
"jan" bigint,
"feb" bigint,
"mar" bigint,
"apr" bigint,
"may" bigint,
"jun" bigint,
"jul" bigint,
"aug" bigint,
"sep" bigint,
"oct" bigint,
"nov" bigint,
"dec" bigint
);

--------------------------------------------------------------------------------------
                                Window Functions
1. Analyze Global trades data
-- Create our trades table
create table trades (
region text,
country text,
year int,
imports numeric(50, 0),
exports numeric(50, 0)
);
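-- The course loads a global trades dataset from a file; as a stand-in, a few made-up
-- rows like these are enough to try the queries below (values are purely illustrative)
insert into trades (region, country, year, imports, exports) values
('Europe', 'France', 1994, 250000000000, 240000000000),
('Europe', 'France', 2005, 480000000000, 460000000000),
('Europe', 'Belgium', 2015, 340000000000, 360000000000),
('Europe', 'Belgium', 2016, 350000000000, 370000000000),
('North America', 'USA', 2005, 1700000000000, 900000000000),
('North America', 'USA', 2016, 2200000000000, 1450000000000);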
-- Using aggregate functions
-- Overall min, max and average imports
select min(imports),
max(imports),
avg(imports)
from trades;
-- Min, max and average imports by region
select region,
min(imports) / 100000,
max(imports) / 100000,
avg(imports) / 100000
from trades
group by region;

-- Using group with rollup
select region,
round((avg(imports) / 1000000000), 2)
from trades
group by rollup (region);
select region, country,
round((avg(imports)/1000000000), 2)
from trades
where country in ('USA', 'France', 'Germany', 'Brazil')
group by rollup (region, country);

-- Using group with cube
select region, country,
round((avg(imports)/1000000000), 2)
from trades
where country in ('USA', 'France', 'Germany', 'Brazil')
group by cube (region, country);
-- Using group with grouping sets
select region, country,
round((avg(imports)/1000000000), 2)
from trades
where country in ('USA', 'France', 'Germany', 'Brazil')
group by grouping sets ((),region, country);

-- Using filter clause
select region, avg(exports) as avg_all,
avg(exports) filter ( where year < 1995 )
from trades
group by region;
-- Using window functions
select avg(exports) from trades; -- 72443407670.13915858

select country, year, imports, avg(exports) over () as avg_exports
from trades;
-- Partition the data
-- Lets take the average exports per country
select country, year, imports, avg(exports) over (partition by country) as avg_exports
from trades;
-- Can we filter the data in partition by?
select country, year, imports, avg(exports) over (partition by year < 2000 ) as avg_exports
from trades;
-- Ordering inside window
-- Lets take a running minimum of exports per country for years after 2001
select country, year, exports,
min(exports) over (partition by country order by year)
from trades
where year > 2001;


select country, year, exports,
min(exports) over (partition by country order by year)
from trades
where year > 2001 and country in ('USA', 'France');

-- Rank and Dense Rank
select year, exports,
rank() over (order by exports desc ) as r
from trades
where country = 'USA';
select * from (select year,
exports,
rank() over (order by exports desc ) as r
from trades
where country = 'USA') as T;
select * from (select year,
exports,
rank() over (order by exports desc ) as r
from trades
where country = 'USA') as T
where r = 5;

-- NTILE function
-- Get USA exports after year 2000 into 10 buckets
select year, exports,
ntile(10) over (order by exports)
from trades
where country = 'USA' and year > 2000;

select country, year, exports,
ntile(10) over (order by exports)
from trades
where country in('USA', 'France') and year > 2000;

select * from (select country,
year,
exports,
ntile(10) over (order by exports) as r
from trades
where country in ('USA', 'France')
and year > 2000) as T where r = 2;

-- Lead and Lag
-- Lets calculate the difference of exports from one year to another year for Belgium
select year, exports,
lead(exports, 1) over (order by year)
from trades
where country = 'Belgium';

select year, exports,
lag(exports, 1) over (order by year)
from trades
where country = 'Belgium';
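-- and the actual year-to-year difference, using lag (a small sketch)
select year, exports,
exports - lag(exports, 1) over (order by year) as export_growth
from trades
where country = 'Belgium';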

-- First_value, last_value, nth_value
select year, imports,
first_value(imports) over (order by year)
from trades
where country = 'Belgium';

select year, imports,
last_value(imports) over (order by year)
from trades
where country = 'Belgium';

select year, imports,
last_value(imports) over (order by country)
from trades
where country = 'Belgium';
select year, imports,
nth_value(imports, 2) over (order by year)
from trades
where country = 'Belgium';
select country, year, imports,
first_value(imports) over (partition by country order by year)
from trades
where country in ('Belgium', 'USA') and
year > 2014;
-- Row_number
select year, imports,
row_number() over (order by imports)
from trades
where country = 'France';
select country, year, imports,
row_number() over (partition by country order by imports)
from trades;
-- Get the 4th lowest imports value from each country
select * from (
select country, year, imports,
row_number() over (partition by country order by imports)
from trades) as t
where row_number = 4;

-- Finding correlations
select country,
corr(imports, exports)
from trades
group by country;
-- Select first name, last name, salary and department name for all employees and then
-- use row_number to order by salary within each department
select e.first_name,
e.last_name,
e.salary,
d.department_name,
row_number() over (partition by department_name order by e.salary)
from employees e
inner join departments d on e.department_id = d.department_id;
-- Select first name, last name, salary and department name for the 2nd highest paid
-- employee in each department
select * from (select e.first_name,
e.last_name,
e.salary,
d.department_name,
row_number() over (partition by department_name order by e.salary desc)
from employees e
inner join departments d on e.department_id = d.department_id) as t
where row_number = 2;

-- Using distinct with row_number
-- Get all distinct salaries and assign row_number on them
-- using distinct in select from
select distinct
salary,
row_number() over (order by salary)
from employees
order by salary desc;
select salary,
row_number() over (
order by salary desc
)
from (
select distinct
salary
from employees
) as t;
select e.first_name,
e.last_name,
e.salary,
d.department_name,
row_number() over (
partition by d.department_name
order by e.salary
)
from employees e
inner join departments d on e.department_id = d.department_id;
select * from (
select e.first_name,
e.last_name,
e.salary,
d.department_name,
row_number() over (
partition by d.department_name
order by e.salary
)
from employees e
inner join departments d on e.department_id = d.department_id) as t
where row_number between 6 and 10;

-- Using over() employee salary percentage vs. total
select first_name,
salary,
sum(salary) over ()
from employees;

select first_name,
salary,
salary / sum(salary) over () * 100
from employees;

-- Difference compared to average
select first_name, salary,
salary - avg(salary) over () as avg,
avg(salary) over () as total_avg
from employees;
-- Cumulative totals using window function
select first_name,
salary,
sum(salary) over(),
sum(salary) over(order by salary desc rows between unbounded preceding and current row)
from employees;
-- Comparing with next values
select first_name,
salary,
lead(salary) over (order by salary desc ),
salary - lead(salary, 1) over(order by salary desc) as difference_next
from employees;
-- Compared to lowest-paid employee
select first_name,
salary,
salary - last_value(salary) over w as more,
((salary - last_value(salary) over w) / last_value(salary) over w) * 100 as pct_more
from employees
window w as (order by salary desc rows between unbounded preceding and unbounded following);

-- Rank and dense_rank
select first_name,
salary,
rank() over w,
dense_rank() over w
from employees
window w as (order by salary desc );
-- Rank per department and global rank
select first_name,
salary,
department_id,
rank() over w_department,
rank() over w_all_departments
from employees
window w_department as (partition by department_id order by salary desc ),
w_all_departments as (order by salary desc )
order by department_id, salary desc;
-- partition by departments
select first_name,
salary,
department_id,
avg(salary) over (),
avg(salary) over (partition by department_id)
from employees;


-------------------------------------------------------------------------------------
                      Using regular expressions for text patterns
-- similar to
select 'same' similar to 'same';
select 'same' similar to 's%';
select 'same' similar to '%s';
-- | denotes alternation (either of two alternatives)
select 'same' similar to '%(s|a)%';
select 'same' similar to '(m|e)%';
-- POSIX regular expression
select 'same' ~ 'same' as result;
select 'same' ~ 'Same' as result;
select 'same' ~* 'Same' as result;
select 'same' !~ 'Same' as result;
select 'same' !~* 'Same' as result;
-- Substring with regular expression
-- single character
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '.');
-- all characters
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '.*');
-- any character after say 'movie'
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'movie.+');
-- one or more word characters from the start
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\w');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\w.');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\w.+');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\w+');
-- the last word, anchored at the end of the string
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\w+.$');
-- 8 a.m or p.m
-- ? makes the preceding item optional; (?: ) is a non-capturing group (matched but not reported)
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\d{1,2}');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\d{1,2} (a.m|p.m)');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\d{1,2} (?:a.m|p.m)');
-- Lets get the year
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\d{2}');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from '\d{4}');
-- Either the word 'Nov' or 'Dec'
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'Nov|Dec');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'Nov|Dec1');
-- Dec 10, 2020
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'Dec');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'Dec \d{2}');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'Dec \d{2},');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'Dec \d{2}, \d');
select substring('The movie will start at 8 p.m on Dec 10, 2020.' from 'Dec \d{2}, \d{4}');
-- Regexp_matches
-- single match
select regexp_match('Amazing #PostgreSQL', '#');
select regexp_match('Amazing #PostgreSQL', '#([A-Za-z0-9_])');
select regexp_match('Amazing #PostgreSQL', '#([A-Za-z0-9_]+)');
select regexp_matches('Amazing #PostgreSQL', '#([A-Za-z0-9_]+)');
select regexp_matches('Amazing #PostgreSQL #SQL', '#([A-Za-z0-9_]+)', 'g');
select regexp_matches('Amazing #PostgreSQL', '');
-- multiple matches
select regexp_matches('1111 2222-A 3333-B 4444-C', '[A-Z]', 'g');
-- Regexp_Replace 
-- Reverse words
select regexp_replace('Parvin Etibarli', '(.*) (.*)','\2 \1');
-- Have only characters data
select regexp_replace('ABCD12345xyz', '[[:digit:]]', '', 'g');
-- Have only numeric data
select regexp_replace('ABCD12345xyz', '[[:alpha:]]', '', 'g');
-- Data change
select regexp_replace('2019-10-10', '\d{4}', '2020');
-- Regexp_split_to_table function
select regexp_split_to_table('Adnan,James,Linda', ',');

--------------------------------------------------------------------------------------
                        Powerful text searches in PostgreSQL
-- to_tsvector
select to_tsvector('washes');
select to_tsvector('washed');
select to_tsvector('washing');

select to_tsvector('The quick brown fox jumped over the lazy dog.');
select to_tsvector('Hey wait, I am walking across the sitting room to sit with you.');
-- using to_tsquery with the @@ match operator
select to_tsvector('This is a lamp')
@@ to_tsquery('lamp');
select to_tsvector('The quick brown fox jumped over the lazy dog.')
@@ to_tsquery('fox');
select to_tsvector('The quick brown fox jumped over the lazy dog.')
@@ to_tsquery('foxes');
select to_tsvector('The quick brown fox jumped over the lazy dog.')
@@ to_tsquery('fox & dog');
select to_tsvector('The quick brown fox jumped over the lazy dog.')
@@ to_tsquery('fox | tiger');
select to_tsvector('The quick brown fox jumped over the lazy dog.')
@@ to_tsquery('!tiger');

select to_tsvector('The quick brown fox jumped over the lazy dog.')
@@ to_tsquery('!dog');
-- Full text search within a table
create table docs(
doc_id serial primary key,
doc_text text,
doc_text_search tsvector
);

insert into docs (doc_text) values
('The five boxing wizards jump quickly.'),
('Pack my box with five dozen pepsi jugs.'),
('How vexingly quick daft zebras jump!'),
('Jackdaws love my big sphinx of quartz.'),
('Sphinx of black quartz, judge my vow.'),
('Bright vixens jump: dozy fowl quack.');

update docs
set doc_text_search = to_tsvector(doc_text);

select doc_id,
doc_text
from docs
where doc_text_search @@ to_tsquery('jump');

select doc_id,
doc_text
from docs
where doc_text_search @@ to_tsquery('jump & quick');
-- <-> find words next to each other
select doc_id,
doc_text
from docs
where doc_text_search @@ to_tsquery('jump <-> quick');
select doc_id,
doc_text,
doc_text_search
from docs
where doc_text_search @@ to_tsquery('sphinx <2> quartz');
-- Setup presidents speeches data
create table docs_presidents(
doc_id serial primary key,
president varchar not null,
title varchar not null,
speech_date date not null ,
speech_text text not null ,
speech_text_search tsvector
);
-- load the speeches via psql (\copy runs on the client side):
\copy docs_presidents (president, title, speech_date, speech_text) FROM 'president_speeches.csv' WITH (FORMAT CSV, DELIMITER '|', HEADER OFF, QUOTE '@');
update docs_presidents
set speech_text_search = to_tsvector('english', speech_text);
-- Create a special GIN index for the tsvector column
create index idx_gin_docs_presidents_speech_text_search on
docs_presidents using gin(speech_text_search);

select president,
title,
speech_date
from docs_presidents
where speech_text_search @@ to_tsquery('World');

--------------------------------------------------------------------------------------
                                Partitioning
-- Table inheritance
create table master (
pk integer primary key ,
tag text,
parent integer
);
create table master_child() inherits (master);

alter table master_child
add constraint master_pk primary key (pk);
insert into master (pk, tag, parent) values
(1, 'pen', 0);

insert into master_child (pk, tag, parent) values
(2, 'pencil', 0);

select * from master;
select * from master_child;
-- using only to query just the parent table
select * from only master;
-- lets see the effects of update operations
update master
set tag = 'monitor'
where pk = 2;
delete from master where pk = 2;

-- Dropping master table
drop table master; -- fails: cannot drop table master because other objects depend on it
drop table master cascade;

-- Partition by range
create table employee_range (
id bigserial,
birth_date date not null ,
country_code varchar(2) not null
) partition by range (birth_date);

create table employee_range_y2000 partition of employee_range
for values from ('2000-01-01') to ('2001-01-01');

create table employee_range_y2001 partition of employee_range
for values from ('2001-01-01') to ('2002-01-01');
-- Lets insert some data
insert into employee_range (birth_date, country_code) values
('2000-01-01', 'US'),
('2000-01-02', 'US'),
('2000-12-31', 'US'),
('2001-01-01', 'US');
select * from employee_range;
select * from only employee_range;
select * from employee_range_y2000;
select * from employee_range_y2001;
-- update operations
update employee_range
set birth_date = '2001-10-10'
where id = 1;
-- Delete operations
delete from employee_range
where id = 1;
-- Partition by list
create table employees_list(
id bigserial,
birth_date date not null ,
country_code varchar not null
) partition by list (country_code);

create table employees_list_us partition of employees_list
for values in ('US');

create table employees_list_eu partition of employees_list
for values in ('UK', 'DE', 'IT', 'FR', 'ES');

insert into employees_list (id, birth_date, country_code) values
(1, '2000-01-01', 'US'),
(2, '2000-01-02', 'US'),
(3, '2000-12-31', 'UK'),
(4, '2001-01-01', 'DE');
-- update operations
update employees_list
set country_code = 'US'
where id = 4;
-- partition by hash
create table employees_hash(
id bigserial,
birth_date date not null ,
country_code varchar not null
) partition by hash (id);

create table employees_hash_1 partition of employees_hash
for values with (modulus 3, remainder 0);

create table employees_hash_2 partition of employees_hash
for values with (modulus 3, remainder 1);

create table employees_hash_3 partition of employees_hash
for values with (modulus 3, remainder 2);

-- insert only after the partitions exist, otherwise there is no partition to route the rows to
insert into employees_hash (id, birth_date, country_code) values
(1, '2000-01-01', 'US'),
(2, '2000-01-02', 'US'),
(3, '2000-12-31', 'UK');
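-- To see which hash partition each row ended up in (tableoid shows the physical table)
select tableoid::regclass as partition_name, *
from employees_hash;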


-- Default partition
-- What happens when you try to insert a record that can't fit into any partition?
insert into employees_list (id, birth_date, country_code) values
(10, '2001-01-01', 'JP');
-- this throws an error: no partition of employees_list matches 'JP'
-- Creating default partition
create table employee_list_default partition of employees_list default ;
-- now lets insert
insert into employees_list (id, birth_date, country_code) values
(10, '2001-01-01', 'JP');

-- Multi level partitioning
-- Lets create a new fresh master table to demonstrate that
create table employees_master (
id bigserial,
birth_date date not null ,
country_code varchar(2) not null
) partition by list (country_code);
-- Create main partitions
-- US list
-- EU list
create table employees_master_us partition of employees_master
for values in ('US');

create table employees_master_eu partition of employees_master
for values in ('UK', 'DE', 'IT', 'FR', 'ES')
partition by hash (id);

-- Create sub partitions
create table employees_master_eu_1 partition of employees_master_eu
for values with (modulus 3, remainder 0);

create table employees_master_eu_2 partition of employees_master_eu
for values with (modulus 3, remainder 1);

create table employees_master_eu_3 partition of employees_master_eu
for values with (modulus 3, remainder 2);
-- Lets insert some data
insert into employees_master (id, birth_date, country_code) values
(1, '2000-01-01', 'US'),
(2, '2000-01-02', 'US'),
(3, '2000-12-31', 'UK'),
(4, '2001-01-01', 'DE');
-- Attach a new partition
create table employees_list_sp partition of employees_list
for values in ('SP');
insert into employees_list (id, birth_date, country_code) values
(10, '2020-01-01', 'SP');

-- Detach a partition
alter table employees_list detach partition employees_list_sp;

-- Altering the bounds of a partition
create table t1 (a int, b int) partition by range (a);

create table t1p1 partition of t1 for values from (0) to (100);
create table t1p2 partition of t1 for values from (200) to (300);
insert into t1 (a, b) values (1, 1);
-- Begin
-- Detach
-- Alter 
-- Attach
-- Commit
begin transaction;
alter table t1 detach partition t1p1;
alter table t1 attach partition t1p1 for values from (0) to (200);
commit transaction;

-- Partition indexes
-- Lets create an index on employees_list parent table with parent key id only
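-- A minimal sketch of that: an index created on the partitioned parent table is
-- automatically created on every existing and future partition (the index name here is illustrative)
create index idx_employees_list_id on employees_list (id);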

---------------------------------------------------------------------------------------
                            Functions with SQL language
-- Syntax
Create or replace function function_name() returns void as 
'
 SQL command
' language SQL
create or replace function fn_mysum(int, int)
returns int as
'
select $1 + $2;
' language sql;
select fn_mysum(1, 2);
-- Introduction to dollar quotes
create or replace function fn_mysum(int, int)
returns int as
$$
select $1 + $2;
$$ language sql;

select fn_mysum(1, 2);
create or replace function fn_mysum(int, int)
returns int as
$body$
select $1 + $2;
$body$ language sql;

select fn_mysum(1, 2);
-- Function returning no values
-- Returns void
create or replace function fn_employees_update_country()
returns void as
$$
update employees
set country = 'N/A'
where country is null;
$$ language sql;

select fn_employees_update_country();
-- Function returning single value
-- Get the minimum price of product
create or replace function fn_product_min_price()
returns integer as
$$
select min(unit_price)
from products;
$$ language sql;

select fn_product_min_price();
-- Get the biggest order ever placed
create or replace function fn_biggest_order()
returns double precision as
$$
select max(amount) from
(
select order_id,
sum(unit_price * quantity) as amount
from order_details
group by order_id
) as total_amount;
$$ language sql;

select fn_biggest_order();
-- Get total count of customers
select count(*) from customers;

create or replace function fn_api_get_total_customers()
returns bigint as
$$
select count(*) from customers;
$$ language sql;

select fn_api_get_total_customers();
-- Get total customers with empty fax number
create or replace function fn_api_total_customers_empty_fax()
returns bigint as
$$
select count(*) from customers
where fax is null;
$$ language sql;
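
-- and call it like the other api functions
select fn_api_total_customers_empty_fax();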
-- Lets create a mid function with input parameters like string and starting_point
create or replace function fn_mid(p_string varchar, p_starting_point integer)
returns varchar as
$$
select substring(p_string, p_starting_point);
$$ language sql;

select fn_mid('Amazing PostgreSQL', 9);
-- Get total customers by city
create or replace function fn_api_get_total_customers_by_city(p_city varchar)
returns bigint as
$$
select count(*) from customers
where city = p_city;
$$ language sql;

select fn_api_get_total_customers_by_city('Paris');
create or replace function fn_api_get_total_customers_by_country(p_country varchar)
returns bigint as
$$
select count(*) from customers
where country = p_country;
$$ language sql;

select fn_api_get_total_customers_by_country('UK');
-- Get total orders by a customer
create or replace function fn_api_customer_total_orders(p_customer_id bpchar)
returns bigint as
$$
select count(*)
from orders
natural join customers
where customer_id = p_customer_id;
$$ language sql;

select fn_api_customer_total_orders('VINET');
-- Get the biggest order amount placed by a customer
create or replace function fn_api_customer_largest_order(p_customer_id bpchar)
returns double precision as
$$
select max (order_amount)
from (
select orders.order_id,
sum((unit_price * quantity) - discount) as order_amount
from order_details
natural join orders
where orders.customer_id = p_customer_id
group by orders.order_id) as total_amount;
$$ language sql;

select fn_api_customer_largest_order('ALFKI');
-- Functions returning a composite
-- Returns a single row, as a composite (row) value
create or replace function fn_api_order_latest()
returns orders as
$$
select * from orders order by order_date desc,
order_id desc limit 1
$$ language sql;

select fn_api_order_latest();

select (fn_api_order_latest()).*;
select (fn_api_order_latest()).order_id;
select customer_id (fn_api_order_latest());
select customer_id from fn_api_order_latest();
create or replace function fn_api_order_latest_by_date_range(p_from date, p_to date)
returns orders as
$$
select * from orders
where order_date between p_from and p_to
order by order_date desc,
order_id desc limit 1
$$ language sql;
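
-- a sample call (the date range below is just an example)
select (fn_api_order_latest_by_date_range('1997-01-01', '1997-12-31')).*;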
-- Most recent hire
create or replace function fn_api_employees_latest_hire()
returns employees as
$$
select * from employees
order by hire_date desc
$$ language sql;

select * from fn_api_employees_latest_hire();
-- Functions returning multiple rows
-- List all employees hired in a particular year
create or replace function fn_api_employees_hire_date_by_year(p_year integer)
returns setof employees as
$$
select * from employees
where extract('year' from hire_date) = p_year
$$ language sql;

select * from fn_api_employees_hire_date_by_year('1992');
-- List all products where total order amount is greater than an input amount
create or replace function fn_api_products_total_amount_by(p_amount integer)
returns setof products as
$$
select * from products where product_id in
(select product_id from
(select product_id,
sum((unit_price * quantity) - discount) as product_total_sale
from order_details group by product_id
having sum((unit_price * quantity) - discount) > p_amount) as t1)
$$ language sql;

select * from fn_api_products_total_amount_by(100000);
-- Function returning a table
create or replace function fn_api_customer_top_orders(p_customer_id bpchar, p_limit integer)
returns table
(
order_id smallint,
customer_id bpchar,
product_name varchar,
unit_price real,
quantity smallint,
total_order double precision
) as
$$
select orders.order_id,
orders.customer_id,
products.product_name,
order_details.unit_price,
order_details.quantity,
((order_details.unit_price * order_details.quantity) - order_details.discount) as total_order
from order_details
natural join orders
natural join products
where orders.customer_id = p_customer_id
order by ((unit_price * quantity) - discount) desc
limit p_limit
$$ language sql;

select * from fn_api_customer_top_orders('VINET', 10);

-- Function parameters with default values
-- Lets do sum of three numbers
create or replace function fn_sum_3 (x int, y int default 10, z int default 10)
returns integer as
$$
select x + y + z
$$ language sql;

select fn_sum_3(10);
select fn_sum_3(1, 2, 3);
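-- Defaults also combine with named notation, so the middle parameter can be skipped
-- (a small sketch)
select fn_sum_3(x => 1, z => 5);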
-- Lets set a new pricing with a 7% increase
create or replace function fn_api_new_price(products, percentage_increase numeric default 107)
returns double precision as
$$
select $1.unit_price * percentage_increase / 100;
$$ language sql;

select
product_id,
product_name,
unit_price,
fn_api_new_price(products.*) as new_price
from products;
-- Function based on views
create or replace view v_active_queries as
select
pid,
usename,
query_start,
(current_timestamp - query_start) as runtime,
query
from pg_stat_activity
where state = 'active'
order by 4 desc;

create or replace function fn_internal_active_queries(p_limit int)
returns setof v_active_queries as
$$
select * from v_active_queries
limit p_limit
$$ language sql;

select * from fn_internal_active_queries(10);
-- Drop a function
create or replace function fn_sum(x int, y int)
returns integer as
$$
select x + y;
$$ language sql;

select fn_sum(1, 2);

drop function fn_sum;
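-- If several overloads of fn_sum existed, the exact signature would be needed (a hedged note)
drop function if exists fn_sum(int, int);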
-------------------------------------------------------------------------------------
                                PL/pgSQL
-- Lets get the max price of all products
create or replace function fn_api_products_max_price()
returns bigint as
$$
begin
return max(unit_price) from products;
end;
$$ language plpgsql;

select fn_api_products_max_price();
-- Declaring variables

do
$$
declare
mynum integer :=1;
first_name varchar(100) :='Adnan';
hire_date date :='2020-01-01';
start_time timestamp :=now();
emptyvar integer;
begin
raise notice 'My variables % % % % %',
mynum,
first_name,
hire_date,
start_time,
emptyvar;
end;
$$
 

do
$$
declare
mynum integer :=1;
first_name varchar(100) :='Adnan';
hire_date date :='2020-01-01';
start_time timestamp =now();
emptyvar integer;
var1 integer :=10;
begin
raise notice 'My variables % % % % % %',
mynum,
first_name,
hire_date,
start_time,
emptyvar,
var1;
end;
$$
-- Declaring variables via alias for
-- Declaring variables in function
create or replace function fn_my_sum(integer, integer)
returns integer as
$BODY$
declare
ret integer;
begin
ret := $1 + $2;
return ret;
end;
$BODY$ language plpgsql;

select fn_my_sum(1, 2);
create or replace function fn_my_sum(integer, integer)
returns integer as
$BODY$
declare
ret integer;
x alias for $1;
y alias for $2;
begin
ret := x + y;
return ret;
end;
$BODY$ language plpgsql;

select fn_my_sum(1, 2);
-- Variable initialization timing

do
$$
declare
start_time time :=now();
begin
raise notice 'Starting at : %', start_time;
perform pg_sleep(2);
raise notice 'Next time : %', start_time;
end;
$$ language plpgsql;

-- Copying data types
-- Lets return the name of the product for a particular product id

do
$$
declare
product_title products.product_name%type;
begin
select product_name
from products
into product_title
where product_id = 1
limit 1;
raise notice 'Your product name is %', product_title;
end;
$$
-- How about accessing the full row result set

do
$$
declare
row_product record;
begin
select * from products
into row_product
where product_id = 1
limit 1;
raise notice 'Your product name is %', row_product.product_name;
end;
$$
-- Using in, out without returns
-- Lets create a function to calculate the sum of two integers
create or replace function fn_my_sum_2_par(in x integer, in y integer, out z integer) as
$$
BEGIN
z := x + y;
end;
$$ language plpgsql;

select fn_my_sum_2_par(1, 2);
-- Can we also output more than one return value?
create or replace function fn_my_sum_2_par1(in x integer, in y integer, out w integer, out z integer) as
$$
BEGIN
z := x + y;
w := x * y;
end;
$$ language plpgsql;

select fn_my_sum_2_par1(3, 2);
-- How to return query results
create or replace function fn_api_orders_latest_top_10_orders()
returns setof orders as
$$
begin
return query
select * from orders
order by order_date desc
limit 10;
end;
$$ language plpgsql;

select * from fn_api_orders_latest_top_10_orders();
-- Control structures - if statement
create or replace function fn_my_check(x integer default 0, y integer default 0)
returns text as
$$
begin
if x > y then
return 'x > y';
elsif x = y then
return 'x = y';
else
return 'y > x';
end if;
end;
$$ language plpgsql;

select * from fn_my_check(3, 3);
-- Using If with table data
create or replace function fn_api_products_category(price real)
returns text as
$$
begin
if price > 50 then
return 'High';
elsif price > 25 then
return 'Medium';
else
return 'Sweet spot';
end if;
end;
$$ language plpgsql;

select fn_api_products_category(unit_price)
from products;

select fn_api_products_category(unit_price),*
from products;

-- Case statement
create or replace function fn_my_check_value(x integer default 0)
returns text as
$$
begin
case x
when 10 then
return 'Value = 10';
when 20 then
return 'Value = 20';
else
return 'No values found, returning input value x';
end case;
end;
$$ language plpgsql;

select fn_my_check_value(40);

create or replace function fn_api_order_ship_via (shipvia smallint)
returns text as
$$
begin
case shipvia
when 1 then
return 'Speedy';
when 2 then
return 'United';
when 3 then
return 'Federal';
else
return 'Unknown';
end case;
end;
$$ language plpgsql;

select fn_api_order_ship_via(ship_via)
from orders;

select fn_api_order_ship_via(ship_via),*
from orders;
-- Searched case statement

do
$$
declare
total_amount numeric;
order_type varchar(50);
begin
select
sum((unit_price * quantity) - discount) into total_amount
from order_details
where order_id = 10248;
if found then
case
when total_amount > 200 then
order_type = 'Platinum';
when total_amount > 100 then
order_type = 'Gold';
else
order_type = 'Silver';
end case;
raise notice 'Order Amount, Order Type % %', total_amount, order_type;
else
raise notice 'No order was found';
end if;
end;
$$language plpgsql;

-- Loop Statement

do
$$
declare
i_counter integer = 0;
begin
loop
raise notice '%', i_counter;
i_counter := i_counter + 1;
exit when i_counter = 5;
end loop;
end;
$$language plpgsql;
-- For loop

do
$$
begin
for counter in 1..10
loop
raise notice 'Counter : %', counter;
end loop;
end;
$$language plpgsql;

do
$$
begin
for counter in reverse 10..1
loop
raise notice 'Counter : %', counter;
end loop;
end;
$$language plpgsql;
-- stepping

do
$$
begin
for counter in 1..10 by 2
loop
raise notice 'Counter : %', counter;
end loop;
end;
$$language plpgsql;
-- For loops iterate over result set

do
$$
declare
rec record;
begin
for rec in
select order_id, customer_id from orders limit 10
loop
raise notice '%, %', rec.order_id, rec.customer_id;
end loop;
end;
$$language plpgsql;
-- Lets print odd numbers only from 1 to 20

do
$$
declare
i_counter int = 0;
begin
loop
i_counter = i_counter + 1;
exit when i_counter > 20;
continue when mod(i_counter, 2) = 0;
raise notice '%', i_counter;
end loop;
end;
$$language plpgsql;

-- Foreach loop with arrays

do
$$
declare
arr1 int[] := array [1, 2];
var int;
begin
foreach var in array arr1
loop
raise info '%', var;
end loop;
end;
$$language plpgsql;

do
$$
declare
arr1 int[] := array [1, 2, 3, 4];
arr2 int[] := array [5, 6, 7, 8, 9, 10];
var int;
begin
foreach var in array arr1 || arr2
loop
raise info '%', var;
end loop;
end;
$$language plpgsql;

-- While loops
create or replace function fn_while_loop_sum_all(x integer)
returns numeric as
$$
declare
counter integer :=1;
sum_all integer :=0;
begin
while counter <= x
loop
sum_all := sum_all + counter;
counter := counter + 1;
end loop;
return sum_all;
end;
$$ language plpgsql;

select fn_while_loop_sum_all(4);

create or replace function fn_create_table_insert_values(x integer)
returns boolean as
$$
declare
counter integer :=1;
done boolean :=false;
begin
execute format('create table if not exists t_table (id int)');
while counter <= x
loop
insert into t_table (id) values (counter);
counter := counter + 1;
end loop;
return done;
end;
$$ language plpgsql;

select fn_create_table_insert_values(4);

select * from t_table;

-- Returning a table
create or replace function fn_api_productions_by_names(p_pattern varchar)
returns table (productname varchar, unitprice real)
as $$
begin
return query
select product_name,
unit_price
from products
where product_name like p_pattern;
end;
$$language plpgsql;

select * from fn_api_productions_by_names('A%');
-- Using return next
-- Lets get all orders where unit_price > 10
create or replace function fn_get_all_orders_greater_than()
returns setof order_details as
$$
declare
r record;
begin
for r in
select * from order_details where unit_price > 10
loop
return next r;
end loop;
return;
end;
$$ language plpgsql;

select * from fn_get_all_orders_greater_than();
-- Lets create December pricing where we will set new unit_price based on a category_id
create or replace function fn_products_new_dec_pricing()
returns setof products as
$$
declare
r record;
begin
for r in
select * from products
loop
case
when r.category_id in (1, 2, 3) then
r.unit_price = r.unit_price * 0.90;
when r.category_id in (3, 4, 5) then
r.unit_price = r.unit_price * 0.80;
else
r.unit_price = r.unit_price * 1.20;
end case;
return next r;
end loop;
return;
end;
$$ language plpgsql;

select * from fn_products_new_dec_pricing();
-- Error Handling
select *
from orders;

do
$$
declare
rec record;
orderid smallint = 10248;
begin
select
customer_id,
order_date
from orders
into strict rec
where order_id = orderid;
exception
when NO_DATA_FOUND then
raise exception 'No orderid was found %', orderid;
end;
$$ language plpgsql;
-- Exception - Too many rows
do
$$
declare
rec record;
begin
select customer_id,
company_name
from customers
into strict rec
where company_name like 'A%'
limit 1;
exception
when too_many_rows then
raise exception 'Your query returns too many rows';
end;
$$ language plpgsql;
-- Exception with data exception errors
create or replace function fn_div_exception(x real, y real)
returns real as
$$
declare
ret real;
begin
ret := x / y;
return ret;
end;
$$ language plpgsql;

select fn_div_exception(1, 2);

select fn_div_exception(1, 0);
select fn_div_exception(1, 2);

create or replace function fn_div_exception(x real, y real)
returns real as
$$
declare
ret real;
begin
ret := x / y;
return ret;
exception
when division_by_zero then
raise info 'DIVISION BY ZERO';
raise info 'Error % %', SQLSTATE, SQLERRM;
return 0;
end;
$$ language plpgsql;

select fn_div_exception(1, 0);

--------------------------------------------------------------------------------------
                               Exploring Stored Procedures

-- Create a transaction
create table t_accounts(
recid serial primary key,
name varchar not null ,
balance dec(15, 2) not null
);
-- Insert some data
insert into t_accounts(name, balance)
values
('Adam', 100),
('Linda', 100);
-- create our stored procedure
create or replace procedure pr_money_transfer(
sender int,
receiver int,
amount dec
) as
$$
begin
update t_accounts
set balance = balance - amount
where recid = sender;
update t_accounts
set balance = balance + amount
where recid = receiver;
commit;
end;
$$ language plpgsql;

call pr_money_transfer(1, 2, 100);

select *
from t_accounts;

-- Returning a value
create or replace procedure pr_orders_count(inout total_count integer default 0)
as
$$
begin
select count(*)
into total_count
from orders;
end;
$$ language plpgsql;

call pr_orders_count();
-- drop a procedure
drop procedure pr_orders_count;
--------------------------------------------------------------------------------------
                                Triggers
-- Lets create the 'players' table
create table players (
player_id serial primary key,
name varchar(100)
);
-- Lets create 'players_audit' table to store all changes
create table players_audit(
player_audit_id serial primary key ,
player_id int not null ,
name varchar(100) not null ,
edit_date timestamp not null
);
-- Lets create a function
create or replace function fn_players_name_changes_log()
returns trigger
language plpgsql
as
$$
begin
if new.name <> old.name then
insert into players_audit
(
player_id, name, edit_date
) values
(
old.player_id,
old.name,
now()
);
end if;
return new;
end;
$$
-- Now bind our newly created function to our table 'players' via a create trigger statement
 

create trigger trg_players_name_changes
before update
on players
for each row
execute procedure fn_players_name_changes_log();
-- Lets insert some data
insert into players (name)
values ('Adam'), ('Linda');
-- Lets update some data
update players
set name = 'Linda2'
where player_id = 2;
update players
set name = 'Linda3'
where player_id = 2;

select * from players;

select * from players_audit;
-- Modify data at insert event
-- Lets create a table
create table t_temperature_log (
id_temperature_log serial primary key ,
add_date timestamp,
temperature numeric
);
-- Lets create a function to check the inserted data
create or replace function fn_temperature_value_check_at_insert()
returns trigger
language plpgsql
as $$
begin
if new.temperature < -30 then
new.temperature = 0;
end if;
return new;
end;
$$
-- Lets bind our function to our table
create trigger trg_temperature_value_check_at_insert
before insert on t_temperature_log
for each row
execute procedure fn_temperature_value_check_at_insert();
-- Lets insert some data
select * from t_temperature_log;
insert into t_temperature_log (add_date, temperature) values
('2020-10-01', -40);

select * from t_temperature_log;

-- View triggers variables
-- Lets add a new function which displays some key trigger variable data
create or replace function fn_trigger_variables_display()
returns trigger language plpgsql
as $$
begin
raise notice 'tg_name: %', tg_name;
raise notice 'tg_relname: %', tg_relname;
raise notice 'tg_table_schema: %', tg_table_schema;
raise notice 'tg_table_name: %', tg_table_name;
raise notice 'tg_when: %', tg_when;
raise notice 'tg_level: %', tg_level;
raise notice 'tg_op: %', tg_op;
raise notice 'tg_nargs: %', tg_nargs;
raise notice 'tg_argv: %', tg_argv;
return new;
end;
$$

-- Lets bind the function to the table, firing after insert
create trigger trg_trigger_variables_display
after insert
on t_temperature_log
for each row
execute procedure fn_trigger_variables_display();
insert into t_temperature_log(add_date, temperature)
values ('2020-10-01', -10);
-- Disallowing delete
-- Lets create a test table
create table test_delete(
id int
);
-- Lets insert some data
insert into test_delete (id) values (1), (2);
-- Lets create a generic 'cancel' function for our use case here
create or replace function fn_generic_cancel_op()
returns trigger language plpgsql
as $$
begin
if tg_when = 'AFTER' then
raise exception 'You are not allowed to % rows in %.%',
tg_op, tg_table_schema, tg_table_name;
end if;
raise notice '% on rows in %.% wont happen',
tg_op, tg_table_schema, tg_table_name;
return null;
end;
$$;
-- Lets bind the function at after delete
create trigger trg_disallow_delete
after delete on test_delete
for each row
execute procedure fn_generic_cancel_op();
-- Now lets delete a record
delete from test_delete where id = 1;
-- Now lets create another trigger but this time before delete
create trigger trg_skip_delete
before delete on test_delete
for each row
execute procedure fn_generic_cancel_op();

delete from test_delete where id = 1;

select * from test_delete;

-- Disallowing truncate
-- Lets create an after trigger
create trigger trg_disallow_truncate
after truncate on test_delete
for each statement
execute procedure fn_generic_cancel_op();
-- Lets truncate the data
truncate test_delete;
-- The audit trigger
-- Lets create a master table 'audit'
create table audit(
id int
);
-- Lets create an audit_log table
create table audit_log (
username text,
add_time timestamp,
table_name text,
operation text,
row_before json,
row_after json
);
-- We will populate the above table with some internal variables
-- Lets create the trigger function
create or replace function fn_audit_trigger()
returns trigger
language plpgsql
as
$$
declare
old_row json = null;
new_row json = null;
begin
if tg_op in ('UPDATE', 'DELETE') then
old_row = row_to_json(old);
end if;
if tg_op in ('INSERT', 'UPDATE') then
new_row = row_to_json(new);
end if;
insert into audit_log
(
username,
add_time,
table_name,
operation,
row_before,
row_after
)
values
(
session_user,
current_timestamp at time zone 'UTC',
tg_table_schema || '.' || tg_table_name,
tg_op,
old_row,
new_row
);
return new;
end;
$$;
-- Lets create our trigger statement
create trigger trg_audit_trigger
after insert or update or delete
on audit for each row
execute procedure fn_audit_trigger();
-- Lets test with some data
insert into audit (id) values (2);

select * from audit;
select * from audit_log;
update audit
set id = '100' where id = 1;
delete from audit
where id = 2;

-- Creating conditional triggers
-- Lets create our master table
create table mytask(
task_id serial primary key ,
task text
);
-- We will create a generic function which will show a message and return null.
create or replace function fn_cancel_with_message()
returns trigger language plpgsql as
$$
begin
raise exception '%', TG_ARGV[0];
return null;
end;
$$;
-- Lets create our trigger statement. We will run it on statement level and not row level
create trigger trg_no_update_on_friday_afternoon
before insert or update or delete or truncate
on mytask for each statement
when (
extract('DOW' from current_timestamp) = 5
and current_time > '12:00'
)
execute procedure fn_cancel_with_message('No updates are allowed on Friday afternoon, so chill!!!');
-- Lets test the insert
select extract('DOW' from current_timestamp),
current_time;

insert into mytask (task) values ('test');

select * from mytask;
create or replace trigger trg_no_update_on_friday_afternoon
before insert or update or delete or truncate
on mytask for each statement
when (
extract('DOW' from current_timestamp) = 4
and current_time > '04:00'
)
execute procedure fn_cancel_with_message('No updates are allowed on Friday afternoon, so chill!!!');
insert into mytask (task) values ('test3');
-- Disallow data change on primary key
-- Lets create our master table
create table pk_table(
id serial primary key ,
t text
);
-- We will insert some data first
insert into pk_table(t) values ('t1'), ('t2');
-- Lets re-use our trigger function created earlier
create or replace function fn_generic_cancel_op()
returns trigger language plpgsql as
$$
begin
if tg_when = 'AFTER' then
raise exception 'You are not allowed to % rows in %.%',
tg_op, tg_table_schema, tg_table_name;
end if;
raise notice '% on rows in %.% wont happen',
tg_op, tg_table_schema, tg_table_name;
return null;
end;
$$;
-- Lets create our trigger statement
create trigger disallow_pk_change
after update of id on pk_table
for each row
execute procedure fn_generic_cancel_op();
-- Now lets try to update a primary key data
update pk_table
set id = 100
where id = 1;
-- Creating event triggers
-- Lets create our audit_ddl table
create table audit_ddl(
audit_ddl_id serial primary key,
username text,
ddl_event text,
ddl_command text,
ddl_add_time timestamptz
);
-- Lets create our event trigger function
create or replace function fn_event_audit_ddl()
returns event_trigger language plpgsql
security definer -- run with the privileges of the function owner, not the caller
as
$$
begin
insert into audit_ddl
(
username,
ddl_event,
ddl_command,
ddl_add_time
)
values
(
session_user,
tg_event,
tg_tag,
now()
);
raise notice 'DDL activity is added!!';
end;
$$;
-- Lets create our event trigger statement
-- without condition
create event trigger trg_evnt_audit_ddl
on ddl_command_start
execute procedure fn_event_audit_ddl();
-- with condition (drop the unconditional trigger above first, since the trigger name is reused)
create event trigger trg_evnt_audit_ddl
on ddl_command_start
when tag in ('CREATE TABLE')
execute procedure fn_event_audit_ddl();
-- Lets test our trigger
-- Now create the table
create table audit_ddl_test(
i int
);

select * from audit_ddl;

--------------------------------------------------------------------------------------
                                    Transactions
-- If anything interrupts the transaction before the commit statement ends it, you can
restore the system to its original state by issuing a rollback statement.
The rollback processes the transaction log in reverse, undoing all the actions that
took place in the transaction.

-- An ACID database
A - Atomicity
C - Consistency
I - Isolation
D - Durability
All four of the above characteristics are necessary to protect a database from corruption.

Atomicity
---------
Database transactions should be 'atomic' in nature, i.e.
- The entire transaction should be treated as a single unit.
- Either it is executed in its entirety (commit), or the database is restored (rollback)
to its original state before the transaction was executed.
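A small sketch of atomicity in action, assuming the accounts table created in the 'Transaction setup' section further down; because the division by zero fails, the whole transaction is aborted and the update never takes effect:

begin;

update accounts
set balance = balance - 50
where name = 'Adam';

-- this statement fails, which puts the whole transaction into an aborted state
select 1/0;

-- postgres reports ROLLBACK here: nothing from the transaction is kept
commit;

select * from accounts;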

Consistency
-----------
Consistency here is application-specific; what counts as a consistent state varies from
application to application.
- For example, in a banking application, when you transfer funds from one account to
another, you want the total amount of money across both accounts (the from/to accounts)
to be the same at the end of the transaction. Here, consistency means the balance is
preserved before and after the transaction is executed.
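A sketch of the bank example, again assuming the accounts table from the 'Transaction setup' below; both updates are applied together, so the combined balance is the same before and after:

begin;

update accounts
set balance = balance - 50
where name = 'Adam';

update accounts
set balance = balance + 50
where name = 'Bob';

commit;

-- total balance across the two accounts is unchanged
select sum(balance) from accounts where name in ('Adam', 'Bob');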

Isolation
---------
Database transactions should be totally isolated from other transactions that execute 
at the same time.
- Total isolation is achieved only if the transactions are serialized.
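A minimal sketch of asking PostgreSQL for full isolation by raising the isolation level to serializable (the default is read committed), assuming the accounts table from the setup below:

begin;
set transaction isolation level serializable;

-- statements in this block behave as if no other transaction runs concurrently
select * from accounts;

commit;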

Durability
----------
After a transaction has committed or rolled back, we should be able to count on the
database being in the proper state, i.e. up to date.
- Even if the system suffers a hard crash or a software failure after a commit,
but before the transaction is written to disk, a well-designed and durable DBMS should
guarantee that upon recovery from the crash, the database can be restored to its
proper state.

-- Transaction setup
create table accounts(
account_id serial primary key,
name varchar(100) not null ,
balance integer not null
);

insert into accounts (name, balance)
values ('Adam', 100), ('Bob', 100), ('Linda', 100);
-- Transaction analysis
-- Lets create/open a transaction
begin;
-- Lets update the data in connection
update accounts
set balance = balance - 50
where name = 'Adam';

commit;
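-- The same kind of change can be undone instead of committed: a minimal rollback sketch
begin;

update accounts
set balance = balance - 50
where name = 'Adam';

-- undo the update instead of committing it
rollback;

-- the balance is back to its value before the begin
select * from accounts;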
-- How to fix transactions on crash
-- Lets update all balances to 200
update accounts
set balance = 200;
-- Lets begin with BEGIN
begin;

update accounts
set balance = balance - 50
where name = 'Adam';

select * from accounts;
-- Now lets mimic a connection error and see the effect
-- Remember we did not commit on this connection before the crash, so
-- PostgreSQL will roll back to the last committed state automatically


-- Using Savepoints
1. Plain commit and rollback statements let us commit or undo an
entire transaction. However, we sometimes want support for rolling back only
part of a transaction.
2. To support a partial rollback, we must put placeholders at strategic
locations in the transaction block. Then, if a rollback is required, we can
roll back to the chosen placeholder.
3. In PostgreSQL these placeholders are called 'savepoints'.
4. Each savepoint takes a unique name that identifies it so that, when you roll back,
the DBMS knows where you are rolling back to.
 
begin;

update accounts
set balance = balance - 50
where name = 'Adam';

select * from accounts;

savepoint first_update;

update accounts
set balance = balance - 50
where name = 'Adam';

select * from accounts;

rollback to first_update;

select * from accounts;

commit;




-------------------------------------------------------------------------------------
                                    Cursors
-- Creating a cursor
-- Declare a cursor using refcursor data type
-- Opening an unbound cursor
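-- A hedged sketch of the outline above: declare an unbound refcursor, open it for a query,
-- and fetch from it (fn_get_movies_cursor and the column names are assumptions based on
-- the movies table used elsewhere in these notes)
create or replace function fn_get_movies_cursor()
returns refcursor
language plpgsql
as $$
declare
-- unbound cursor: no query is attached at declaration time;
-- the initial string value becomes the portal name the caller will use
movies_cur refcursor := 'movies_cur';
begin
-- bind a query to the cursor and open it
open movies_cur for
select movie_id, movie_name
from movies
order by movie_id;
return movies_cur;
end;
$$;

-- cursors only live inside a transaction
begin;
select fn_get_movies_cursor();
fetch 5 from movies_cur;
close movies_cur;
commit;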

--------------------------------------------------------------------------------------
                                    Crosstab report
-- Installing the crosstab() function
-- Install the extension
create extension if not exists tablefunc;
-- Confirm if the extension is installed
select * from pg_extension;
-- Using crosstab function
-- Lets create a sample table called 'scores'
create table scores (
score_id serial primary key,
name varchar,
subject varchar,
score numeric(4, 2),
score_date date
);
-- Lets insert some sample data
insert into scores (name, subject, score, score_date) values
('Adam', 'Math', 10, '2020-01-01'),
('Adam', 'English', 8, '2020-02-01'),
('Adam', 'History', 7, '2020-03-01'),
('Adam', 'Music', 9, '2020-04-01'),
('Linda', 'Math', 12, '2020-01-01'),
('Linda', 'English', 10, '2020-02-01'),
('Linda', 'History', 8, '2020-03-01'),
('Linda', 'Music', 6, '2020-04-01');
-- Lets create our first pivot table
-- Order matters in crosstab
select * from crosstab
(
'select name, subject, score
from scores order by 1, 2'
) as ct
(
name varchar,
English numeric,
History numeric,
Math numeric,
Music numeric
);
-- Lets build a pivot to display sum of all raindays per each location for each year
select * from crosstab
(
'select location, year,
sum(raindays)::int from rainfalls
group by location, year
order by location, year'
) as ct
(
"Location" text,
"2012" int,
"2013" int,
"2014" int,
"2015" int,
"2016" int,
"2017" int
);
-- Pivoting rows and columns
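-- A hedged sketch: the same scores data pivoted the other way around
-- (subjects as rows, student names as columns); the column list must match
-- the distinct name values in source-query order
select * from crosstab
(
'select subject, name, score
from scores order by 1, 2'
) as ct
(
subject varchar,
"Adam" numeric,
"Linda" numeric
);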


--------------------------------------------------------------------------------------
                                      Practice
-- Orders shipping to USA or France
select * from orders
where ship_country in ('USA', 'France')
order by ship_country;
-- Count total number of orders shipping to USA or France
select ship_country, count(*) from orders
where ship_country in ('USA', 'France')
group by ship_country;
-- Order shipping to any countries within latin america
select * from orders
where ship_country in ('Argentina', 'Brazil', 'Mexico', 'Venezuela');
-- Show order total amount per each order line
select order_id, product_id, unit_price,
quantity,
(unit_price * quantity) - discount as "revenue"
from order_details;
-- Find the first and the latest order dates
select min(order_date) as first_order,
max(order_date) as latest_order
from orders;
-- Total products in each categories
select categories.category_id, categories.category_name,
count(product_id) from categories
inner join products p on categories.category_id = p.category_id
group by categories.category_id, categories.category_name
order by 1;
-- List products that needs re-ordering
select
product_id,
product_name,
units_in_stock,
reorder_level
from products
where units_in_stock <= reorder_level
order by 1;
-- List top 5 highest freight charges
select * from orders
order by freight desc
limit 5;
select ship_country,
avg(freight)
from orders
where extract('year' from order_date) = extract('year' from (select max(order_date) from orders))
group by ship_country
order by 2 desc
limit 5;
 -- Customers with no orders
select * from customers c
left join orders o on c.customer_id = o.customer_id
where order_id is null;
-- Top customers with their total order amount spent
select c.customer_id, c.company_name,
sum((od.unit_price * od.quantity) - od.discount) as total_amount
from customers c
join orders o on c.customer_id = o.customer_id
join order_details od on o.order_id = od.order_id
group by 1, 2
order by 3 desc
limit 10;
-- Orders with many lines of items
select order_id, count(*)
from order_details
group by order_id
order by 2 desc;
-- List all late shipped orders
select * from orders
where shipped_date > required_date;
-- Countries with customers or suppliers
select country
from customers
union
select country
from suppliers
order by country;

--------------------------------------------------------------------------------------

select sum(case
when status not in (4, 5) and creation_time between timestamp '2022-07-31 20:00:00.000000' and timestamp '2022-08-14 19:59:59.999'
and sub_status <> 7 then total_stake_amount else 0
end
) as "Turnover",
sum(case
when status in (6, 7) and sub_status = 7 and
settlement_time between timestamp '2022-07-31 20:00:00.000000' and timestamp '2022-08-14 19:59:59.999'
then final_win_amount else 0
end
) as "Refunded Turnover",
sum(case
when sub_status in (5, 8) and settlement_time between timestamp '2022-07-31 20:00:00.000000' and timestamp '2022-08-14 19:59:59.999'
then final_win_amount else 0
end
) as "Payout",
coalesce(
sum(case
when status not in(4, 5) and payment_time between timestamp '2022-07-31 20:00:00.000000' and timestamp '2022-08-14 19:59:59.999' and
sub_status = 5 and status in (7, 8) then payout_amount else 0
end
),
0
) as "Payments",
count(t_tickets) filter ( where (
creation_time between timestamp '2022-07-31 20:00:00.000000' and timestamp '2022-08-14 19:59:59.999'
and "public"."t_tickets"."status" <> 5
)) as "Bets"
from t_tickets
where creator_type = 2;

---------------------------
select count(case
when lives_on_campus = true then 1
end
) as "on campus",
count(case
when lives_on_campus = false then 1
end
) as "off campus"
from students;

select count(students) filter ( where lives_on_campus = true ) as "On campus",
count(students) filter ( where lives_on_campus = false ) as "Off campus"
from students;

----------------------------------------
select unnest(regexp_split_to_array(mac_address, '-')) from cashier.cashiers
where mac_address <> '';
----------------------------------------

select gender, case when gender = 'F' then 'Female' else 'Male' end
from actors;

select gender, case gender when 'F' then 'Female' else 'Male' end
from actors;


-------------------------------------------------------------------------------------
select count(*) from (
select * from (
select split_part(mac_address, '-', 1) from cashier.cashiers where mac_address <> ''
union
select split_part(mac_address, '-', 2) from cashier.cashiers where mac_address <> ''
union
select split_part(mac_address, '-', 3) from cashier.cashiers where mac_address <> ''
) as "result"
where split_part like '%_%'
) as "count";
--------------------------------------------------------------------------------------
Coalesce
select movie_name, coalesce(movie_length, 0) from movies
where coalesce(movie_length, 0) in (100, 120);

select * from movies
where coalesce(movie_length, 0) > 100;

Nullif
select nullif('a', 'a');

select nullif('a', 'b');

--------------------------------------------------------------------------------------
insert into directors_docs (body)
select row_to_json(a)::jsonb from
(
select director_id,
first_name,
last_name,
date_of_birth,
nationality,
(
select json_agg(x) as all_movies from
(
select movie_name from movies
where director_id = directors.director_id
) x
) from directors
) a;

insert into json(data)
select row_to_json(a)::jsonb from
(
select * from directors
) a;

insert into directors_docs (body)
select row_to_json(a)::jsonb from
(
select director_id,
first_name,
last_name,
date_of_birth,
nationality,
(
select count(x) from
(
select movie_name from movies
where director_id = directors.director_id
) x
) from directors
) a;

--------------------------------------------------------------------------------------
select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where revenues_domestic = (select max(revenues_domestic) from movies_revenues);

select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where mr.revenues_domestic = (select revenues_domestic from movies_revenues
where movies_revenues.revenue_id = 5) or mr.revenues_international = (select revenues_international from movies_revenues
where movies_revenues.revenue_id = 5);

select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where (mr.revenues_domestic, mr.revenues_international) = (select revenues_domestic, revenues_international from movies_revenues
where movies_revenues.revenue_id = 5);

select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where mr.revenues_domestic > (select avg(revenues_domestic) from movies_revenues);
select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where mr.revenues_domestic in (select mr.revenues_domestic from movies_revenues mr
where mr.revenue_id < 5);
------------------
>all
select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where mr.revenues_domestic > all (select revenues_domestic from movies_revenues
where movies_revenues.revenue_id = 5);

select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where mr.revenues_domestic > all
(select mr.revenues_domestic from movies_revenues mr where mr.revenues_domestic > 400);
select * from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where mr.revenues_domestic > any
(select mr.revenues_domestic from movies_revenues mr where mr.revenues_domestic > 400);

select *,
case
when revenues_domestic > 1000 then 'first'
when revenues_domestic > 500 then 'second'
when revenues_domestic > 300 then 'third'
when revenues_domestic > 100 then 'fourth'
else 'fifth' end
from movies_revenues mr;

with t as (
select
case
when revenues_domestic > 1000 then 'first'
when revenues_domestic > 500 then 'second'
when revenues_domestic > 300 then 'third'
when revenues_domestic > 100 then 'fourth'
else 'fifth' end
from movies_revenues mr) select * from t;

with t as (
select
case
when revenues_domestic > 1000 then 'first'
when revenues_domestic > 500 then 'second'
when revenues_domestic > 300 then 'third'
when revenues_domestic > 100 then 'fourth'
else 'fifth' end as grade
from movies_revenues mr) select * from t
where grade = 'third';

with t as (
select *,
case
when revenues_domestic > 1000 then 'first'
when revenues_domestic > 500 then 'second'
when revenues_domestic > 300 then 'third'
when revenues_domestic > 100 then 'fourth'
else 'fifth' end as grade
from movies_revenues mr) select * from t
where grade = 'third';

with t as (
select *,
case
when revenues_domestic > 1000 then 'first'
when revenues_domestic > 500 then 'second'
when revenues_domestic > 300 then 'third'
when revenues_domestic > 100 then 'fourth'
else 'fifth' end as grade
from movies_revenues mr) select * from t
left join movies m on t.movie_id = m.movie_id;

-----------------
exists
select *
from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
where exists
(select mr.revenues_domestic from movies_revenues mr where mr.revenues_domestic > 400);

select * from movies
where exists (select * from movies);

not exists
select * from movies
where not exists (select * from movies);
---------------------------------------------------------
select * from movies
where (movie_id, director_id) in (select revenue_id, movie_id from movies_revenues);

-----------------------------------------------------
select m.movie_id, m.movie_name, m.movie_lang, mr.revenues_domestic,
sum (mr.revenues_domestic) over (order by m.movie_name)
from movies m
inner join movies_revenues mr on m.movie_id = mr.movie_id
order by 1;


