Top 10 largest files and folders

# -a lists files as well as directories; sort numerically descending on size (KB) and keep the 10 biggest
du -a . | sort -n -r | head -n 10

Top 30 largest files on this FS in MB

# -mount stays on this filesystem; -ls column 7 is the size in bytes; $NF prints the path (NOTE: breaks on names containing spaces)
find . -mount -type f -ls|sort -rnk7 |head -30|awk '{printf "%10d MB\t%s\n",($7/1024)/1024,$NF}'

Top 30 largest files and folders on this FS, limited to 4 decent folders, in MB

# du reports KB, so $1/1024 gives MB; -x stays on this filesystem; recursion capped at 4 directory levels
du -x --max-depth=4 .|sort -rn|head -n30|awk '{printf "%10d MB\t%s\n",$1/1024,$2}'

Size of files matching pattern

# sums column 5 (size in bytes) of ls output for everything matching the glob
ls -al files_* | awk '{ total += $5 }; END { print total }'

Finding files in date range.

# GNU find can compare mtime against a date string directly (-newermt) — no temp marker files needed.
# Matches files modified after 2020-05-01 00:00 and no later than 2020-05-02 00:00,
# same half-open-ish range as the old "-newer /tmp/start -not -newer /tmp/end" marker-file trick.
find /var/log -type f -newermt "2020-05-01" ! -newermt "2020-05-02"

Sending zfs

zfs send [email protected] | ssh [email protected] 'zfs receive -F backup/host1/data'

Sending fast with mbuffer

zfs send [email protected] | mbuffer -s 128k -m 1G 2>/dev/null | ssh [email protected] 'mbuffer -s 128k -m 1G | zfs receive -F backup/host1/data'

Sending fastest with nc

  • on sender:
zfs send -c [email protected] | nc -w 20 backup-1.home.lan 9090
  • on receiver:
# start this listener BEFORE the sender; -w 2 closes the socket 2s after the stream ends
nc -w 2 -l -p 9090 | zfs receive -F backup/host1/data

Receiving (pulling) a zfs volume

ssh [email protected] 'zfs send backup/host1/[email protected]' | zfs receive -F data

Mount zpool by id

# -d use stable /dev/disk/by-id device names (survive /dev/sdX reshuffles), -a import all pools found, -N skip mounting datasets
zpool import -d /dev/disk/by-id -aN


Top 30 processes using SWAP

# quick version: grep VmSwap out of every /proc/<pid>/status and sort by the kB value, largest first
find /proc -maxdepth 1 -type d -name "[0-9]*" -exec grep -H VmSwap {}/status \; | sort -n -r -k2 | head -30
# detailed version: for each readable /proc/<pid>/status, collect the key/value pairs and print
# Pid, Name and VmSwap for processes actually using swap; the trailing awk|sort|cut sorts human-readably by the kB column
find /proc -maxdepth 2 -path "/proc/[0-9]*/status" -readable -exec awk -v FS=":" '{process[$1]=$2;sub(/^[ \t]+/,"",process[$1]);} END {if(process["VmSwap"] && process["VmSwap"] != "0 kB") printf "%10s %-30s %20s\n",process["Pid"],process["Name"],process["VmSwap"]}' '{}' \; | awk '{print $(NF-1),$0}' | sort -h | cut -d " " -f2-


Watching for new tcp connections

# show only packets with the SYN flag set, i.e. new TCP connection attempts
tcpdump "tcp[tcpflags] & (tcp-syn) != 0"

Checking for multiple MAC addresses on network

arping -I bond0

reviewing multicast addresses on host

# list multicast group memberships per interface
ip maddress

Cipher and protocol enumerations

nmap --script ssl-enum-ciphers -p 443

Certificate Check

# open a TLS session and dump the presented certificate chain (host:port operand restored)
openssl s_client -connect <host>:443

Check certificate

# decode a local certificate file and show the Subject line
# (input filename restored; -noout suppresses re-echoing the PEM blob)
openssl x509 -in <certificate.pem> -noout -text | grep Subject


# generate an unencrypted (-nodes) 2048-bit key and CSR in one step;
# -keyout/-out filenames restored, and CN appended — most CAs reject a CSR without a CN
openssl req -nodes -newkey rsa:2048 -keyout <domain>.key -out <domain>.csr -subj "/C=GB/postalCode=XX1 2XX/ST=ABC/L=ABC/street=1st office,ind estate, area/O=PAUL ERRINGTON/OU=Infrastructure/OU=SSL/CN=<fqdn>"

Check CSR

openssl req -noout -text -in | grep Subject

Check deployed certificate

nmap -sV -sC

clear ldap user caching issue

# invalidate SSSD's cached entry for one user so the next lookup goes back to LDAP
sss_cache -u paul.errington


select processes not sleeping

-- everything currently executing (excludes idle/sleeping connections)
SELECT *
FROM information_schema.processlist
WHERE command != 'Sleep'
ORDER BY id;

select processes running for 1 minute or more (time >= 60 seconds)

-- active statements running for 60 seconds or more, longest first (\G = vertical output in the mysql client)
SELECT id, user, time, state, info
FROM information_schema.processlist
WHERE command != 'Sleep'
  AND time >= 60
ORDER BY time DESC, id \G

Kill a users running queries

-- builds a single "KILL <id>; KILL <id>; ..." string to paste back into the client
-- (keywords uppercased for consistency with the rest of this file)
SELECT GROUP_CONCAT('KILL ', id, ';' SEPARATOR ' ')
FROM (
    SELECT id
    FROM information_schema.processlist
    WHERE user = 'paul.errington'
) AS thisuser;

Purge binary logs older than 10 days (keep at least 10 days' retention), e.g. PURGE BINARY LOGS BEFORE NOW() - INTERVAL 10 DAY;


Move binlogs to free up space (don't copy large numbers — limit to the 10 oldest at a time)

mkdir -p /root/temp_binlogs
# the index file lists binlogs oldest-first; confirm which 10 names are oldest BEFORE moving anything
head -10  mysql-bin.index
# -c 3 = idle I/O class so the move doesn't starve mysqld; adjust the brace range to match the names shown above
ionice -c 3 mv mysql-bin.{000001..000010} /root/temp_binlogs

Export to csv

# locate writable folder, ensure sufficient storage.
SELECT @@GLOBAL.secure_file_priv;
# export select as csv — without the FIELDS/LINES clauses INTO OUTFILE writes tab-separated output, not CSV
SELECT * FROM MY_TABLE
INTO OUTFILE '/data/mysql/export_my_table.csv'
FIELDS TERMINATED BY ',' ENCLOSED BY '"'
LINES TERMINATED BY '\n';

Database size in MB

-- per-schema size of data plus indexes, reported in MB
SELECT
    table_schema AS "DB Name",
    ROUND(SUM(data_length + index_length) / 1024 / 1024, 1) AS "DB Size in MB"
FROM information_schema.tables
GROUP BY table_schema;

Skip binlog (prevent propagation of DDL i.e. long alter table/index statements)

-- disables binary logging for the CURRENT SESSION only (needs SUPER); re-enable with SET sql_log_bin=ON
SET sql_log_bin=OFF

Skip a replication error (duplicate keys, indexes)

-- skips one replication event on the slave; NOTE(review): applies to classic binlog-position replication —
-- GTID-based replication needs a different procedure (inject an empty transaction)
SET GLOBAL sql_slave_skip_counter = 1

Use mysqldump to create a backup for building a second slave replica; the embedded CHANGE MASTER statement will point at the MySQL master. WARNING: the IO thread will continue but the slave SQL thread is stopped for the duration of the mysqldump!

# --dump-slave embeds the slave's master coordinates as a CHANGE MASTER TO statement (it takes precedence
# over --master-data); --single-transaction gives a consistent InnoDB snapshot without global locks.
# NOTE(review): the slave SQL thread is stopped for the duration of the dump — the IO thread keeps running.
mysqldump --dump-slave --master-data=2 --single-transaction productionDB | gzip --fast -c > productionDB_YYYYMMDD.sql.gz

Fast DB restore (skip bin log)

# restore with binary logging disabled for the session: faster, and the restore is not replicated downstream
gunzip -c productionDB_YYYYMMDD.sql.gz | mysql --init-command="SET SQL_LOG_BIN = 0;"


Standby replication lag

-- run on the standby: elapsed time since the last replayed WAL transaction
SELECT now() - pg_last_xact_replay_timestamp() AS replication_lag;

Blocking processes.

-- one row per waiting (not-granted) lock, paired with the session holding the conflicting lock
-- (the pid column references were stripped during copy/paste; restored from the canonical
--  PostgreSQL wiki "Lock Monitoring" recipe, along with the missing page comparison)
SELECT AS blocked_pid,
    blocked_activity.usename  AS blocked_user, AS blocking_pid,
    blocking_activity.usename AS blocking_user,
    blocked_activity.query    AS blocked_statement,
    blocking_activity.query   AS current_statement_in_blocking_process
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON =
JOIN pg_catalog.pg_locks blocking_locks
    -- two lock rows conflict when every identifying column matches;
    -- IS NOT DISTINCT FROM treats NULL = NULL as true (plain = would not)
    ON blocking_locks.locktype = blocked_locks.locktype
    AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
    AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
    AND IS NOT DISTINCT FROM
    AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
    AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
    AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
    AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
    AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
    AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
    AND !=  -- exclude the waiter's own lock entry
JOIN pg_catalog.pg_stat_activity blocking_activity ON =
WHERE NOT blocked_locks.granted;

Blocked and blocking queries.

    activity.query, AS blocking_id,
    blocking.query AS blocking_query
FROM pg_stat_activity AS activity
JOIN pg_stat_activity AS blocking ON = ANY(pg_blocking_pids(;

Blocking queries runtimes over 1min.

-- sessions whose queries have been blocking others for more than 1 minute
-- (pid references restored; DISTINCT avoids one row per blocked victim)
SELECT DISTINCT AS blocking_id
FROM pg_stat_activity AS activity
JOIN pg_stat_activity AS blocking
    ON = ANY(pg_blocking_pids(
-- compare the whole elapsed interval: EXTRACT(MINUTES ...) reads only the minutes *component*,
-- so a query running exactly 2 hours would report 0 and be missed
WHERE now() - blocking.query_start > interval '1 minute';

Show count of queries that are causing blocking.

-- number of distinct sessions that have been blocking others for more than 10 minutes
-- (pid references restored; DISTINCT so one blocker with many victims counts once)
SELECT count(DISTINCT AS blocking_count
FROM pg_stat_activity AS activity
JOIN pg_stat_activity AS blocking
    ON = ANY(pg_blocking_pids(
-- full-interval comparison, not EXTRACT(MINUTES ...), which reads only the minutes component
WHERE now() - blocking.query_start > interval '10 minutes';

Stop a blocking query.

-- polite stop: cancels the backend's current query but leaves the session connected
SELECT pg_cancel_backend(PID_HERE);

Stop a blocking query that will not cancel.

-- hard stop: kills the whole backend session (use only when pg_cancel_backend has no effect)
SELECT pg_terminate_backend(PID_HERE);

Export to CSV

\c mydb
-- \COPY runs client-side, so the path is on the machine running psql; the whole command must stay on one line
-- (the missing FROM keyword made the original statement a syntax error)
\COPY ( SELECT * FROM MY_TABLE ) to '/tmp/export_my_table.csv' csv header