SysAdmin's shell one-liners
Storage
Listing files in that one production network area with 1M+ files (-U skips sorting, so ls returns immediately).
ls -1U
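A quick follow-up if all you need is a file count; the same unsorted listing feeds wc:
ls -1U | wc -l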
Top 10 largest files and folders
du -a . | sort -n -r | head -n 10
Top 30 largest files on this FS in MB
find . -mount -type f -ls | sort -rnk7 | head -30 | awk '{printf "%10d MB\t%s\n",($7/1024)/1024,$NF}'
Top 30 largest files and folders on this FS, limited to a depth of 4 directories, in MB
du -x --max-depth=4 . | sort -rn | head -n30 | awk '{printf "%10d MB\t%s\n",$1/1024,$2}'
Total size of files matching a pattern (in bytes)
ls -al files_* | awk '{ total += $5 }; END { print total }'
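A variant of the same tally printed in MB rather than raw bytes (files_* is just the example pattern above):
ls -al files_* | awk '{ total += $5 } END { printf "%.1f MB\n", total/1024/1024 }'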
Top 40 largest ZFS filesystems by used space
df -k -t zfs --output=used,target | sort -rn | head -40
Finding files modified within a date range.
touch --date "2020-05-01" /tmp/start
touch --date "2020-05-02" /tmp/end
find /var/log -type f -newer /tmp/start -not -newer /tmp/end
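With GNU find the temp files can be skipped entirely; -newermt compares against a date string directly:
find /var/log -type f -newermt "2020-05-01" ! -newermt "2020-05-02"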
Copying a partition table between disks
sfdisk -d /dev/sda | sfdisk /dev/sdb
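A safer variant: dump the table to a file first so you have a rollback copy (sda_table.bak is just an example name):
sfdisk -d /dev/sda > sda_table.bak
sfdisk /dev/sdb < sda_table.bak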
Force clearing ZFS labels (are you really sure you want to do this?!). The first dd wipes the labels at the end of the device, the second those at the start.
dd bs=512 if=/dev/zero of=/dev/sdX count=2048 seek=$(($(blockdev --getsz /dev/sdX) - 2048))
dd bs=512 if=/dev/zero of=/dev/sdX count=2048
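If the ZFS tooling on the host is recent enough to have it, the dedicated subcommand does the same job with less room for error:
zpool labelclear -f /dev/sdX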
Importing a zpool by-id
zpool export data
zpool import -f -d /dev/disk/by-id data
Sending a ZFS snapshot
zfs send data@backup | ssh [email protected] 'zfs receive -F backup/host1/data'
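Incremental follow-up send, assuming an earlier snapshot (here called data@previous) already exists on both sides:
zfs send -i data@previous data@backup | ssh [email protected] 'zfs receive -F backup/host1/data'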
Sending faster with mbuffer
zfs send data@backup | mbuffer -s 128k -m 1G 2>/dev/null | ssh [email protected] 'mbuffer -s 128k -m 1G | zfs receive -F backup/host1/data'
Sending fastest with nc (unencrypted; trusted networks only)
- on sender:
zfs send -c data@backup | nc -w 20 backup-1.home.lan 9090
- on receiver:
nc -w 2 -l -p 9090 | zfs receive -F backup/host1/data
Receiving (pulling) a ZFS volume
ssh [email protected] 'zfs send backup/host1/data@backup' | zfs receive -F data
Import all zpools by-id (-N skips mounting; mount afterwards as shown below)
zpool import -d /dev/disk/by-id -aN
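Then mount the imported datasets:
zfs mount -a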
Processes
Top 30 processes using SWAP
find /proc -maxdepth 1 -type d -name "[0-9]*" -exec grep -H VmSwap {}/status \; | sort -n -r -k2 | head -30
A friendlier variant that prints PID, name and swap usage, and skips processes with zero swap:
find /proc -maxdepth 2 -path "/proc/[0-9]*/status" -readable -exec awk -v FS=":" '{process[$1]=$2;sub(/^[ \t]+/,"",process[$1]);} END {if(process["VmSwap"] && process["VmSwap"] != "0 kB") printf "%10s %-30s %20s\n",process["Pid"],process["Name"],process["VmSwap"]}' '{}' \; | awk '{print $(NF-1),$0}' | sort -h | cut -d " " -f2-
Cron entry to log CPU usage. Useful on a shared host with a lot of remote processes / batch jobs.
*/5 * * * * /bin/date >> /root/cpu_usage.log && /bin/ps -eo pcpu,start,pid,user,args | sort -k 1 -r | head -20 >> /root/cpu_usage.log
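That log grows forever; a minimal logrotate sketch to keep it in check (the path and retention values are assumptions, adjust to taste):
# /etc/logrotate.d/cpu_usage
/root/cpu_usage.log {
    weekly
    rotate 4
    compress
    missingok
    notifempty
}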
Networking
Watching for new TCP connections
tcpdump "tcp[tcpflags] & (tcp-syn) != 0"
Scan network for an open port
nmap -sV -p 8080 192.168.0.0/24
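A variant that prints just the matching hosts, using nmap's grepable output:
nmap -p 8080 --open -oG - 192.168.0.0/24 | awk '/8080\/open/{print $2}'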
Checking for multiple MAC addresses on the network (more than one reply suggests an IP conflict)
arping -I bond0 10.0.1.0
Reviewing multicast addresses on a host
ip maddress
Cipher and protocol enumeration
nmap --script ssl-enum-ciphers -p 443 google.com
Certificate Check
openssl s_client -connect www.paulerrington.co.uk:443
Virtual host certificate check
openssl s_client -connect www.paulerrington.co.uk:443 -servername api.paulerrington.co.uk
Certificate expiry check
openssl s_client -connect www.paulerrington.co.uk:443 2>/dev/null | openssl x509 -noout -dates
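A scriptable variant: -checkend exits non-zero if the certificate expires within the given window (2592000 seconds = 30 days):
echo | openssl s_client -connect www.paulerrington.co.uk:443 2>/dev/null | openssl x509 -noout -checkend 2592000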
Check certificate
openssl x509 -in paulerrington.co.uk.cert -text | grep Subject
Generate a CSR
openssl req -nodes -newkey rsa:2048 -keyout paulerrington.co.uk.key -out paulerrington.co.uk.csr -subj "/C=GB/postalCode=XX1 2XX/ST=ABC/L=ABC/street=1st office,ind estate, area/O=PAUL ERRINGTON/OU=Infrastructure/OU=SSL/CN=paulerrington.co.uk"
Check CSR
openssl req -noout -text -in paulerrington.co.uk.csr | grep Subject
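Confirm the key, CSR and certificate belong together, assuming RSA (all three digests should match):
openssl rsa -noout -modulus -in paulerrington.co.uk.key | openssl md5
openssl req -noout -modulus -in paulerrington.co.uk.csr | openssl md5
openssl x509 -noout -modulus -in paulerrington.co.uk.cert | openssl md5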
Check deployed certificate
nmap -sV -sC paulerrington.co.uk
Clear an LDAP user caching issue (SSSD)
sss_cache -u paul.errington
MySQL
Select processes not sleeping
SELECT * FROM information_schema.processlist WHERE command != 'Sleep' ORDER BY id;
Select processes running for more than 1 minute (time >= 60 seconds)
SELECT id, user, time, state, info FROM information_schema.processlist WHERE command != 'Sleep' AND time >= 60 ORDER BY time DESC, id \G
Kill a user's running queries (generates the KILL statements for you to run)
SELECT group_concat('KILL ',id,';' SEPARATOR ' ') from (SELECT id FROM information_schema.processlist WHERE user = 'paul.errington') as thisuser;
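To execute the generated statements straight away rather than copy-pasting, a shell sketch (assumes credentials come from ~/.my.cnf):
mysql -N -B -e "SELECT CONCAT('KILL ', id, ';') FROM information_schema.processlist WHERE user = 'paul.errington'" | mysql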
Purge binary logs older than 10 days (treat 10 days as the minimum sensible retention).
/bin/mysql -e "PURGE BINARY LOGS BEFORE DATE_SUB( NOW( ), INTERVAL 10 DAY);"
Move binlogs to free up space (don't move large batches; limit to the 10 oldest at a time)
mkdir -p /root/temp_binlogs
head -10 mysql-bin.index
ionice -c 3 mv mysql-bin.{000001..000010} /root/temp_binlogs
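Once space is recovered, a sketch for cleaning up properly: move the files back and let MySQL purge them itself (the datadir path and log numbers here are assumptions):
ionice -c 3 mv /root/temp_binlogs/mysql-bin.{000001..000010} /var/lib/mysql/
mysql -e "PURGE BINARY LOGS TO 'mysql-bin.000011';"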
Export to CSV
# locate writable folder, ensure sufficient storage.
SELECT @@GLOBAL.secure_file_priv;
# export select as csv
SELECT * FROM MY_TABLE INTO OUTFILE '/data/mysql/export_my_table.csv'
FIELDS TERMINATED BY ','
ENCLOSED BY '"'
LINES TERMINATED BY '\n';
Database size in MB
SELECT table_schema "DB Name",
ROUND(SUM(data_length + index_length) / 1024 / 1024, 1) "DB Size in MB"
FROM information_schema.tables
GROUP BY table_schema;
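A per-table variant of the same query, assuming a schema named mydb:
SELECT table_name "Table Name",
ROUND((data_length + index_length) / 1024 / 1024, 1) "Size in MB"
FROM information_schema.tables
WHERE table_schema = 'mydb'
ORDER BY (data_length + index_length) DESC
LIMIT 10;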
Skip binlog for the current session (prevents propagation of DDL, e.g. long ALTER TABLE / index statements, to replicas)
SET sql_log_bin = OFF;
Skip a replication error (duplicate keys, indexes); the counter is applied when the slave threads restart
STOP SLAVE; SET GLOBAL sql_slave_skip_counter = 1; START SLAVE;
Skip replication errors by code, add to my.cnf and restart
slave-skip-errors = 1062,1032
Use mysqldump to create a backup for a second slave replica; the CHANGE MASTER statement in the dump points at the existing slave's master. WARNING: the IO thread will continue, but the SQL (slave) thread is stopped for the duration of the mysqldump!
mysqldump --dump-slave --master-data=2 --single-transaction productionDB | gzip --fast -c > productionDB_YYYYMMDD.sql.gz
Fast DB restore (skips the binlog)
gunzip -c productionDB_YYYYMMDD.sql.gz | mysql --init-command="SET SQL_LOG_BIN = 0;"
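The same restore with a progress bar, assuming pv is installed:
pv productionDB_YYYYMMDD.sql.gz | gunzip -c | mysql --init-command="SET SQL_LOG_BIN = 0;"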
PostgreSQL
Standby replication lag
SELECT now()-pg_last_xact_replay_timestamp() as replication_lag;
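Byte-level lag per standby, run on the primary (assumes PostgreSQL 10+ for the pg_wal_* names):
SELECT client_addr, state,
pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lag_bytes
FROM pg_stat_replication;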
Blocked processes and the PIDs blocking them
SELECT pid, usename, pg_blocking_pids(pid) as blocked_by,
query as blocked_query
FROM pg_stat_activity
WHERE cardinality(pg_blocking_pids(pid)) > 0;
Blocking processes
SELECT
blocked_locks.pid AS blocked_pid,
blocked_activity.usename AS blocked_user,
blocking_locks.pid AS blocking_pid,
blocking_activity.usename AS blocking_user,
blocked_activity.query AS blocked_statement,
blocking_activity.query AS current_statement_in_blocking_process
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype
AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.GRANTED;
Blocked and blocking queries.
SELECT
activity.pid,
activity.usename,
activity.query,
blocking.pid AS blocking_id,
blocking.query AS blocking_query
FROM pg_stat_activity AS activity
JOIN pg_stat_activity AS blocking ON blocking.pid = ANY(pg_blocking_pids(activity.pid));
Blocking queries running for over 1 minute.
SELECT blocking.pid AS blocking_id
FROM pg_stat_activity AS activity
JOIN pg_stat_activity AS blocking ON blocking.pid = ANY(pg_blocking_pids(activity.pid))
WHERE now() - blocking.query_start > interval '1 minute';
Show count of queries that have been causing blocking for over 10 minutes.
SELECT count(DISTINCT blocking.pid) AS blocking_count
FROM pg_stat_activity AS activity
JOIN pg_stat_activity AS blocking ON blocking.pid = ANY(pg_blocking_pids(activity.pid))
WHERE now() - blocking.query_start > interval '10 minutes';
Stop a blocking query.
SELECT pg_cancel_backend(PID_HERE);
Stop a blocking query that will not cancel.
SELECT pg_terminate_backend(PID_HERE);
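A sketch combining the two: cancel every distinct backend that has been blocking others for over 10 minutes (review the list with the queries above first!):
SELECT pg_cancel_backend(pid)
FROM (SELECT DISTINCT blocking.pid
      FROM pg_stat_activity AS activity
      JOIN pg_stat_activity AS blocking ON blocking.pid = ANY(pg_blocking_pids(activity.pid))
      WHERE now() - blocking.query_start > interval '10 minutes') AS blockers;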
Export to CSV
\c mydb
\COPY ( SELECT * FROM MY_TABLE ) to '/tmp/export_my_table.csv' csv header
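The matching import, assuming the target table already exists with the same columns:
\COPY MY_TABLE FROM '/tmp/export_my_table.csv' csv header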
Index usage
\COPY ( SELECT
idstat.relname AS TABLE_NAME,
indexrelname AS index_name,
idstat.idx_scan AS index_scans_count,
pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
tabstat.idx_scan AS table_reads_index_count,
tabstat.seq_scan AS table_reads_seq_count,
tabstat.seq_scan + tabstat.idx_scan AS table_reads_count,
n_tup_upd + n_tup_ins + n_tup_del AS table_writes_count,
pg_size_pretty(pg_relation_size(idstat.relid)) AS table_size
FROM
pg_stat_user_indexes AS idstat
JOIN
pg_indexes
ON
indexrelname = indexname
AND
idstat.schemaname = pg_indexes.schemaname
JOIN
pg_stat_user_tables AS tabstat
ON
idstat.relid = tabstat.relid
WHERE
indexdef !~* 'unique'
ORDER BY
idstat.idx_scan DESC,
pg_relation_size(indexrelid) DESC ) to '/tmp/index_usage.csv' csv header
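A related sketch: indexes that have never been scanned, largest first (candidates for review, not for automatic dropping):
SELECT indexrelname AS index_name,
pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
WHERE idx_scan = 0
ORDER BY pg_relation_size(indexrelid) DESC;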