Using a box to pick preferred Cloudflare IPs and upload them to GitHub
Prerequisites: git and the speed-test tool https://github.com/XIU2/CloudflareSpeedTest are installed on the box, and the box can push results to a Telegram bot.
With that in place, the script below runs the speed test and uploads the best IPs to GitHub to serve as cm's preferred-IP API; see cm's deployment code at https://github.com/cmliu/edgetunnel for reference.
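The script itself is not reproduced in these notes. Below is a minimal sketch of what it could look like, assuming CloudflareST lives in ~/CloudflareST, the GitHub repo serving as the preferred-IP API is already cloned to ~/ip-repo with push access configured, and TG_BOT_TOKEN / TG_CHAT_ID are exported; every path and variable name here is an assumption.

#!/bin/bash
# Hypothetical paths; adjust them to the box's layout
CFST_DIR=~/CloudflareST
REPO_DIR=~/ip-repo
RESULT=${CFST_DIR}/result.csv

# 1. Run the speed test; CloudflareST writes result.csv with the best IPs first
cd "${CFST_DIR}" && ./CloudflareST -o "${RESULT}"

# 2. Keep the top 10 IPs (first CSV column, skipping the header row)
tail -n +2 "${RESULT}" | head -n 10 | cut -d, -f1 > "${REPO_DIR}/ip.txt"

# 3. Commit and push so the file can be fetched as cm's preferred-IP API
cd "${REPO_DIR}"
git add ip.txt
git commit -m "update preferred IPs $(date '+%F %T')" && git push

# 4. Notify the Telegram bot that new IPs are up
curl -s "https://api.telegram.org/bot${TG_BOT_TOKEN}/sendMessage" \
    -d chat_id="${TG_CHAT_ID}" \
    --data-urlencode text="CF preferred IPs updated: $(head -n 1 "${REPO_DIR}/ip.txt")"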
RDS PostgreSQL exposes event triggers, which can be used to build features such as a DDL recycle bin, a DDL firewall, and incremental DDL subscription/sync. Used well, event triggers cut maintenance cost and protect data.
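As a concrete illustration, here is a minimal DDL-firewall sketch built on an event trigger: it rejects DROP TABLE and DROP SCHEMA outright. The database name mydb and the function/trigger names are made up; run it with sufficient privileges.

psql -d mydb <<'SQL'
-- Hypothetical DDL firewall: abort dangerous DDL before it starts
CREATE OR REPLACE FUNCTION ddl_firewall()
RETURNS event_trigger LANGUAGE plpgsql AS $$
BEGIN
    -- TG_TAG holds the command tag, e.g. 'DROP TABLE'
    RAISE EXCEPTION 'DDL firewall: % is blocked in this database', tg_tag;
END;
$$;

CREATE EVENT TRIGGER block_dangerous_ddl
    ON ddl_command_start
    WHEN TAG IN ('DROP TABLE', 'DROP SCHEMA')
    EXECUTE FUNCTION ddl_firewall();
SQL

A DDL recycle bin follows the same pattern with an ON sql_drop trigger that records pg_event_trigger_dropped_objects() into an audit table instead of raising an error.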
Backup script (Windows)
@echo off
echo execute pg_dump
set /p dbname=please input database name:
echo dump database is %dbname%
set /p ipaddress=please input database IP:
echo dump database IP is %ipaddress%
D:\ProgramFiles\PostgreSQL\pgAdmin4\v6\runtime\pg_dump.exe --file "D:\%dbname%" --host "%ipaddress%" --port "5432" --username "datalink" --exclude-schema=public --verbose --role "datalink" --format=c --blobs --encoding "UTF8" %dbname%
echo done!
pause

Back up all DDL
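The DDL-only dump command is not included in these notes. A plausible version, reusing the connection values from the script above (all of them assumptions), is pg_dump with --schema-only:

# Hypothetical: dump only the DDL (no data) of the datalink database to a plain SQL file
pg_dump --host 127.0.0.1 --port 5432 --username datalink \
    --schema-only --format=p --encoding UTF8 \
    --file /tmp/datalink_ddl.sql datalink

The SQL below estimates bloat for the 50 largest tables and generates a matching vacuum full statement for each: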
WITH bloat AS (
WITH t1 AS (
SELECT
schemaname,
tablename,
( 23 + CEIL ( COUNT ( * ) / 8.0 ) ) :: BIGINT nullheader,
MAX ( null_frac ) nullfrac,
CEIL ( SUM ( ( 1 - null_frac ) * avg_width ) ) :: BIGINT datawidth
FROM
pg_stats
--where schemaname='form'
GROUP BY
schemaname,
tablename
),
t2 AS (
SELECT
schemaname,
tablename,
( datawidth + 8 - ( CASE WHEN datawidth % 8 = 0 THEN 8 ELSE datawidth % 8 END ) ) -- avg data len
+ ( 1 - nullfrac ) * 24 + nullfrac * ( nullheader + 8 - ( CASE WHEN nullheader % 8 = 0 THEN 8 ELSE nullheader % 8 END ) ) avgtuplelen
FROM
t1
) SELECT
C.oid,
avgtuplelen
FROM
t2 T,
pg_class C,
pg_namespace n
WHERE
T.schemaname = n.nspname
AND C.relname = T.tablename
AND C.relnamespace = n.oid
AND relpages > 100
and c.relname in
( select relname from (
select relname , reltuples::numeric as 记录数
from pg_class
where relkind = 'r'
)t order by 记录数 desc limit 50)
),
parti AS (
SELECT
parti.inhparent,
parti.inhrelid,
stat.n_dead_tup,
c.relpages,
c.reltuples
FROM
(
SELECT
inhparent,
inhrelid
FROM
pg_inherits inh UNION ALL
( SELECT b.inhparent AS grandpa, A.inhrelid me FROM pg_inherits A INNER JOIN pg_inherits b ON A.inhparent = b.inhrelid )
) AS parti
inner join pg_class c on parti.inhrelid = c.oid
LEFT JOIN pg_stat_user_tables stat ON parti.inhrelid = stat.relid
where c.relname in
( select relname from (
select relname , reltuples::numeric as 记录数
from pg_class
where relkind = 'r'
)t order by 记录数 desc limit 50)
) SELECT
n.nspname "tableSchema",
C.relname "tableName",
'vacuum full '||C.relname||';' "sql",
round(SUM ( pg_total_relation_size ( COALESCE ( parti1.inhrelid, C.oid ) )+0.0 )/1024/1024/1024,2) AS "表大小 GB", -- size of the table plus its child partitions, indexes included
round(
SUM ( pg_total_relation_size ( COALESCE ( parti1.inhrelid, C.oid ) ) ) / ( SELECT SUM ( pg_database_size ( datname ) ) AS dbsize FROM pg_database ),
4
) AS "占数据文件比例%",-- 表占数据文件比例
to_char(COALESCE ( MAX ( c.reltuples ), 0 ) + COALESCE ( SUM ( COALESCE ( parti1.reltuples, 0 ) ), 0 ),'9999999999') AS "元组(记录行数)",
CASE
WHEN (coalesce(max( bloat.avgtuplelen), 0) = 0 and coalesce(avg( parti1.avgtuplelen), 0) = 0) or MAX ( COALESCE (c.relpages , 0 ) ) + SUM ( COALESCE ( parti1.relpages, 0 )) = 0 THEN
0 ELSE
round((1 - CEIL ( (MAX ( COALESCE ( c.reltuples, 0 )) + SUM ( COALESCE ( parti1.reltuples, 0 ))) *
(case when max( parti1.avgtuplelen) is not null then AVG( parti1.avgtuplelen) else max ( COALESCE ( bloat.avgtuplelen, 0 ) ) end ) / 8168) /
( MAX ( COALESCE (c.relpages , 0 ) ) + SUM ( COALESCE ( parti1.relpages, 0 )) ))::NUMERIC, 2)*100
END AS "表膨胀率%",
COALESCE ( MAX ( stat.n_dead_tup ), 0 ) + COALESCE ( SUM ( COALESCE ( parti1.n_dead_tup, 0 ) ), 0 ) AS "死元组(行)" -- dead tuples (rows) in the table
FROM
pg_class
C INNER JOIN pg_namespace n ON C.relnamespace = n.oid
LEFT JOIN pg_stat_user_tables stat ON C.oid = stat.relid
LEFT JOIN (select p.*, b.* from parti p left join bloat as b ON p.inhrelid = b.oid) as parti1 on c.oid = parti1.inhparent
LEFT JOIN bloat ON C.oid = bloat.oid
WHERE
n.nspname NOT IN ( 'pg_catalog', 'information_schema' )
AND n.nspname !~ '^pg_toast'
AND C.relkind IN ( 'r', 'p' )
AND C.oid NOT IN ( SELECT inhrelid FROM pg_inherits )
and c.relname in
( select relname from (
select relname , reltuples::numeric as 记录数
from pg_class
where relkind = 'r'
)t order by 记录数 desc limit 50)
GROUP BY
C.relname,
n.nspname
ORDER BY
-- "表大小 GB" DESC
"表膨胀率%" DESC
limit 100
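To put the report to use, one workflow (a sketch; the file name bloat_check.sql and the table name are assumptions) is:

# Run the bloat report and review the generated statements
psql -d datalink -f bloat_check.sql

# vacuum full rewrites the whole table under an ACCESS EXCLUSIVE lock,
# so run the generated statements only in a maintenance window, e.g.:
psql -d datalink -c 'VACUUM FULL some_bloated_table;'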
Linux backup script (PostgreSQL running in Docker)
#!/bin/bash
# Backup tip: when the gkit pipeline is rolled out, put backups on their own logical partition; never share a disk with the live database
# Backup file directory
BKUP_DATADIR=/opt/srv/smartcare/hypergis/pg-data
# Log file directory
log_dir=/opt/srv/smartcare/hypergis/scripts
# Backup log
LOG_BK_FILE=${log_dir}/backupCtl_bk.log
LOG_FILE=${log_dir}/backupCtl.log
# Rotate the log once it exceeds 10 MB
MAX_LOGFILE_SIZE=10485760
# Write a log message
function logMsg()
{
echo "$1" >> ${LOG_FILE}
}
# Rotate the log file, keeping one historical copy
function clearLogFile()
{
logFileSize=$(ls -l ${LOG_FILE} | awk '{print $5}')
if [[ ${logFileSize} -gt ${MAX_LOGFILE_SIZE} ]];then
logMsg "`date '+%Y-%m-%d %H %M %S'`:log file is greater than ${MAX_LOGFILE_SIZE},backup it now"
rm -rf ${LOG_BK_FILE}
mv ${LOG_FILE} ${LOG_BK_FILE}
fi
}
# Back up the data
function backupData()
{
logMsg "`date '+%Y-%m-%d %H %M %S'`:begin backup db"
local filename="datalinkbackup_`date '+%Y-%m-%d'`"
local backfile=${BKUP_DATADIR}'/'${filename}
docker exec $(docker ps |grep gis-postgresql:11.13|awk '{print $1}') sh -c 'source /opt/gis-postgresql/config/postgres_profile.sh && pg_dump --port 1523 --username datalink --no-password -d datalink --host 127.0.0.1 --format=c --blobs --encoding "UTF8" --verbose --file '${backfile}' >> '${LOG_FILE}' 2>&1'
logMsg "`date '+%Y-%m-%d %H %M %S'`:end backup db"
}
# Prune old backups, keeping roughly the 3 most recent files
function clearFile()
{
filePath=$1
fileFlag=$2
logMsg "`date '+%Y-%m-%d %H %M %S'`:begin clearFile"
cd ${filePath}
local fileNum=$(ls -l |grep ${fileFlag}|wc -l)
# Backups are pruned by count: once 3 or more exist, the oldest is deleted
if [ ${fileNum} -ge 3 ];then
local deleteFile=$(ls -l |grep ${fileFlag} | awk -F ' ' '{print $9}'|awk 'NR==1{print}')
local time=$(stat -c %Y ${deleteFile})
for file in $(ls -l |grep ${fileFlag} | awk -F ' ' '{print $9}')
do
local fileCreateDate=$(stat -c %Y ${file})
if [[ ${fileCreateDate} -lt ${time} ]];then
deleteFile=${file}
fi
done
if [ -n "${deleteFile}" ];then
#clear old data
rm -rf ${deleteFile}
fi
fi
logMsg "`date '+%Y-%m-%d %H %M %S'`:end clearFile"
}
#backup
function backup_main()
{
# Determine whether this node is the primary from pg_controldata's cluster state
primary_flag=`docker exec $(docker ps |grep gis-postgresql:11.13|awk '{print $1}') sh -c 'source /opt/gis-postgresql/config/postgres_profile.sh && pg_controldata -D /opt/srv/smartcare/hypergis/pg-data | grep "in production" |wc -l'`
if [[ "${primary_flag}" -eq 1 ]] then
# Rotate the backup log
clearLogFile
logMsg "`date '+%Y-%m-%d %H %M %S'`:this is primary db"
# Primary node: back up the data
backupData ${BKUP_DATADIR}
# Prune old backup files (keep the 3 most recent)
clearFile ${BKUP_DATADIR} "datalinkbackup"
else
echo "this is standby db don backup db"
fi
}
backup_main
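To run the script on a schedule, a host crontab entry along these lines could be used (the script path and timing are assumptions):

# Hypothetical crontab entry: run the backup every day at 02:00
0 2 * * * /opt/srv/smartcare/hypergis/scripts/pg_backup.sh >/dev/null 2>&1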
PostgreSQL lock mechanism
PostgreSQL has two kinds of locks: table-level locks and row-level locks. To query, insert, update, or delete rows in a table, a session first acquires the table-level lock and then the row-level locks.
Most of the understanding of PostgreSQL's lock mechanism below comes from the book 《PostgreSQL修炼之道:从小工到专家》 by 唐成, together with summaries from blog posts. Actual testing shows that some of those points do not quite hold; the real-world cases later include notes on them.
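A quick way to see both levels in action (a sketch against a hypothetical table t) is to take a row lock and then look at pg_locks from the same session:

psql -d datalink <<'SQL'
BEGIN;
-- FOR UPDATE takes a ROW SHARE lock on the table
-- plus a row-level lock on the matched tuple
SELECT * FROM t WHERE id = 1 FOR UPDATE;

-- Table-level locks are visible in pg_locks; row-level locks are
-- recorded in the tuple headers, so they do not show up here
SELECT locktype, relation::regclass, mode, granted
FROM pg_locks
WHERE pid = pg_backend_pid();

COMMIT;
SQL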