GreenPlum
GreenPlum

GreenPlum

服务器

# ip                    root密码      gpadmin密码
# 192.168.1.122         123456      2834640451@qq.com 
# 192.168.1.160         6           123456
# 192.168.1.161         6           123456
# 192.168.1.156         root        123456
ip                  root密码      gpadmin密码
192.168.1.123       123456          123456
192.168.1.124       123456          123456
192.168.1.125       123456          123456
192.168.1.126       123456          123456

用户组(每个节点)

su root

groupadd -g 520 gpadmin
useradd -g 520 -u 520 -m -d /home/gpadmin -s /bin/bash gpadmin
mkdir -p /data/greenplum/
chown -R gpadmin:gpadmin /data/greenplum/
mkdir -p /home/gpadmin/
chown -R gpadmin:gpadmin /home/gpadmin/

passwd gpadmin
123456

# 添加/etc/sudoers文件的写权限。也就是输入命令"chmod u+w /etc/sudoers"。
#   注:/etc/sudoers文件是只读文件
#   注:更安全的做法是使用 visudo 编辑,保存时会做语法检查,避免写坏 sudoers 导致无法提权
chmod u+w /etc/sudoers
vi /etc/sudoers
# 添加
gpadmin ALL=(ALL) ALL
# 撤销/etc/sudoers文件的写权限。也就是输入命令"chmod u-w /etc/sudoers"。
chmod u-w /etc/sudoers

hosts(每个节点)

sudo vi /etc/hosts


192.168.1.170 gp-master
192.168.1.171 gp-datanode01
192.168.1.172 gp-datanode02
192.168.1.173 gp-datanode03


# 验证
ping gp-datanode01
ping gp-datanode02
ping gp-datanode03

sysctl.conf(每个节点)

# kernel.shmall = _PHYS_PAGES / 2 =echo $(expr $(getconf _PHYS_PAGES) / 2)
# kernel.shmmax = kernel.shmall * PAGE_SIZE =echo $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE))
# kernel.shmall、kernel.shmmax根据实际情况配置

# kernel.shmall
echo $(expr $(getconf _PHYS_PAGES) / 2)
# kernel.shmmax
echo $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE))

# 注意:系统标准内核参数文件为 /etc/sysctl.conf(而非 sysctl.config),写错文件名会导致重启后参数不生效
sudo vi /etc/sysctl.conf

# kernel.shmall = _PHYS_PAGES / 2 =echo $(expr $(getconf _PHYS_PAGES) / 2)
# kernel.shmmax = kernel.shmall * PAGE_SIZE =echo $(expr $(getconf _PHYS_PAGES) / 2 \* $(getconf PAGE_SIZE))
kernel.shmall = 232875
kernel.shmmax = 953856000
kernel.shmmni = 4096
vm.overcommit_memory = 2
vm.overcommit_ratio = 95
net.ipv4.ip_local_port_range = 10000 65535
kernel.sem = 50100 128256000 50100 2560
kernel.sysrq = 1
kernel.core_uses_pid = 1
kernel.msgmnb = 65536
kernel.msgmax = 65536
kernel.msgmni = 2048
net.ipv4.tcp_syncookies = 1
net.ipv4.conf.default.accept_source_route = 0
net.ipv4.tcp_max_syn_backlog = 4096
net.ipv4.conf.all.arp_filter = 1
net.core.netdev_max_backlog = 10000
net.core.rmem_max = 2097152
net.core.wmem_max = 2097152
vm.swappiness = 10
vm.zone_reclaim_mode = 0
vm.dirty_expire_centisecs = 500
vm.dirty_writeback_centisecs = 100
#vm.dirty_background_ratio = 0
#vm.dirty_ratio = 0
#vm.dirty_background_bytes = 1610612736
#vm.dirty_bytes = 4294967296
vm.dirty_background_ratio = 3
vm.dirty_ratio = 10
vm.min_free_kbytes = 240234
#vm.overcommit_memory = 1


# 如果系统内存大于64G
# vm.dirty_background_ratio = 0
# vm.dirty_ratio = 0
# vm.dirty_background_bytes = 1610612736 # 1.5GB
# vm.dirty_bytes = 4294967296 # 4GB

# 如果系统内存小于64G
vm.dirty_background_ratio = 3
vm.dirty_ratio = 10



# 生效(标准文件为 /etc/sysctl.conf;不带参数的 sysctl -p 默认也读取该文件)
sudo sysctl -p /etc/sysctl.conf

limits.conf(每个节点)

sudo vi /etc/security/limits.conf

* soft nofile 65536
* hard nofile 65536
* soft nproc 131072
* hard nproc 131072

# 验证(需重新登录会话后才生效)
ulimit -n
ulimit -Hn
# 验证 nproc 限制
ulimit -u
ulimit -Hu

master安装

#先安装Greenplum相关依赖包
sudo yum install -y wget net-tools apr apr-util krb5-devel libyaml rsync zip libevent

#通过wget命令下载Greenplum安装包
wget https://download.nuaa.cf/greenplum-db/gpdb/releases/download/6.10.1/greenplum-db-6.10.1-2-rhel7-x86_64.rpm

#RPM安装需要以root用户身份执行,并安装到指定目录/data/greenplum目录下
sudo rpm -ivh --prefix=/data/greenplum/ greenplum-db-6.10.1-2-rhel7-x86_64.rpm

sudo chown -R gpadmin:gpadmin /data/greenplum
#查看安装目录
cd /data/greenplum/greenplum-db
ll

all_hosts和seg_hosts

vi /home/gpadmin/all_hosts
gp-master
gp-datanode01
gp-datanode02
gp-datanode03

vi /home/gpadmin/seg_hosts
gp-datanode01
gp-datanode02
gp-datanode03

chown -R gpadmin:gpadmin /home/gpadmin/

环境变量

#在gpadmin的~/.bash_profile 添加环境变量  (/etc/profile不加)
su gpadmin
vi ~/.bash_profile

export GP_HOME=/data/greenplum/greenplum-db

GPHOME=/data/greenplum/greenplum-db-6.10.1

PYTHONHOME="${GPHOME}/ext/python"
export PYTHONHOME

PATH="${PYTHONHOME}/bin:${PATH}"
LD_LIBRARY_PATH="${PYTHONHOME}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
PYTHONPATH="${GPHOME}/lib/python"
PATH="${GPHOME}/bin:${PATH}"
LD_LIBRARY_PATH="${GPHOME}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"

if [ -e "${GPHOME}/etc/openssl.cnf" ]; then
    OPENSSL_CONF="${GPHOME}/etc/openssl.cnf"
fi

export GPHOME
export PATH
export PYTHONPATH
export LD_LIBRARY_PATH
export OPENSSL_CONF
export MASTER_DATA_DIRECTORY=/data/greenplum/gpdata/gpmaster/gpseg-1
export PGPORT=5432


#环境变量生效(如果python包或gpssh、gpscp报错,重新运行此代码)
source ~/.bash_profile

# 验证
echo $GP_HOME 
echo $GPHOME
echo $PYTHONHOME
echo $PATH
# env

ssh密钥

# 保证各台设备环境

## 确保 ~/.ssh  目录权限正确
restorecon -Rv ~/.ssh

## 确保 /etc/ssh/sshd_config  包含以下配置,修改后请执行  sudo systemctl restart sshd  生效
PermitRootLogin yes
PasswordAuthentication yes
GSSAPIAuthentication no

## 确保 /home/gpadmin  权限正确(sudo systemctl status sshd  可查看连接失败的信息)
## ls -ld /home/gpadmin    输出应为  drwxr-x---   不一致请执行以下命令
sudo chown gpadmin:gpadmin /home/gpadmin
sudo chmod 750 /home/gpadmin
# 第一步:在各个节点服务器上生成ssh密钥 gpadmin用户
ssh-keygen
y
连续enter

ssh-copy-id gp-master
ssh-copy-id gp-datanode01
ssh-copy-id gp-datanode02
ssh-copy-id gp-datanode03


####    或使用以下自动化脚本
sudo yum install -y expect
vi /data/greenplum/greenplum-db/delssh.sh 
#!/usr/bin/bash
ssh-keygen -R gp-master
ssh-keygen -R gp-datanode01
ssh-keygen -R gp-datanode02
ssh-keygen -R gp-datanode03
rm -rf ~/.ssh
mkdir ~/.ssh
restorecon -Rv ~/.ssh

vi /data/greenplum/greenplum-db/ssh.sh 
#!/usr/bin/expect
spawn ssh-keygen -t rsa -b 2048
expect {
    "Overwrite" {send "y\n"; exp_continue}
    "Enter file in" {send "\n"; exp_continue}
    "Enter passphrase" {send "\n"; exp_continue}
    "Enter same passphrase" {send "\n"; exp_continue}
}
spawn ssh-copy-id gp-master
expect {
    "yes/no" {send "yes\n"; exp_continue}
    "*assword" {send "123456\n"; exp_continue}
}
spawn ssh-copy-id gp-datanode01
expect {
    "yes/no" {send "yes\n"; exp_continue}
    "*assword" {send "123456\n"; exp_continue}
}
spawn ssh-copy-id gp-datanode02
expect {
    "yes/no" {send "yes\n"; exp_continue}
    "*assword" {send "123456\n"; exp_continue}
}
spawn ssh-copy-id gp-datanode03
expect {
    "yes/no" {send "yes\n"; exp_continue}
    "*assword" {send "123456\n"; exp_continue}
}

# 先赋予执行权限再运行
chmod +x /data/greenplum/greenplum-db/delssh.sh /data/greenplum/greenplum-db/ssh.sh
/data/greenplum/greenplum-db/delssh.sh
/data/greenplum/greenplum-db/ssh.sh
####



# 交换秘钥
gpssh-exkeys -f /home/gpadmin/all_hosts

#在各机子上验证
ssh gp-master
ssh gp-datanode01
ssh gp-datanode02
ssh gp-datanode03

# ssh连通测试(若失敗,重启各个服务器再试)
gpssh -f /home/gpadmin/all_hosts -e 'free'

gpscp -f /home/gpadmin/all_hosts /home/gpadmin/seg_hosts =:/home/gpadmin #分发seg_hosts
gpscp -f /home/gpadmin/all_hosts /home/gpadmin/all_hosts =:/home/gpadmin #分发all_hosts

分发

gpssh -f /home/gpadmin/seg_hosts mkdir -p /data/greenplum/
gpssh -f /home/gpadmin/seg_hosts chown -R gpadmin:gpadmin /data/greenplum/

#把主节点安装好的程序压缩后分发至每个子节点
cd /data/greenplum/
tar -cf gpdb6.10.1.tar greenplum-db-6.10.1/ #压缩
gpscp -f /home/gpadmin/seg_hosts gpdb6.10.1.tar =:/data/greenplum/ #分发

#批量解压文件
gpssh -f /home/gpadmin/seg_hosts tar -xf /data/greenplum/gpdb6.10.1.tar -C /data/greenplum/ 

# 验证解压
gpssh -f /home/gpadmin/all_hosts -e ls -l $GPHOME

# 每个节点
gpssh -f /home/gpadmin/seg_hosts chown -R gpadmin:gpadmin /data/greenplum/

gpssh -f /home/gpadmin/seg_hosts ln -s /data/greenplum/greenplum-db-6.10.1 /data/greenplum/greenplum-db #建立软链接

master、primary、mirror目录

#在所有节点创建master目录
gpssh -f /home/gpadmin/all_hosts mkdir -p  /data/greenplum/gpdata/gpmaster
 #在从节点分布创建primary目录和mirror目录
gpssh -f /home/gpadmin/seg_hosts mkdir -p /data/greenplum/gpdata/primary
gpssh -f /home/gpadmin/seg_hosts mkdir -p /data/greenplum/gpdata/mirror

目录owner设置

# 递归修改目录下全部文件的主人为用户gpadmin
gpssh -f /home/gpadmin/seg_hosts chown -R gpadmin:gpadmin /data/greenplum/
gpssh -f /home/gpadmin/seg_hosts chown -R gpadmin:gpadmin /home/gpadmin

gpinitsystem_config

vi /data/greenplum/greenplum-db-6.10.1/docs/cli_help/gpconfigs/gpinitsystem_config
先注释掉参数值

ARRAY_NAME="Greenplum"
SEG_PREFIX=gpseg
PORT_BASE=40000
declare -a DATA_DIRECTORY=(/data/greenplum/gpdata/primary /data/greenplum/gpdata/primary) #目录数决定每个数据节点上的Segment实例数,此处为每个数据节点创建2个实例
MASTER_HOSTNAME=gp-master
MASTER_DIRECTORY=/data/greenplum/gpdata/gpmaster
MASTER_PORT=5432
TRUSTED_SHELL=/usr/bin/ssh  
CHECK_POINT_SEGMENTS=8
ENCODING=UNICODE
MIRROR_PORT_BASE=50000
# 注:REPLICATION_PORT_BASE 与 MIRROR_REPLICATION_PORT_BASE 为 GP5 参数,GP6 已移除,配置了也会被忽略,可不填
REPLICATION_PORT_BASE=41000
MIRROR_REPLICATION_PORT_BASE=51000
declare -a MIRROR_DATA_DIRECTORY=(/data/greenplum/gpdata/mirror /data/greenplum/gpdata/mirror) #对应的镜像实例数量要和主实例一致;如果不创建镜像则留空
MACHINE_LIST_FILE=/home/gpadmin/seg_hosts

gpscp -f /home/gpadmin/seg_hosts /data/greenplum/greenplum-db-6.10.1/docs/cli_help/gpconfigs/gpinitsystem_config =:/data/greenplum/greenplum-db-6.10.1/docs/cli_help/gpconfigs/ #分发

添加swap分区

# 添加swap分区(每个服务器上)。仅 dd 出文件并未启用 swap,还需 chmod/mkswap/swapon
sudo dd if=/dev/zero of=/var/swapfile bs=1024 count=2097152
sudo chmod 600 /var/swapfile
sudo mkswap /var/swapfile
sudo swapon /var/swapfile
# 如需开机自动挂载,在 /etc/fstab 追加: /var/swapfile swap swap defaults 0 0

# (每个服务器上)
sudo yum install net-tools

greenplum初始化

# 验证每个节点的配置
gpssh -f /home/gpadmin/all_hosts cat /etc/hosts

gpssh -f /home/gpadmin/all_hosts cat /etc/sysctl.conf

gpssh -f /home/gpadmin/all_hosts cat /etc/security/limits.conf

gpssh -f /home/gpadmin/all_hosts cat /home/gpadmin/all_hosts

gpssh -f /home/gpadmin/all_hosts cat /home/gpadmin/seg_hosts

# master
cat ~/.bash_profile

gpssh -f /home/gpadmin/all_hosts cat /data/greenplum/greenplum-db-6.10.1/docs/cli_help/gpconfigs/gpinitsystem_config



su gpadmin

gpinitsystem -a -c /data/greenplum/greenplum-db-6.10.1/docs/cli_help/gpconfigs/gpinitsystem_config -s gp-datanode03 -h /home/gpadmin/seg_hosts

Continue with Greenplum creation Yy|Nn (default=N):
Y

# 数据库测试
[gpadmin@gp-master ~]$  psql -d postgres

postgis插件安装

#安装postgis插件
下载地址 https://network.pivotal.io/products/vmware-tanzu-greenplum#/releases/797473/file_groups/3133

gpscp -f /home/gpadmin/seg_hosts "/home/gpadmin/Downloads/postgis-2.5.4+pivotal.3.build.7-gp6-rhel7-x86_64.gppkg" =:/home/gpadmin/Downloads/ #分发

gpssh -f /home/gpadmin/seg_hosts ll /home/gpadmin/Downloads/

gppkg -i '/home/gpadmin/Downloads/postgis-2.5.4+pivotal.3.build.7-gp6-rhel7-x86_64.gppkg'
# 卸载
# gppkg -r  postgis-2.5.4+pivotal.2.build.6-gp6-rhel7-x86_64.gppkg

gpssh -f /home/gpadmin/all_hosts ll /data/greenplum/greenplum-db-6.10.1/share/postgresql/contrib/postgis-2.5/install/

# 1.使postgis扩展生效(仅针对当前 databasename 数据库)
psql -d xiangcun
xiangcun=# CREATE EXTENSION postgis;

# 另一种安装方法(仅针对当前 databasename 数据库)等同于 CREATE EXTENSION postgis;
psql -d xiangcun -f ${GPHOME}/share/postgresql/contrib/postgis-2.5/install/postgis.sql
# psql -d mydatabase -f ${GPHOME}/share/postgresql/contrib/postgis-2.1/postgis_comments.sql
psql -d mydatabase -f ${GPHOME}/share/postgresql/contrib/postgis-2.5/install/rtpostgis.sql
# psql -d mydatabase -f ${GPHOME}/share/postgresql/contrib/postgis-2.1/raster_comments.sql
psql -d mydatabase -f ${GPHOME}/share/postgresql/contrib/postgis-2.5/install/spatial_ref_sys.sql

错误解决

/bin/touch: cannot touch ‘/data/greenplum/gpdata/gpmaster/tmp_file_test’: No such file or directory
20221219:20:50:30:061437 gpinitsystem:mml:gpadmin-[FATAL]:-Cannot write to /data/greenplum/gpdata/gpmaster on master host  Script Exiting!
解决:创建的目录和配置参数的目录要一致:MASTER_DIRECTORY=/data/greenplum/gpdata/gpmaster

20221221:23:05:02:011848 gpinitsystem:mml1:gpadmin-[FATAL]:-Standby host directory /data/greenplum/gpdata/gpmaster, does not exist Script Exiting!
解决:从服务器也要新建/data/greenplum/gpdata/gpmaster目录

20221219:20:54:15:062213 gpinitsystem:mml:gpadmin-[WARN]:-Master hostname gp-master does not match hostname output

20221219:20:54:16:062213 gpinitsystem:mml:gpadmin-[FATAL]:-Number of primary directories does not match number of mirror directories Script Exiting!
解决:DATA_DIRECTORY 和 MIRROR_DATA_DIRECTORY 配置检查,缩进检查,注意注释要前要空一格

File system loop detected; ‘/data/greenplum/greenplum-db-6.10.1/greenplum-db-6.10.1’ is part of the same file system loop as ‘/data/greenplum/greenplum-db-6.10.1

WARNING: enabling "trust" authentication for local connections
You can change this by editing pg_hba.conf or using the option -A, or
--auth-local and --auth-host, the next time you run initdb.

Success. You can now start the database server using:
    /data/greenplum/greenplum-db-6.10.1/bin/postgres -D /data/greenplum/gpdata/gpmaster/gpseg-1
or
    /data/greenplum/greenplum-db-6.10.1/bin/pg_ctl -D /data/greenplum/gpdata/gpmaster/gpseg-1 -l logfile start

System was started in master-only utility mode - only utility mode connections are allowed
解决: sudo yum install net-tools

Some primary/mirror segment pairs were found to be not in sync Script Exiting!
解决: 添加swap分区(每个服务器上),创建文件后还需格式化并启用
sudo dd  if=/dev/zero  of=/var/swapfile  bs=1024  count=2097152
sudo chmod 600 /var/swapfile && sudo mkswap /var/swapfile && sudo swapon /var/swapfile

gpcreateseg.sh:mml1:gpadmin-[FATAL]:- Command export GPHOME=/data/greenplum/greenplum-db-6.10.1; . /data/greenplum/greenplum-db-6.10.1/greenplum_path.sh; rm -rf /data/greenplum/gpdata/mirror/gpseg2; /data/greenplum/greenplum-db-6.10.1/bin/pg_basebackup --xlog-method=stream --slot='internal_wal_replication_slot' -R -c fast -E ./db_dumps -E ./gpperfmon/data -E ./gpperfmon/logs -D /data/greenplum/gpdata/mirror/gpseg2 -h gp-datanode02 -p 40000 --target-gp-dbid 10; on gp-datanode03 failed with error status 1
解决: 查看/etc/hosts和/home/gpadmin/seg_hosts是否配置正确

"Failed system call was semget(50001031, 17, 03600).","This error does *not* mean that you have run out of disk space.  It occurs when either the system limit for the maximum number of semaphore sets (SEMMNI), or the system wide maximum number of semaphores (SEMMNS), would be exceeded.  You need to raise the respective kernel parameter.  Alternatively, reduce PostgreSQL's consumption of semaphores by reducing its max_connections parameter."
解决:/etc/sysctl.conf 中的 kernel.sem(SEMMSL SEMMNS SEMOPM SEMMNI)按实际情况调大后 sysctl -p 生效

greenplum完整卸载

# cd到/home/gpadmin/gpAdminLogs/,bash最新文件
bash /home/gpadmin/gpAdminLogs/backout_gpinitsystem_gpadmin_20221221_231004

# gpssh -f /home/gpadmin/seg_hosts rm -rf /data/greenplum/
# rm -rf /data/greenplum/greenplum-db-6.10.1/
# rm -rf /data/greenplum/greenplum-db
# gpssh -f /home/gpadmin/seg_hosts rm -rf /data/greenplum/greenplum-db-6.10.1/
# gpssh -f /home/gpadmin/seg_hosts rm -rf /data/greenplum/greenplum-db
# gpssh -f /home/gpadmin/seg_hosts rm -f /data/greenplum/gpdb6.10.1.tar

sudo yum remove greenplum-db-6.x86_64
rm -rf /data/greenplum/

#删除相应环境变量 root的/etc/profile和gpadmin的~/.bash_profile 
gedit /etc/profile
source /etc/profile

su gpadmin
gedit ~/.bash_profile 
source ~/.bash_profile 

## 删除用户
userdel gpadmin
## 删除用户组(groupdel 的参数是组名而非GID)
groupdel gpadmin
# groupdel 520  # 错误用法:groupdel 不接受GID;gid=520 的 gpadmin 组已由上一条命令删除

# 清除密钥交互信息
su root
su gpadmin
cd ~/.ssh
more known_hosts
ssh-keygen -R gp-master
ssh-keygen -R gp-datanode01
ssh-keygen -R gp-datanode02
ssh-keygen -R gp-datanode03
ssh-keygen -R 192.168.1.160
ssh-keygen -R 192.168.1.161
ssh-keygen -R 192.168.1.122
ssh-keygen -R 192.168.1.123
ssh-keygen -R 192.168.1.124
ssh-keygen -R 192.168.1.125
ssh-keygen -R 192.168.1.126
ssh-keygen -R mml
ssh-keygen -R mml1
ssh-keygen -R mml2
ssh-keygen -R mml3
ssh-keygen -R mml4

greenplum关闭并清理

cd /home/gpadmin/gpAdminLogs/
ls

bash backout_gpinitsystem_gpadmin_20191218_203938

gpstop
# 若执行后仍然未清理干净,可执行一下语句后,再重新安装:
pg_ctl -D /data/greenplum/gpdata/gpmaster/gpseg-1 stop  # 即 $MASTER_DATA_DIRECTORY,注意与本文安装目录保持一致
gpssh -f /home/gpadmin/all_hosts rm -f /tmp/.s.PGSQL.5432 /tmp/.s.PGSQL.5432.lock
gpssh -f /home/gpadmin/all_hosts rm -rf /data/greenplum/gpdata/gpmaster/gpseg*
# 所有数据节点 
# gpssh -f /home/gpadmin/all_hosts ll /data/greenplum/gpdata/primary/
gpssh -f /home/gpadmin/all_hosts rm -rf /data/greenplum/gpdata/primary/gpseg*
gpssh -f /home/gpadmin/all_hosts rm -rf /data/greenplum/gpdata/mirror/gpseg*

gp命令

# 恢复
gprecoverseg -a 
# 启动
gpstart -a 
# 状态
gpstate -e -a 
# 停止
gpstop  -a 
# 重启
gpstop -r -a 
# 使用gpstop工具重新载入配置文件更改而不关闭系统
gpstop -u -a 
# 以快速模式停止Greenplum数据库:在关闭前回滚所有正在进行中的事务并且中断所有连接。
gpstop -M fast -a 

# 关闭数据库
gpstop -a 
# 开启数据库
gpstart -a

# 查看max_connections属性
gpconfig -s max_connections

#错误日志
cd /home/gpadmin/gpAdminLogs/

# gp的日志不是.log结尾的 是在下边的路径下的.csv文件
cd /data/greenplum/gpdata/gpmaster/gpseg-1/pg_log
# 可以使用 -t 根据实际进行降序排序 查看最新的日志文件
ll -t

验证

#ssh连通测试
gpssh -f /home/gpadmin/all_hosts -e 'free'

#验证安装
gpssh -f /home/gpadmin/all_hosts -e ls -l $GPHOME

#集群的网络性能测试
gpcheckperf -f /home/gpadmin/seg_hosts -r N -d /tmp

#生产环境测试
gpcheckperf -f /home/gpadmin/seg_hosts -r ds -D -d /data/greenplum/gpdata/primary


#数据库测试
[gpadmin@gp-master ~]$  psql -d postgres
psql (9.4.20)
Type "help" for help.
postgres=#
#退出
\q

psql配置

## 配置客户端认证

# 登录psql
psql -d postgres

# psql 创建新角色 postgres
CREATE ROLE postgres WITH LOGIN; 
ALTER ROLE postgres WITH PASSWORD 'password'; #密码
ALTER ROLE postgres SET search_path TO myschema, public;
ALTER ROLE postgres SUPERUSER; #超级管理员

# 编辑pg_hba.conf文件
vi $MASTER_DATA_DIRECTORY/pg_hba.conf 

# 加以下内容
# 允许postgres用户本地访问所有数据库
# 免密登录
local   all   postgres   trust         
host    all   postgres   127.0.0.1/32   trust
host    all   postgres   ::1/128        trust
# 允许'postgres'角色,通过任意IP地址,使用md5加密密码,授权用户访问任意数据库。
# host    all   postgres   0.0.0.0/0  md5
host    all   all        0.0.0.0/0  md5

# 重新加载pg_hba.conf配置文件以使更改生效
gpstop -u

# 本地登录和远程登录测试
psql -d postgres -U postgres

# 用户和密码
postgres  
password

# 限制并发连接
#  建议在segment节点的max_connections参数值是master节点的5-10倍。
#  当设置max_connections时,必须设置依赖参数max_prepared_transactions。该值至少与master节点上的max_connections取值相等。segment实例节点也设置相同的值。

# 在$MASTER_DATA_DIRECTORY/postgresql.conf文件(包括standby master节点)
max_connections=100
max_prepared_transactions=100
vi $MASTER_DATA_DIRECTORY/postgresql.conf
# 在所有segment实例节点SEGMENT_DATA_DIRECTORY/postgresql.conf文件
max_connections=500
max_prepared_transactions=100

# 更改允许的连接数
# 登录master节点主机gpadmin用户
# 设置max_connections参数取值。此gpconfig命令将segment实例节点上的参数值设置为1000,将master节点上的参数值设置为200。
gpconfig -c max_connections -v 1000 -m 200 #推荐segment节点的该参数值是master节点的5-10倍。
# 设置max_prepared_transactions参数取值。
gpconfig -c max_prepared_transactions -v 200 # segment节点的该参数值必须大于等于master节点的参数值。
gpstop -r # 停止并重启Greenplum数据库系统
gpconfig -s max_connections # 此gpconfig命令显示max_connections参数的取值。
gpconfig -s max_prepared_transactions

psql

psql -h 192.168.1.123 -p 5432 -d postgres -U postgres -W

2、查看数据库操作

2.1、列举数据库:\l

2.2、选择数据库:\c 数据库名

2.3、查看该某个库中的所有表:\dt

2.4、切换数据库:\c 数据库名

2.5、查看某个库中的某个表结构:\d 表名

注:此处如果需要切换schema可以使用:set search_path = 需要切换的schema;

2.6、显示字符集:\encoding
资源下载
Greenplum自动化安装脚本此资源下载价格为188QL币,请先
GreenPlum增量备份此资源下载价格为8888QL币,请先
客服: 273641837