hadoop-SSH免密登录配置
一:配置基础环境
一、修改主机名
修改 master 机器主机名
[root@server ~]# hostnamectl set-hostname master-wzg
[root@server ~]# bash
[root@master-wzg ~]# hostname
master-wzg
修改 slave1 机器主机名
[root@client ~]# hostnamectl set-hostname slave1-wzg
[root@client ~]# bash
[root@slave1-wzg ~]# hostname
slave1-wzg
修改 slave2 机器主机名
[root@localhost ~]# hostnamectl set-hostname slave2-wzg
[root@localhost ~]# bash
[root@slave2-wzg ~]# hostname
slave2-wzg
二、配置网络环境 master-wzg的IP为10.10.10.128
slave1-wzg的IP为10.10.10.129
slave2-wzg的IP为10.10.10.130
子网掩码均为255.255.255.0
所有网关均为10.10.10.2
DNS均设置为114.114.114.114
【hadoop-SSH免密登录配置】master节点:(slave1和slave2的IP为10.10.10.129和10.10.10.130)
[root@server ~]# vim /etc/sysconfig/network-scripts/ifcfg-ens33
TYPE=Ethernet
BOOTPROTO=static
DEFROUTE=yes
PEERDNS=yes
PEERROUTES=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
IPV6_FAILURE_FATAL=no
IPV6_ADDR_GEN_MODE=stable-privacy
NAME=ens33
UUID=da1a701d-8cee-4e1d-9423-56280232e595
DEVICE=ens33
ONBOOT=yes
IPADDR=10.10.10.128
PREFIX=24
GATEWAY=10.10.10.2
DNS1=114.114.114.114
[root@server ~]# systemctl restart network
查看 master IP地址
[root@master-wzg ~]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:af:2f:d2 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.128/24 brd 10.10.10.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::9ef7:e697:cc63:418b/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:08:d4:17 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:08:d4:17 brd ff:ff:ff:ff:ff:ff
查看 slave1 IP地址
[root@slave1-wzg ~]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:91:3d:e2 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.129/24 brd 10.10.10.255 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::8f79:feb9:1325:f537/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:58:7d:55 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:58:7d:55 brd ff:ff:ff:ff:ff:ff
查看 slave2 IP地址
[root@slave2-wzg ~]# ip a
1: lo: mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens32: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:2a:a9:80 brd ff:ff:ff:ff:ff:ff
inet 10.10.10.130/24 brd 10.10.10.255 scope global ens32
valid_lft forever preferred_lft forever
inet6 fe80::2e7b:ba70:8834:5425/64 scope link
valid_lft forever preferred_lft forever
3: virbr0: mtu 1500 qdisc noqueue state DOWN qlen 1000
link/ether 52:54:00:5a:cb:78 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
valid_lft forever preferred_lft forever
4: virbr0-nic: mtu 1500 qdisc pfifo_fast master virbr0 state DOWN qlen 1000
link/ether 52:54:00:5a:cb:78 brd ff:ff:ff:ff:ff:ff
[root@slave2-wzg ~]#
三、配置域名解析 分别修改“/etc/hosts”配置文件
10.10.10.128 master-wzg master.example.com
10.10.10.129 slave1-wzg slave1.example.com
10.10.10.130 slave2-wzg slave2.example.com
master节点:
[root@master-wzg ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.128 master-wzg master.example.com
10.10.10.129 slave1-wzg slave1.example.com
10.10.10.130 slave2-wzg slave2.example.com
slave1节点:
[root@slave1-wzg ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.128 master-wzg master.example.com
10.10.10.129 slave1-wzg slave1.example.com
10.10.10.130 slave2-wzg slave2.example.com
slave2节点:
[root@slave2-wzg ~]# vi /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.10.10.128 master-wzg master.example.com
10.10.10.129 slave1-wzg slave1.example.com
10.10.10.130 slave2-wzg slave2.example.com
配置完成后,各节点之间可以相互ping通
[root@master-wzg ~]# ping master-wzg
PING master-wzg (10.10.10.128) 56(84) bytes of data.
64 bytes from master-wzg (10.10.10.128): icmp_seq=1 ttl=64 time=0.038 ms
64 bytes from master-wzg (10.10.10.128): icmp_seq=2 ttl=64 time=0.029 ms
^C
--- master-wzg ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.029/0.033/0.038/0.007 ms
[root@master-wzg ~]# ping slave1-wzg
PING slave1-wzg (10.10.10.129) 56(84) bytes of data.
64 bytes from slave1-wzg (10.10.10.129): icmp_seq=1 ttl=64 time=1.12 ms
64 bytes from slave1-wzg (10.10.10.129): icmp_seq=2 ttl=64 time=0.451 ms
^C
--- slave1-wzg ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.451/0.787/1.123/0.336 ms
[root@master-wzg ~]# ping slave2-wzg
PING slave2-wzg (10.10.10.130) 56(84) bytes of data.
64 bytes from slave2-wzg (10.10.10.130): icmp_seq=1 ttl=64 time=1.28 ms
64 bytes from slave2-wzg (10.10.10.130): icmp_seq=2 ttl=64 time=0.903 ms
64 bytes from slave2-wzg (10.10.10.130): icmp_seq=3 ttl=64 time=0.489 ms
^C
--- slave2-wzg ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2004ms
rtt min/avg/max/mdev = 0.489/0.891/1.282/0.324 ms
二:SSH 无密码验证配置 一、生成 SSH 密钥,并配置自我免密登录 步骤一、配置 SSH 服务配置文件
使用 root 用户登录,修改 SSH 配置文件"/etc/ssh/sshd_config"的内容,需要将PubkeyAuthentication yes前面的#号删除,启用公钥私钥配对认证方式。设置完后需要重启 SSH 服务,才能使配置生效。
(在所有节点上执行)
[root@master-wzg ~]# vi /etc/ssh/sshd_config
PubkeyAuthentication yes
[root@master-wzg ~]# systemctl restart sshd
步骤二、创建 hadoop 用户
(在所有节点上执行)
[root@master-wzg ~]# useradd hadoop
[root@master-wzg ~]# echo 'hadoop' | passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
步骤三、切换 hadoop 用户,生成密钥对
(在所有节点上执行)
[root@master-wzg ~]# su - hadoop
[hadoop@master-wzg ~]$ ssh-keygen -t rsa -P ''
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
cc:31:89:1c:69:69:6b:8c:5b:b8:61:64:12:f5:7a:62 hadoop@master-wzg
The key's randomart image is:
+--[ RSA 2048]----+
|.o. .o|
|. oo=o .|
|+ *+.+|
|=.=o o|
|.E*. S|
|.oo|
||
||
||
+-----------------+
查看 .ssh 目录下是否有刚生成的无密码密钥对(私钥 id_rsa 和公钥 id_rsa.pub)
[hadoop@master-wzg ~]$ cd ~/.ssh/
[hadoop@master-wzg .ssh]$ ll
total 8
-rw-------. 1 hadoop hadoop 1679 Mar 19 12:05 id_rsa
-rw-r--r--. 1 hadoop hadoop  399 Mar 19 12:05 id_rsa.pub
步骤四、将 id_rsa.pub 追加到授权 key 文件中
(在所有节点上执行)
[hadoop@master-wzg .ssh]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@master-wzg .ssh]$ ll ~/.ssh/
total 12
-rw-rw-r--. 1 hadoop hadoop  399 Mar 19 12:07 authorized_keys
-rw-------. 1 hadoop hadoop 1679 Mar 19 12:05 id_rsa
-rw-r--r--. 1 hadoop hadoop  399 Mar 19 12:05 id_rsa.pub
步骤五、修改文件"authorized_keys"权限
(在所有节点上执行)
修改后 authorized_keys 文件的权限为600,表示所有者可读写,其他用户没有访问权限。如果该文件权限太大,ssh 服务会拒绝工作,出现无法通过密钥文件进行登录认证的情况。
[hadoop@master-wzg .ssh]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@master-wzg .ssh]$ ll ~/.ssh/
total 12
-rw-------. 1 hadoop hadoop  399 Mar 19 12:07 authorized_keys
-rw-------. 1 hadoop hadoop 1679 Mar 19 12:05 id_rsa
-rw-r--r--. 1 hadoop hadoop  399 Mar 19 12:05 id_rsa.pub
步骤六、验证 SSH 登录本机
通过ssh localhost命令,在 hadoop 用户下验证能否嵌套登录本机,若可以不输入密码登录,则本机通过密钥登录认证成功。
首次登录时会提示系统无法确认 host 主机的真实性,只知道它的公钥指纹,询问用户是否还想继续连接。需要输入“yes”,表示继续登录。第二次再登录同一个主机,则不会再出现该提示。
(在所有节点上执行)
[hadoop@master-wzg .ssh]$ cd
[hadoop@master-wzg ~]$ ssh localhost
The authenticity of host 'localhost (::1)' can't be established.
ECDSA key fingerprint is e6:c6:14:00:9c:6e:33:68:0a:b5:bb:6a:54:c4:ba:8d.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
Last login: Sat Mar 19 12:05:30 2022
[hadoop@master-wzg ~]$ exit
logout
Connection to localhost closed.
slave1节点上做相同操作
[root@slave1-wzg ~]# systemctl restart sshd
[root@slave1-wzg ~]# useradd hadoop
[root@slave1-wzg ~]# echo 'hadoop' | passwd --stdin hadoop
Changing password for user hadoop.
passwd: all authentication tokens updated successfully.
[root@slave1-wzg ~]# su - hadoop
[hadoop@slave1-wzg ~]$ ssh-keygen -t rsa -P ''
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
c5:cb:e1:e4:20:4f:9a:d9:4f:42:d4:0b:a2:b1:8d:53 hadoop@slave1-wzg
The key's randomart image is:
+--[ RSA 2048]----+
|..|
|. E....|
|B..+.=.|
|= .X B.o|
|.+ S *|
|+|
|.|
||
||
+-----------------+
[hadoop@slave1-wzg ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@slave1-wzg ~]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@slave1-wzg ~]$ cd
[hadoop@slave1-wzg ~]$ ssh localhost
The authenticity of host 'localhost (::1)' can't be established.
ECDSA key fingerprint is 03:f0:73:9b:d5:ea:a4:28:9f:f1:83:e4:26:8b:00:5f.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
Last login: Sat Mar 19 12:05:33 2022
[hadoop@slave1-wzg ~]$ exit
logout
Connection to localhost closed.
slave2节点上做相同操作
[root@slave2-wzg ~]# systemctl restart sshd
[root@slave2-wzg ~]# useradd hadoop
[root@slave2-wzg ~]# echo 'hadoop' | passwd --stdin hadoop
更改用户 hadoop 的密码 。
passwd:所有的身份验证令牌已经成功更新。
[root@slave2-wzg ~]# su - hadoop
[hadoop@slave2-wzg ~]$ ssh-keygen -t rsa -P ''
Generating public/private rsa key pair.
Enter file in which to save the key (/home/hadoop/.ssh/id_rsa):
Created directory '/home/hadoop/.ssh'.
Your identification has been saved in /home/hadoop/.ssh/id_rsa.
Your public key has been saved in /home/hadoop/.ssh/id_rsa.pub.
The key fingerprint is:
88:77:d5:c6:d6:09:3c:3e:e4:95:55:4a:41:f0:f0:63 hadoop@slave2-wzg
The key's randomart image is:
+--[ RSA 2048]----+
|.+o+++|
|o+Boo |
|.+=oE|
|. . . o+. . |
|. o S.|
|. .|
||
||
||
+-----------------+
[hadoop@slave2-wzg ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@slave2-wzg ~]$
[hadoop@slave2-wzg ~]$ chmod 600 ~/.ssh/authorized_keys
[hadoop@slave2-wzg ~]$ ssh localhost
The authenticity of host 'localhost (::1)' can't be established.
ECDSA key fingerprint is d8:9e:43:3d:35:a1:a5:41:7c:a0:44:23:93:1b:52:b5.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'localhost' (ECDSA) to the list of known hosts.
Last login: Sat Mar 19 12:05:35 2022
[hadoop@slave2-wzg ~]$ exit
登出
Connection to localhost closed.
二:配置master免密登录slave1和slave2节点 步骤一、将Master节点的公钥复制到每个Slave节点
hadoop 用户登录,通过 scp 命令实现密钥拷贝。
首次远程连接时系统会询问用户是否要继续连接。需要输入“yes”,表示继续。因为目前尚未完成密钥认证的配置,所以使用 scp 命令拷贝文件需要输入 slave1 节点 hadoop用户的密码。
(master节点)
[hadoop@master-wzg ~]$ scp ~/.ssh/id_rsa.pub hadoop@slave1-wzg:~/
The authenticity of host 'slave1-wzg (10.10.10.129)' can't be established.
ECDSA key fingerprint is 03:f0:73:9b:d5:ea:a4:28:9f:f1:83:e4:26:8b:00:5f.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'slave1-wzg,10.10.10.129' (ECDSA) to the list of known hosts.
hadoop@slave1-wzg's password:
id_rsa.pub                100%  399   0.4KB/s   00:00
[hadoop@master-wzg ~]$ scp ~/.ssh/id_rsa.pub hadoop@slave2:~/
ssh: Could not resolve hostname slave2: Name or service not known
lost connection
[hadoop@master-wzg ~]$ scp ~/.ssh/id_rsa.pub hadoop@slave2-wzg:~/
The authenticity of host 'slave2-wzg (10.10.10.130)' can't be established.
ECDSA key fingerprint is d8:9e:43:3d:35:a1:a5:41:7c:a0:44:23:93:1b:52:b5.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'slave2-wzg,10.10.10.130' (ECDSA) to the list of known hosts.
hadoop@slave2-wzg's password:
id_rsa.pub                100%  399   0.4KB/s   00:00
步骤二、在Slave节点将公钥复制到文件
在每个 Slave 节点把 Master 节点复制的公钥复制到 authorized_keys 文件,并删除 id_rsa.pub 文件hadoop 用户登录 slave1 和 slave2 节点,执行命令。
(slave1和slave2节点)
[hadoop@slave1-wzg ~]$ cat ~/id_rsa.pub >>~/.ssh/authorized_keys
[hadoop@slave1-wzg ~]$ rm -f ~/id_rsa.pub
[hadoop@slave2-wzg ~]$ cat ~/id_rsa.pub >>~/.ssh/authorized_keys
[hadoop@slave2-wzg ~]$ rm -f ~/id_rsa.pub
步骤三、验证Master到每个Slave节点无密码登录
hadoop 用户登录 master 节点,执行 SSH 命令登录 slave1 和 slave2 节点。可以观察到不需要输入密码即可实现 SSH 登录。
(master节点)
[hadoop@master-wzg ~]$ ssh slave1-wzg
Last login: Sat Mar 19 12:09:53 2022 from localhost
[hadoop@slave1-wzg ~]$ exit
logout
Connection to slave1-wzg closed.
[hadoop@master-wzg ~]$ ssh slave2-wzg
Last login: Sat Mar 19 12:09:57 2022 from localhost
[hadoop@slave2-wzg ~]$ exit
logout
Connection to slave2-wzg closed.
三:配置slave节点免密登录master和对方 步骤一、将 Slave1和Slave2 节点的公钥保存到Master
使用 ssh-copy-id hadoop@master-wzg 这一条命令即可完成公钥分发,更加方便。
(slave1节点)
[hadoop@slave1-wzg ~]$ ssh-copy-id hadoop@master-wzg
The authenticity of host 'master-wzg (10.10.10.128)' can't be established.
ECDSA key fingerprint is e6:c6:14:00:9c:6e:33:68:0a:b5:bb:6a:54:c4:ba:8d.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@master-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@master-wzg'"
and check to make sure that only the key(s) you wanted were added.

[hadoop@slave1-wzg ~]$ ssh-copy-id hadoop@slave2-wzg
The authenticity of host 'slave2-wzg (10.10.10.130)' can't be established.
ECDSA key fingerprint is d8:9e:43:3d:35:a1:a5:41:7c:a0:44:23:93:1b:52:b5.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@slave2-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@slave2-wzg'"
and check to make sure that only the key(s) you wanted were added.
(slave2节点)
[hadoop@slave2-wzg ~]$ ssh-copy-id hadoop@master-wzg
The authenticity of host 'master-wzg (10.10.10.128)' can't be established.
ECDSA key fingerprint is e6:c6:14:00:9c:6e:33:68:0a:b5:bb:6a:54:c4:ba:8d.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@master-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@master-wzg'"
and check to make sure that only the key(s) you wanted were added.

[hadoop@slave2-wzg ~]$ ssh-copy-id hadoop@slave1-wzg
The authenticity of host 'slave1-wzg (10.10.10.129)' can't be established.
ECDSA key fingerprint is 03:f0:73:9b:d5:ea:a4:28:9f:f1:83:e4:26:8b:00:5f.
Are you sure you want to continue connecting (yes/no)? yes
/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
hadoop@slave1-wzg's password:
Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'hadoop@slave1-wzg'"
and check to make sure that only the key(s) you wanted were added.
步骤二、查看各节点的公钥
查看 Master 节点 authorized_keys 文件
[hadoop@master-wzg ~]$ cat ~/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCz0hVATPM5X3Wr4J9vVLeNihrcix++wQ0S7EtCcnhLI1fZfqLbGs7lApUt1UzejdDAopO2CYi93knzkmTDD7evJ1H5caz+4qZl3Owd5+8XCBpCU9EtJFIU5yLgl93gjSIJ/GpKOGaHlp/KPHXNn9uRvidDNUKumRq2fJDfYkFxCsvQEg+j2t6SNvUvBQ53txmPYBWAJkr8jRgudilRszCDsRpwnYGDIbigWowtnktCYz7zc/s0aqbdXoqkOtcA4H/OwwdAB0SX8HXfnNA1PwHMPPeERsQqTIgU82Tj1UKsibcNhD7F4r12CS3ity+d9GvuwQ4YooUsbsPzVr0YAk6p hadoop@master-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9XSD+SmFjeVz3DCZLzGJV+5/EVEV1D8mkNTDcPzVsoGZn1g5wuqaGm1PJ3HYffhkHp1SsgDH5hMxeeV25+3jsyzaMT3qlm7kG/Cz+DwhM1UyeKfVh5Pev46UfG1i3GeMBmP1Lx6EbN50sAVTWKJuGg2Y9gfdIHv/9BL9A9JJlap4tOKqfMcsMivEZAL8gSUv7PGQp3tfqxaFu6ZqOWDDuC06+8q2NDfZQmw6n4W1kdXYLR9iP8STc7IedlEJ8vHoNifJE/QW2uSq+yhxgPF+TPo3mq0iJa5L27cUHVOcbFstJ/cXAKMFre43mnDCK4br7ajFWgLuDyafOg831rGFJ hadoop@slave1-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDKt6zf1MvvzHIrV/q+r7diZxT7M9VH1YSfhu0OA1g0kM+bXAGcJsEOMyJ4Z+qjINPdhj/Y3qGVIeiRoCS30JnVPHNtFALmTejqwLW9OUNY8/SMRI6C2efeBHFP8F1lNToHGwm94mgqQReKKQ47CRKd7QykxFqJ+TcZNuwTUhOrFEK0aV/9cPZndELs63k3Y9Hf1fqbWWOLEtcYRXOKIuRXodLqASMfZ4bOaTQxQ4BJ6HxrFZyShnO+CFLIXGaz0hyt2Pbo8qqt+W/g+/dLgFYb4Ej8SyFUAztj74haU1SJeO+QNMYf7XdnRFyH/h75OZqGL5RgnhTQQ3Ej7qOBdnl hadoop@slave2-wzg
[hadoop@master-wzg ~]$
查看 Slave1 节点 authorized_keys 文件
[hadoop@slave1-wzg ~]$ cat ~/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9XSD+SmFjeVz3DCZLzGJV+5/EVEV1D8mkNTDcPzVsoGZn1g5wuqaGm1PJ3HYffhkHp1SsgDH5hMxeeV25+3jsyzaMT3qlm7kG/Cz+DwhM1UyeKfVh5Pev46UfG1i3GeMBmP1Lx6EbN50sAVTWKJuGg2Y9gfdIHv/9BL9A9JJlap4tOKqfMcsMivEZAL8gSUv7PGQp3tfqxaFu6ZqOWDDuC06+8q2NDfZQmw6n4W1kdXYLR9iP8STc7IedlEJ8vHoNifJE/QW2uSq+yhxgPF+TPo3mq0iJa5L27cUHVOcbFstJ/cXAKMFre43mnDCK4br7ajFWgLuDyafOg831rGFJ hadoop@slave1-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCz0hVATPM5X3Wr4J9vVLeNihrcix++wQ0S7EtCcnhLI1fZfqLbGs7lApUt1UzejdDAopO2CYi93knzkmTDD7evJ1H5caz+4qZl3Owd5+8XCBpCU9EtJFIU5yLgl93gjSIJ/GpKOGaHlp/KPHXNn9uRvidDNUKumRq2fJDfYkFxCsvQEg+j2t6SNvUvBQ53txmPYBWAJkr8jRgudilRszCDsRpwnYGDIbigWowtnktCYz7zc/s0aqbdXoqkOtcA4H/OwwdAB0SX8HXfnNA1PwHMPPeERsQqTIgU82Tj1UKsibcNhD7F4r12CS3ity+d9GvuwQ4YooUsbsPzVr0YAk6p hadoop@master-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDKt6zf1MvvzHIrV/q+r7diZxT7M9VH1YSfhu0OA1g0kM+bXAGcJsEOMyJ4Z+qjINPdhj/Y3qGVIeiRoCS30JnVPHNtFALmTejqwLW9OUNY8/SMRI6C2efeBHFP8F1lNToHGwm94mgqQReKKQ47CRKd7QykxFqJ+TcZNuwTUhOrFEK0aV/9cPZndELs63k3Y9Hf1fqbWWOLEtcYRXOKIuRXodLqASMfZ4bOaTQxQ4BJ6HxrFZyShnO+CFLIXGaz0hyt2Pbo8qqt+W/g+/dLgFYb4Ej8SyFUAztj74haU1SJeO+QNMYf7XdnRFyH/h75OZqGL5RgnhTQQ3Ej7qOBdnl hadoop@slave2-wzg
[hadoop@slave1-wzg ~]$
查看 Slave2 节点 authorized_keys 文件
[hadoop@slave2-wzg ~]$ cat ~/.ssh/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDKt6zf1MvvzHIrV/q+r7diZxT7M9VH1YSfhu0OA1g0kM+bXAGcJsEOMyJ4Z+qjINPdhj/Y3qGVIeiRoCS30JnVPHNtFALmTejqwLW9OUNY8/SMRI6C2efeBHFP8F1lNToHGwm94mgqQReKKQ47CRKd7QykxFqJ+TcZNuwTUhOrFEK0aV/9cPZndELs63k3Y9Hf1fqbWWOLEtcYRXOKIuRXodLqASMfZ4bOaTQxQ4BJ6HxrFZyShnO+CFLIXGaz0hyt2Pbo8qqt+W/g+/dLgFYb4Ej8SyFUAztj74haU1SJeO+QNMYf7XdnRFyH/h75OZqGL5RgnhTQQ3Ej7qOBdnl hadoop@slave2-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCz0hVATPM5X3Wr4J9vVLeNihrcix++wQ0S7EtCcnhLI1fZfqLbGs7lApUt1UzejdDAopO2CYi93knzkmTDD7evJ1H5caz+4qZl3Owd5+8XCBpCU9EtJFIU5yLgl93gjSIJ/GpKOGaHlp/KPHXNn9uRvidDNUKumRq2fJDfYkFxCsvQEg+j2t6SNvUvBQ53txmPYBWAJkr8jRgudilRszCDsRpwnYGDIbigWowtnktCYz7zc/s0aqbdXoqkOtcA4H/OwwdAB0SX8HXfnNA1PwHMPPeERsQqTIgU82Tj1UKsibcNhD7F4r12CS3ity+d9GvuwQ4YooUsbsPzVr0YAk6p hadoop@master-wzg
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC9XSD+SmFjeVz3DCZLzGJV+5/EVEV1D8mkNTDcPzVsoGZn1g5wuqaGm1PJ3HYffhkHp1SsgDH5hMxeeV25+3jsyzaMT3qlm7kG/Cz+DwhM1UyeKfVh5Pev46UfG1i3GeMBmP1Lx6EbN50sAVTWKJuGg2Y9gfdIHv/9BL9A9JJlap4tOKqfMcsMivEZAL8gSUv7PGQp3tfqxaFu6ZqOWDDuC06+8q2NDfZQmw6n4W1kdXYLR9iP8STc7IedlEJ8vHoNifJE/QW2uSq+yhxgPF+TPo3mq0iJa5L27cUHVOcbFstJ/cXAKMFre43mnDCK4br7ajFWgLuDyafOg831rGFJ hadoop@slave1-wzg
[hadoop@slave2-wzg ~]$
可以看到每个节点 authorized_keys 文件中包括 master、slave1、slave2 三个节点的公钥。
步骤三、验证每个Slave节点无密码登录Master和对方
验证 Slave1 节点到 Master 节点无密码登录
[hadoop@slave1-wzg ~]$ ssh master-wzg
Last login: Sat Mar 19 12:09:48 2022 from localhost
[hadoop@master-wzg ~]$ exit
logout
Connection to master-wzg closed.
[hadoop@slave1-wzg ~]$ ssh slave2-wzg
Last login: Sat Mar 19 12:13:40 2022 from master-wzg
[hadoop@slave2-wzg ~]$ exit
logout
Connection to slave2-wzg closed.
验证 Slave2 节点到 Master 节点无密码登录
[hadoop@slave2-wzg ~]$ ssh master-wzg
Last login: Sat Mar 19 12:15:40 2022 from slave1-wzg
[hadoop@master-wzg ~]$ exit
登出
Connection to master-wzg closed.
[hadoop@slave2-wzg ~]$ ssh slave1-wzg
Last login: Sat Mar 19 12:13:30 2022 from master-wzg
[hadoop@slave1-wzg ~]$ exit
登出
Connection to slave1-wzg closed.
即可实现三台节点(Master 、Slave1、Slave2 )相互免密登录
声明:未经许可,不得转载
推荐阅读
- Unity利用XML制作一个简易的登录系统
- 单点登录|Authing Share|系统安全与防范
- 单点登录|Authing Share丨如何对用户进行权限管理
- Linux实操篇01--远程登录和远程上传和下载文件
- Java基础与算法|Linux学习(二)---实操篇1远程登录
- Linux|Linux【实操篇】—— 远程登录、远程文件传输、vi和vim工具的使用方法
- redis|秒杀项目前期之登录功能
- Django|Vue + Django 2.0.6 学习笔记 7.1 drf的token登录
- 抖音微信登录服务器繁忙,抖音无法正常使用微信登录_抖音微信登陆失败解决方法_游戏吧...
- 抖音微信登录服务器繁忙,微信登录抖音失败解决方法介绍