数据库|replication-manager之switchover剖析

徐晨亮,MySQL DBA,知数堂学员。热衷于数据库优化,自动化运维及数据库周边工具开发,对MySQL源码有一定的兴趣。
一、切换流程图:
数据库|replication-manager之switchover剖析
文章图片

二、核心代码:
主要实现逻辑在cluster_fail.go的MasterFailover函数
func (cluster *Cluster) MasterFailover(fail bool) bool { if cluster.GetTopology() == topoMultiMasterRing || cluster.GetTopology() == topoMultiMasterWsrep { res := cluster.VMasterFailover(fail) return res } cluster.sme.SetFailoverState() // Phase 1: 相关清理工作并开始选举 if fail == false { // 这里走switchover流程 .... // 检测是否有长事务,如果有的话不能切换 // 检查方式为:select SUM(ct) from ( select count(*) as ct from information_schema.processlistwhere command = 'Query' and time >= ? and info not like 'select%' union all select count(*) as ct\ // FROMINFORMATION_SCHEMA.INNODB_TRX trx WHERE trx.trx_started < CURRENT_TIMESTAMP - INTERVAL ? SECOND) A qt, logs, err := dbhelper.CheckLongRunningWrites(cluster.master.Conn, cluster.Conf.SwitchWaitWrite) // 主库flush tables(FLUSH NO_WRITE_TO_BINLOG TABLES) } // 开始选举new master for _, s := range cluster.slaves { // 刷新状态,获取连接、show master status、show slave status等信息 s.Refresh() } key := -1 if fail { // failover走这里,返回的是一个slave list中的选举出来的slave数组下标 key = cluster.electFailoverCandidate(cluster.slaves, true) } else { // switchover走这里,返回的是一个slave list中的选举出来的slave数组下标 key = cluster.electSwitchoverCandidate(cluster.slaves, true) } if key == -1 { // key为-1的话,表示没有可用的slave被选举出来,switchover失败 cluster.sme.RemoveFailoverState() return false } ...// 根据选举出来的new master进行变量互换 // 即cluster.master为new master,cluster.oldMaster为old master cluster.oldMaster = cluster.master cluster.master = cluster.Servers[skey] cluster.master.State = stateMaster if cluster.Conf.MultiMaster == false { // 非多主模式下,从slave list中删除new master cluster.slaves[key].delete(&cluster.slaves) }if cluster.Conf.PreScript != "" { // 这里调用 pre-failover script,比如摘除VIP等操作 } // 阶段2:拒绝写入请求并且同步slave if fail == false { // Disable Event Scheduler on old master // old master进行冻结,阻止写入 // freeze做了以下事情: // 1. set read only // 2. 检查长事务 // 3. 设置最大连接数 // 4. kill连接 cluster.oldMaster.freeze()// 下发ftwrl logs, err := dbhelper.FlushTablesWithReadLock(cluster.oldMaster.Conn, cluster.oldMaster.DBVersion) } ... 
// 等待new master的SQL线程状态改为read all relay logs,表示已经应用完relay log err = cluster.master.ReadAllRelayLogs()// 写入同步状态信息 // 获取位点信息 crash.FailoverMasterLogFile = ms.MasterLogFile.String crash.FailoverMasterLogPos = ms.ReadMasterLogPos.String crash.NewMasterLogFile = cluster.master.BinaryLogFile crash.NewMasterLogPos = cluster.master.BinaryLogPos// 根据数据库版本进行不同的处理crash.FailoverIOGtid cluster.master.FailoverSemiSyncSlaveStatus = cluster.master.SemiSyncSlaveStatus crash.FailoverSemiSyncSlaveStatus = cluster.master.SemiSyncSlaveStatus // Phase 3: Prepare new master if cluster.Conf.MultiMaster == false { // new master stop slave } cluster.Crashes = append(cluster.Crashes, crash) t := time.Now()crash.Save(cluster.WorkingDir + "/failover." + t.Format("20060102150405") + ".json") crash.Purge(cluster.WorkingDir, cluster.Conf.FailoverLogFileKeep) cluster.Save() // 解锁old master前调用post-failover script if cluster.Conf.MultiMaster == false { if cluster.master.DBVersion.IsMySQLOrPercona() { // new master stop slave; logs, err := cluster.master.StopSlave() } // new master reset slave logs, err := cluster.master.ResetSlave() } if fail == false { // 获取最新的GTID cluster.master.Refresh() } // new master打开读写 err = cluster.master.SetReadWrite() // sleep SwitchSlaveWaitRouteChange配置的秒数,这里应该是让中间件有时间来做探测动作 time.Sleep(time.Duration(cluster.Conf.SwitchSlaveWaitRouteChange) * time.Second) if cluster.Conf.FailEventScheduler { // 打开new master 的event scheduler logs, err := dbhelper.SetEventScheduler(cluster.master.Conn, true, cluster.master.DBVersion) }...// new master下发flush tables防止异常的事务 logs, err := dbhelper.FlushTables(cluster.master.Conn) ... 
if fail == false { // 获取最近的GTID信息 cluster.oldMaster.Refresh() // ******** // Phase 4: 将old master降级为slave // ******** //kill old master上的所有连接 dbhelper.KillThreads(cluster.oldMaster.Conn, cluster.oldMaster.DBVersion)// unlock tables logs, err := dbhelper.UnlockTables(cluster.oldMaster.Conn) // 保险起见,old master再次stop slave,在某些场景下old master会仍然存在旧的replication running cluster.oldMaster.StopSlave() one_shoot_slave_pos := false if cluster.oldMaster.DBVersion.IsMariaDB() && cluster.oldMaster.HaveMariaDBGTID == false && cluster.oldMaster.DBVersion.Major >= 10 { // 设置old master GTID_SLAVE_POS logs, err := dbhelper.SetGTIDSlavePos(cluster.oldMaster.Conn, cluster.master.GTIDBinlogPos.Sprint()) one_shoot_slave_pos = true } hasMyGTID := cluster.oldMaster.HasMySQLGTID() cluster.LogSQL(logs, err, cluster.oldMaster.URL, "MasterFailover", LvlErr, "Could not check old master GTID status: %s", err) var changeMasterErr error // 各种姿势的old master start slaveif cluster.Conf.ReadOnly { // old master开启只读 logs, err = dbhelper.SetReadOnly(cluster.oldMaster.Conn, true) } else { logs, err = dbhelper.SetReadOnly(cluster.oldMaster.Conn, false) } if cluster.Conf.SwitchDecreaseMaxConn { // old master设置最大连接数 logs, err := dbhelper.SetMaxConnections(cluster.oldMaster.Conn, cluster.oldMaster.maxConn, cluster.oldMaster.DBVersion) } // Add the old master to the slaves list cluster.oldMaster.State = stateSlave if cluster.Conf.MultiMaster == false { // 将old master加入到slave列表中 cluster.slaves = append(cluster.slaves, cluster.oldMaster) } } // ******** // Phase 5: 其他slave change到new master完成拓扑结构 // ******** for _, sl := range cluster.slaves { if fail == false && cluster.Conf.MxsBinlogOn == false && cluster.Conf.SwitchSlaveWaitCatch { // 等待其他slave同步到new master sl.WaitSyncToMaster(cluster.oldMaster) } logs, err = sl.StopSlave() if fail == false && cluster.Conf.MxsBinlogOn == false && cluster.Conf.SwitchSlaveWaitCatch { if cluster.Conf.FailForceGtid && sl.DBVersion.IsMariaDB() { logs, err := 
dbhelper.SetGTIDSlavePos(sl.Conn, cluster.oldMaster.GTIDBinlogPos.Sprint()) } } hasMyGTID := cluster.master.HasMySQLGTID() // start slave logs, err = sl.StartSlave()if cluster.Conf.ReadOnly && cluster.Conf.MxsBinlogOn == false && !cluster.IsInIgnoredReadonly(sl) { // 如果配置了slave只读的话,这里将slave设置只读 logs, err = sl.SetReadOnly() } else { if cluster.Conf.MxsBinlogOn == false { // 如果没有配置slave只读,这里slave打开读写 err = sl.SetReadWrite() } } } //完成switchover }

三、总结:
  1. 稍作一下总结吧,总的来说高可用流程大家实现方式都差不多,replication-manager的优点是兼容了MariaDB、Percona MySQL和官方MySQL,从MasterFailover函数就可以看到有大量的兼容性代码,甚至还有一部分PG的代码(这里不是很懂)。另外值得一提的是区别于其他普通高可用方案的vip连接的方式,replication-manager天然支持了很多类型的中间件,比如ProxySQL、MaxScale、HAProxy等等,这样一来,应用程序只需要通过连接中间件即可,至于后端的主从关系维护由replication-manager自动帮你完成,可以说是十分友好了。
  2. 整个过程中通过设置只读,FTWRL,还通过设置最大连接数+kill连接等方式来阻止写入操作,可见对整个集群的保护还是做得挺到位的。
  3. replication-manager提供了自定义脚本的接口,在pre-failover和post-failover阶段都可以自己定义执行脚本,例如你可以在pre-failover阶段通知consul集群摘除write服务,以及发送告警短信等动作;在post-failover阶段通知consul集群重新添加write服务。
  4. 下篇再讲一下failover的流程以及跟switchover的差别。
数据库|replication-manager之switchover剖析
文章图片

【数据库|replication-manager之switchover剖析】叶老师的「MySQL核心优化」大课已升级到MySQL 8.0,扫码开启MySQL 8.0修行之旅吧
数据库|replication-manager之switchover剖析
文章图片

    推荐阅读