
Adding a Shard Node to a MongoDB Sharded Cluster

This article walks through adding a new shard node to a MongoDB sharded cluster and should serve as a practical reference for anyone facing the same task.

    In a production environment, adding a shard means adding a replica set, so for this test we first need to create a new replica set. The replica-set creation procedure is documented here:
https://blog.51cto.com/u_12592884/2698103
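
For reference, a minimal sketch of initiating the new replica set is shown below (run from a mongo shell connected to one of the new mongod instances; the set name, hosts, and ports are the ones used throughout this article, while data paths and other startup options are omitted). Note that the members of a shard replica set are normally started with the --shardsvr option.

> rs.initiate({
    _id: "hdshard4",
    members: [
      { _id: 0, host: "172.16.254.139:40005" },
      { _id: 1, host: "172.16.254.139:40006" },
      { _id: 2, host: "172.16.254.139:40007" }
    ]
  })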
 

Check the status of the newly created replica set

hdshard4:PRIMARY> rs.status()
{
    "set" : "hdshard4",
    "date" : ISODate("2021-04-19T07:38:30.335Z"),
    "myState" : 1,
    "term" : NumberLong(2),
    "syncingTo" : "",
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "majorityVoteCount" : 2,
    "writeMajorityCount" : 2,
    "optimes" : {
        "lastCommittedOpTime" : {
            "ts" : Timestamp(1618817908, 1),
            "t" : NumberLong(2)
        },
        "lastCommittedWallTime" : ISODate("2021-04-19T07:38:28.435Z"),
        "readConcernMajorityOpTime" : {
            "ts" : Timestamp(1618817908, 1),
            "t" : NumberLong(2)
        },
        "readConcernMajorityWallTime" : ISODate("2021-04-19T07:38:28.435Z"),
        "appliedOpTime" : {
            "ts" : Timestamp(1618817908, 1),
            "t" : NumberLong(2)
        },
        "durableOpTime" : {
            "ts" : Timestamp(1618817908, 1),
            "t" : NumberLong(2)
        },
        "lastAppliedWallTime" : ISODate("2021-04-19T07:38:28.435Z"),
        "lastDurableWallTime" : ISODate("2021-04-19T07:38:28.435Z")
    },
    "lastStableRecoveryTimestamp" : Timestamp(1618817612, 1),
    "lastStableCheckpointTimestamp" : Timestamp(1618817612, 1),
    "electionCandidateMetrics" : {
        "lastElectionReason" : "electionTimeout",
        "lastElectionDate" : ISODate("2021-04-19T07:37:48.397Z"),
        "electionTerm" : NumberLong(2),
        "lastCommittedOpTimeAtElection" : {
            "ts" : Timestamp(0, 0),
            "t" : NumberLong(-1)
        },
        "lastSeenOpTimeAtElection" : {
            "ts" : Timestamp(1618817622, 1),
            "t" : NumberLong(1)
        },
        "numVotesNeeded" : 2,
        "priorityAtElection" : 1,
        "electionTimeoutMillis" : NumberLong(10000),
        "numCatchUpOps" : NumberLong(0),
        "newTermStartDate" : ISODate("2021-04-19T07:37:48.423Z"),
        "wMajorityWriteAvailabilityDate" : ISODate("2021-04-19T07:37:49.230Z")
    },
    "members" : [
        {
            "_id" : 0,
            "name" : "172.16.254.139:40005",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 55,
            "optime" : {
                "ts" : Timestamp(1618817908, 1),
                "t" : NumberLong(2)
            },
            "optimeDate" : ISODate("2021-04-19T07:38:28Z"),
            "syncingTo" : "",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "could not find member to sync from",
            "electionTime" : Timestamp(1618817868, 1),
            "electionDate" : ISODate("2021-04-19T07:37:48Z"),
            "configVersion" : 1,
            "self" : true,
            "lastHeartbeatMessage" : ""
        },
        {
            "_id" : 1,
            "name" : "172.16.254.139:40006",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 46,
            "optime" : {
                "ts" : Timestamp(1618817908, 1),
                "t" : NumberLong(2)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1618817908, 1),
                "t" : NumberLong(2)
            },
            "optimeDate" : ISODate("2021-04-19T07:38:28Z"),
            "optimeDurableDate" : ISODate("2021-04-19T07:38:28Z"),
            "lastHeartbeat" : ISODate("2021-04-19T07:38:28.466Z"),
            "lastHeartbeatRecv" : ISODate("2021-04-19T07:38:29.392Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncingTo" : "172.16.254.139:40005",
            "syncSourceHost" : "172.16.254.139:40005",
            "syncSourceId" : 0,
            "infoMessage" : "",
            "configVersion" : 1
        },
        {
            "_id" : 2,
            "name" : "172.16.254.139:40007",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 37,
            "optime" : {
                "ts" : Timestamp(1618817908, 1),
                "t" : NumberLong(2)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1618817908, 1),
                "t" : NumberLong(2)
            },
            "optimeDate" : ISODate("2021-04-19T07:38:28Z"),
            "optimeDurableDate" : ISODate("2021-04-19T07:38:28Z"),
            "lastHeartbeat" : ISODate("2021-04-19T07:38:28.459Z"),
            "lastHeartbeatRecv" : ISODate("2021-04-19T07:38:30.049Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncingTo" : "172.16.254.139:40006",
            "syncSourceHost" : "172.16.254.139:40006",
            "syncSourceId" : 1,
            "infoMessage" : "",
            "configVersion" : 1
        }
    ],
    "ok" : 1
}

Check the current status of the sharded cluster that the new shard will join

mongos> sh.status()
--- Sharding Status --- 
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("60545017224c766911a9c440")
  }
  shards:
        {  "_id" : "hdshard1",  "host" : "hdshard1/172.16.254.136:40001,172.16.254.137:40001,172.16.254.138:40001",  "state" : 1 }
        {  "_id" : "hdshard2",  "host" : "hdshard2/172.16.254.136:40002,172.16.254.137:40002,172.16.254.138:40002",  "state" : 1 }
        {  "_id" : "hdshard3",  "host" : "hdshard3/172.16.254.136:40003,172.16.254.137:40003,172.16.254.138:40003",  "state" : 1 }
  active mongoses:
        "4.2.12" : 3
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours: 
                5 : Success
  databases:
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                hdshard1    342
                                hdshard2    341
                                hdshard3    341
                        too many chunks to print, use verbose if you want to force print
        {  "_id" : "db1",  "primary" : "hdshard3",  "partitioned" : true,  "version" : {  "uuid" : UUID("71bb472c-7896-4a31-a77c-e3aaf723be3c"),  "lastMod" : 1 } }
        {  "_id" : "recommend",  "primary" : "hdshard1",  "partitioned" : true,  "version" : {  "uuid" : UUID("cb833b8e-cc4f-4c52-83c3-719aa383bac4"),  "lastMod" : 1 } }
                recommend.rcmd_1_min_tag_mei_rong
                        shard key: { "_id" : "hashed" }
                        unique: false
                        balancing: true
                        chunks:
                                hdshard1    3
                                hdshard2    3
                                hdshard3    2
                        { "_id" : { "$minKey" : 1 } } -->> { "_id" : NumberLong("-6701866976688134138") } on : hdshard2 Timestamp(2, 0) 
                        { "_id" : NumberLong("-6701866976688134138") } -->> { "_id" : NumberLong("-4163240026901542572") } on : hdshard3 Timestamp(3, 0) 
                        { "_id" : NumberLong("-4163240026901542572") } -->> { "_id" : NumberLong("-1616330844721205691") } on : hdshard2 Timestamp(4, 0) 
                        { "_id" : NumberLong("-1616330844721205691") } -->> { "_id" : NumberLong("909129560750995399") } on : hdshard3 Timestamp(5, 0) 
                        { "_id" : NumberLong("909129560750995399") } -->> { "_id" : NumberLong("3449289120186727718") } on : hdshard2 Timestamp(6, 0) 
                        { "_id" : NumberLong("3449289120186727718") } -->> { "_id" : NumberLong("5980358241733552715") } on : hdshard1 Timestamp(6, 1) 
                        { "_id" : NumberLong("5980358241733552715") } -->> { "_id" : NumberLong("8520801504243263436") } on : hdshard1 Timestamp(1, 6) 
                        { "_id" : NumberLong("8520801504243263436") } -->> { "_id" : { "$maxKey" : 1 } } on : hdshard1 Timestamp(1, 7) 
                recommend.rcmd_1_tag_li_liao
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                hdshard1    36
                                hdshard2    36
                                hdshard3    35
                        too many chunks to print, use verbose if you want to force print

Check whether the balancer is enabled

mongos> sh.getBalancerState() 
true
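
Had this returned false, the balancer could be switched back on before adding the shard; a one-line sketch:

mongos> sh.setBalancerState(true)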

Add the shard

mongos> sh.addShard("hdshard4/172.16.254.139:40005,172.16.254.139:40006,172.16.254.139:40007")
{
    "shardAdded" : "hdshard4",
    "ok" : 1,
    "operationTime" : Timestamp(1618818723, 4),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1618818723, 4),
        "signature" : {
            "hash" : BinData(0,"hN7ntKc2IjBueOXTau306Dhm3AU="),
            "keyId" : NumberLong("6941260985399246879")
        }
    }
}

Check the sharded cluster status again

mongos> sh.status()
--- Sharding Status --- 
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("60545017224c766911a9c440")
  }
  shards:
        {  "_id" : "hdshard1",  "host" : "hdshard1/172.16.254.136:40001,172.16.254.137:40001,172.16.254.138:40001",  "state" : 1 }
        {  "_id" : "hdshard2",  "host" : "hdshard2/172.16.254.136:40002,172.16.254.137:40002,172.16.254.138:40002",  "state" : 1 }
        {  "_id" : "hdshard3",  "host" : "hdshard3/172.16.254.136:40003,172.16.254.137:40003,172.16.254.138:40003",  "state" : 1 }
        {  "_id" : "hdshard4",  "host" : "hdshard4/172.16.254.139:40005,172.16.254.139:40006,172.16.254.139:40007",  "state" : 1 }
  active mongoses:
        "4.2.12" : 3
  autosplit:
        Currently enabled: yes
  balancer:
        Currently enabled:  yes
        Currently running:  yes
        Collections with active migrations: 
                recommend.rcmd_1_min_tag_mei_rong started at Mon Apr 19 2021 15:52:30 GMT+0800 (CST)
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours: 
                9 : Success
  databases:
        {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                config.system.sessions
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                hdshard1    341
                                hdshard2    341
                                hdshard3    341
                                hdshard4    1
                        too many chunks to print, use verbose if you want to force print
        {  "_id" : "db1",  "primary" : "hdshard3",  "partitioned" : true,  "version" : {  "uuid" : UUID("71bb472c-7896-4a31-a77c-e3aaf723be3c"),  "lastMod" : 1 } }
        {  "_id" : "recommend",  "primary" : "hdshard1",  "partitioned" : true,  "version" : {  "uuid" : UUID("cb833b8e-cc4f-4c52-83c3-719aa383bac4"),  "lastMod" : 1 } }
                recommend.rcmd_1_min_tag_mei_rong
                        shard key: { "_id" : "hashed" }
                        unique: false
                        balancing: true
                        chunks:
                                hdshard1    3
                                hdshard2    3
                                hdshard3    2
                        { "_id" : { "$minKey" : 1 } } -->> { "_id" : NumberLong("-6701866976688134138") } on : hdshard2 Timestamp(2, 0) 
                        { "_id" : NumberLong("-6701866976688134138") } -->> { "_id" : NumberLong("-4163240026901542572") } on : hdshard3 Timestamp(3, 0) 
                        { "_id" : NumberLong("-4163240026901542572") } -->> { "_id" : NumberLong("-1616330844721205691") } on : hdshard2 Timestamp(4, 0) 
                        { "_id" : NumberLong("-1616330844721205691") } -->> { "_id" : NumberLong("909129560750995399") } on : hdshard3 Timestamp(5, 0) 
                        { "_id" : NumberLong("909129560750995399") } -->> { "_id" : NumberLong("3449289120186727718") } on : hdshard2 Timestamp(6, 0) 
                        { "_id" : NumberLong("3449289120186727718") } -->> { "_id" : NumberLong("5980358241733552715") } on : hdshard1 Timestamp(6, 1) 
                        { "_id" : NumberLong("5980358241733552715") } -->> { "_id" : NumberLong("8520801504243263436") } on : hdshard1 Timestamp(1, 6) 
                        { "_id" : NumberLong("8520801504243263436") } -->> { "_id" : { "$maxKey" : 1 } } on : hdshard1 Timestamp(1, 7) 
                recommend.rcmd_1_tag_li_liao
                        shard key: { "_id" : 1 }
                        unique: false
                        balancing: true
                        chunks:
                                hdshard1    34
                                hdshard2    35
                                hdshard3    35
                                hdshard4    3
                        too many chunks to print, use verbose if you want to force print

We can see that the hdshard4 shard has joined the sharded cluster.
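
Besides sh.status(), the new shard can also be confirmed directly from the config database or via the admin command; a small sketch using the same mongos connection:

mongos> use config
mongos> db.shards.find()                     // one document per shard; hdshard4 should now be listed
mongos> db.adminCommand({ listShards: 1 })   // equivalent check through the admin command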

Check how a collection's data is distributed across the shards

mongos> use recommend
switched to db recommend
mongos> db.rcmd_1_tag_li_liao.getShardDistribution()

Shard hdshard1 at hdshard1/172.16.254.136:40001,172.16.254.137:40001,172.16.254.138:40001
 data : 1.16GiB docs : 125816 chunks : 34
 estimated data per chunk : 35.22MiB
 estimated docs per chunk : 3700

Shard hdshard3 at hdshard3/172.16.254.136:40003,172.16.254.137:40003,172.16.254.138:40003
 data : 1.06GiB docs : 124879 chunks : 35
 estimated data per chunk : 31.08MiB
 estimated docs per chunk : 3567

Shard hdshard2 at hdshard2/172.16.254.136:40002,172.16.254.137:40002,172.16.254.138:40002
 data : 1.08GiB docs : 128448 chunks : 35
 estimated data per chunk : 31.87MiB
 estimated docs per chunk : 3669

Shard hdshard4 at hdshard4/172.16.254.139:40005,172.16.254.139:40006,172.16.254.139:40007
 data : 97.06MiB docs : 10704 chunks : 3
 estimated data per chunk : 32.35MiB
 estimated docs per chunk : 3568

Totals
 data : 3.41GiB docs : 389847 chunks : 107
 Shard hdshard1 contains 34.23% data, 32.27% docs in cluster, avg obj size on shard : 9KiB
 Shard hdshard3 contains 31.1% data, 32.03% docs in cluster, avg obj size on shard : 8KiB
 Shard hdshard2 contains 31.88% data, 32.94% docs in cluster, avg obj size on shard : 8KiB
 Shard hdshard4 contains 2.77% data, 2.74% docs in cluster, avg obj size on shard : 9KiB

We can see that this collection has already started migrating data to hdshard4.
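
To follow the migration more closely, the per-shard chunk counts for this collection can be read from the config database; a sketch (config.chunks documents carry an ns field in the 4.2 series used here):

mongos> use config
mongos> db.chunks.aggregate([
    { $match: { ns: "recommend.rcmd_1_tag_li_liao" } },
    { $group: { _id: "$shard", chunks: { $sum: 1 } } },
    { $sort: { chunks: -1 } }
  ])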

Check whether the balancer is currently running

mongos> sh.isBalancerRunning()
true

The result confirms that the balancer is indeed moving data. The shard has now been added; all that remains is to wait out the lengthy rebalancing process.
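
If the rebalance competes with production traffic, the balancer can optionally be restricted to an off-peak window; a sketch against the config database (the window times below are only an example):

mongos> use config
mongos> db.settings.update(
    { _id: "balancer" },
    { $set: { activeWindow: { start: "23:00", stop: "06:00" } } },
    { upsert: true }
  )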

That concludes this walkthrough of adding a shard node to a MongoDB sharded cluster; hopefully it proves helpful to anyone doing the same.