操作系統:CentOS 6 x86_64
MongoDB版本:3.4.3
集羣主機拓撲:
主機 | mongo shardsvr & ReplSetName | mongo configsvr & ReplSetName | mongos |
test1.lan | shard-a shard-b | ||
test2.lan | shard-a shard-b | ||
test3.lan | shard-a shard-b | ||
test4.lan | cfgshard | ||
test5.lan | cfgshard | ||
test6.lan | cfgshard | ||
test7.lan | yes |
test1-3 分別在一臺主機上啓動兩個不同副本集名稱的mongod實例。
test4-6 三臺主機作爲 config server 單獨運行。
test7 主機作爲 mongos 路由主機。
安裝 MongoDB
配置 repo 源
[mongodb-org-3.4]
name=MongoDB Repository
#baseurl=https://repo.mongodb.org/yum/redhat//mongodb-org/3.4/x86_64/
baseurl=https://mirrors.aliyun.com/mongodb/yum/redhat/$releasever/mongodb-org/3.4/x86_64/
gpgcheck=0
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.4.asc
選擇國內 阿里雲 鏡像資源。
# yum install mongodb-org -y
配置 /etc/mongod.conf
# mongod.conf
# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/
# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log
# Where and how to store data.
storage:
  dbPath: /var/lib/mongo
  journal:
    enabled: true
# engine:
# mmapv1:
# wiredTiger:
# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
# network interfaces
net:
port: 27017
bindIp: 0.0.0.0
# Listen to local interface only, comment to listen on all interfaces.
#security:
#operationProfiling:
replication:
replSetName: shard-a
sharding:
clusterRole: shardsvr
## Enterprise-Only Options
#auditLog:
#snmp:
replication 處配置 副本集 名,sharding 開啓 shardsvr 模式。
啓動 mongod 服務
[root@test1 ~]
# service mongod start
Starting mongod: [ OK ]
[root@test2 ~]
# service mongod start
Starting mongod: [ OK ]
[root@test3 ~]
# service mongod start
Starting mongod: [ OK ]
配置 shard-a 副本集
[root@test1 ~]
# mongo test1.lan:27017
MongoDB shell version v3.4.3
connecting to: test1.lan:27017
MongoDB server version: 3.4.3
Server has startup warnings:
2017-04-24T22:46:19.703+0800 I STORAGE [initandlisten]
2017-04-24T22:46:19.703+0800 I STORAGE [initandlisten] ** WARNING: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine
2017-04-24T22:46:19.703+0800 I STORAGE [initandlisten] ** See http://dochub.mongodb.org/core/prodnotes-filesystem
2017-04-24T22:46:20.321+0800 I CONTROL [initandlisten]
> rs.initiate()
{
"info2"
:
"no configuration specified. Using a default configuration for the set"
,
"me"
:
"test1.lan:27017"
,
"ok"
: 1
}
shard-a:SECONDARY>
shard-a:PRIMARY> config = rs.config()
# 保存配置對象
{
"_id"
:
"shard-a"
,
"version"
: 1,
"protocolVersion"
: NumberLong(1),
"members"
: [
{
"_id"
: 0,
"host"
:
"test1.lan:27017"
,
"arbiterOnly"
:
false
,
"buildIndexes"
:
true
,
"hidden"
:
false
,
"priority"
: 1,
"tags"
: {
},
"slaveDelay"
: NumberLong(0),
"votes"
: 1
}
],
"settings"
: {
"chainingAllowed"
:
true
,
"heartbeatIntervalMillis"
: 2000,
"heartbeatTimeoutSecs"
: 10,
"electionTimeoutMillis"
: 10000,
"catchUpTimeoutMillis"
: 2000,
"getLastErrorModes"
: {
},
"getLastErrorDefaults"
: {
"w"
: 1,
"wtimeout"
: 0
},
"replicaSetId"
: ObjectId(
"58fe111823612a418eb7f3fc"
)
}
}
shard-a:PRIMARY> config.members[0].priority = 2
# 這裏增加自身主機的優先級爲 2,防止後面 PRIMARY 重新選舉到其餘主機
2
shard-a:PRIMARY> rs.reconfig(config)
# 重新應用該配置
{
"ok"
: 1 }
shard-a:PRIMARY> rs.add(
"test2.lan:27017"
)
# 添加副本集主機
{
"ok"
: 1 }
shard-a:PRIMARY> rs.add(
"test3.lan"
)
# 添加副本集主機(默認端口爲 27017)
{
"ok"
: 1 }
shard-a:PRIMARY> rs.config()
{
"_id"
:
"shard-a"
,
"version"
: 4,
"protocolVersion"
: NumberLong(1),
"members"
: [
{
"_id"
: 0,
"host"
:
"test1.lan:27017"
,
"arbiterOnly"
:
false
,
"buildIndexes"
:
true
,
"hidden"
:
false
,
"priority"
: 2,
"tags"
: {
},
"slaveDelay"
: NumberLong(0),
"votes"
: 1
},
{
"_id"
: 1,
"host"
:
"test2.lan:27017"
,
"arbiterOnly"
:
false
,
"buildIndexes"
:
true
,
"hidden"
:
false
,
"priority"
: 1,
"tags"
: {
},
"slaveDelay"
: NumberLong(0),
"votes"
: 1
},
{
"_id"
: 2,
"host"
:
"test3.lan:27017"
,
"arbiterOnly"
:
false
,
"buildIndexes"
:
true
,
"hidden"
:
false
,
"priority"
: 1,
"tags"
: {
},
"slaveDelay"
: NumberLong(0),
"votes"
: 1
}
],
"settings"
: {
"chainingAllowed"
:
true
,
"heartbeatIntervalMillis"
: 2000,
"heartbeatTimeoutSecs"
: 10,
"electionTimeoutMillis"
: 10000,
"catchUpTimeoutMillis"
: 2000,
"getLastErrorModes"
: {
},
"getLastErrorDefaults"
: {
"w"
: 1,
"wtimeout"
: 0
},
"replicaSetId"
: ObjectId(
"58fe111823612a418eb7f3fc"
)
}
}
這樣,副本集 shard-a 就配置完成
接下來我們啓動並配置副本集 shard-b
[root@test1 ~]
# mkdir /var/lib/mongo2
[root@test1 ~]
# mongod --shardsvr --replSet shard-b --dbpath /var/lib/mongo2/ --port 37017 --logpath /var/log/mongodb/mongod2.log --fork --journal
about to fork child process, waiting until server is ready for connections.
forked process: 14323
child process started successfully, parent exiting
[root@test2 ~]
# mkdir /var/lib/mongo2
[root@test2 ~]
# mongod --shardsvr --replSet shard-b --dbpath /var/lib/mongo2/ --port 37017 --logpath /var/log/mongodb/mongod2.log --fork --journal
about to fork child process, waiting until server is ready for connections.
forked process: 5623
child process started successfully, parent exiting
[root@test3 ~]
# mkdir /var/lib/mongo2
[root@test3 ~]
# mongod --shardsvr --replSet shard-b --dbpath /var/lib/mongo2/ --port 37017 --logpath /var/log/mongodb/mongod2.log --fork --journal
about to fork child process, waiting until server is ready for connections.
forked process: 4303
child process started successfully, parent exiting
配置 shard-b 副本集
[root@test1 ~]
# mongo test1.lan:37017
MongoDB shell version v3.4.3
connecting to: test1.lan:37017
MongoDB server version: 3.4.3
Server has startup warnings:
2017-04-24T22:59:43.476+0800 I STORAGE [initandlisten]
2017-04-24T22:59:43.476+0800 I STORAGE [initandlisten] ** WARNING: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine
2017-04-24T22:59:43.476+0800 I STORAGE [initandlisten] ** See http://dochub.mongodb.org/core/prodnotes-filesystem
2017-04-24T22:59:44.019+0800 I CONTROL [initandlisten]
> rs.initiate()
{
"info2"
:
"no configuration specified. Using a default configuration for the set"
,
"me"
:
"test1.lan:37017"
,
"ok"
: 1
}
shard-b:SECONDARY>
shard-b:PRIMARY> config = rs.config()
{
"_id"
:
"shard-b"
,
"version"
: 1,
"protocolVersion"
: NumberLong(1),
"members"
: [
{
"_id"
: 0,
"host"
:
"test1.lan:37017"
,
"arbiterOnly"
:
false
,
"buildIndexes"
:
true
,
"hidden"
:
false
,
"priority"
: 1,
"tags"
: {
},
"slaveDelay"
: NumberLong(0),
"votes"
: 1
}
],
"settings"
: {
"chainingAllowed"
:
true
,
"heartbeatIntervalMillis"
: 2000,
"heartbeatTimeoutSecs"
: 10,
"electionTimeoutMillis"
: 10000,
"catchUpTimeoutMillis"
: 2000,
"getLastErrorModes"
: {
},
"getLastErrorDefaults"
: {
"w"
: 1,
"wtimeout"
: 0
},
"replicaSetId"
: ObjectId(
"58fe1465f7a2e985d87b8bf8"
)
}
}
shard-b:PRIMARY> config.members[0].priority = 2
2
shard-b:PRIMARY> rs.reconfig(config)
{
"ok"
: 1 }
shard-b:PRIMARY> rs.add(
"test2.lan:37017"
)
{
"ok"
: 1 }
shard-b:PRIMARY> rs.add(
"test3.lan:37017"
)
{
"ok"
: 1 }
shard-b:PRIMARY> rs.isMaster()
{
"hosts"
: [
"test1.lan:37017"
,
"test2.lan:37017"
,
"test3.lan:37017"
],
"setName"
:
"shard-b"
,
"setVersion"
: 4,
"ismaster"
:
true
,
"secondary"
:
false
,
"primary"
:
"test1.lan:37017"
,
"me"
:
"test1.lan:37017"
,
"electionId"
: ObjectId(
"7fffffff0000000000000001"
),
"lastWrite"
: {
"opTime"
: {
"ts"
: Timestamp(1493046429, 1),
"t"
: NumberLong(1)
},
"lastWriteDate"
: ISODate(
"2017-04-24T15:07:09Z"
)
},
"maxBsonObjectSize"
: 16777216,
"maxMessageSizeBytes"
: 48000000,
"maxWriteBatchSize"
: 1000,
"localTime"
: ISODate(
"2017-04-24T15:07:24.475Z"
),
"maxWireVersion"
: 5,
"minWireVersion"
: 0,
"readOnly"
:
false
,
"ok"
: 1
}
這樣 shard-a shard-b 兩個副本集已經配置完成
開始配置 config server,MongoDB 從3.2版本之後開始規定 config server 也必須要開啓副本集功能。
config server 的配置文件如下:config server 一般情況下是監聽在 27019 端口
# mongod.conf
# for documentation of all options, see:
# http://docs.mongodb.org/manual/reference/configuration-options/
# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log
# Where and how to store data.
storage:
  dbPath: /var/lib/mongo
  journal:
    enabled: true
# engine:
# mmapv1:
# wiredTiger:
# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
# network interfaces
net:
port: 27019
bindIp: 0.0.0.0
# Listen to local interface only, comment to listen on all interfaces.
#security:
#operationProfiling:
replication:
replSetName: cfgReplSet
sharding:
clusterRole: configsvr
## Enterprise-Only Options
#auditLog:
#snmp:
啓動三臺config server 的mongod 服務
[root@test4 ~]
# service mongod start
Starting mongod: [ OK ]
[root@test5 ~]
# service mongod start
Starting mongod: [ OK ]
[root@test6 ~]
# service mongod start
Starting mongod: [ OK ]
同樣,config server 的副本集配置如上文所示,這裏的代碼就省略了。
配置啓動 mongos 路由主機
[root@test7 ~]
# mongos --configdb cfgReplSet/test4.lan,test5.lan,test6.lan:27019 --logpath /var/log/mongodb/mongos.log --fork --port 30000
about to fork child process, waiting until server is ready for connections.
forked process: 3338
child process started successfully, parent exiting
MongoDB 版本 >3.2 啓動mongos 的時候,需要跟上 config server 的副本集名稱 (這裏是 cfgReplSet)
連接 mongos 測試
[root@test7 ~]
# mongo test7.lan:30000
MongoDB shell version v3.4.4
connecting to: test7.lan:30000
MongoDB server version: 3.4.4
Server has startup warnings:
2017-04-24T23:30:47.285+0800 I CONTROL [main]
2017-04-24T23:30:47.285+0800 I CONTROL [main] ** WARNING: Access control is not enabled for the database.
2017-04-24T23:30:47.285+0800 I CONTROL [main] ** Read and write access to data and configuration is unrestricted.
2017-04-24T23:30:47.285+0800 I CONTROL [main] ** WARNING: You are running this process as the root user, which is not recommended.
2017-04-24T23:30:47.285+0800 I CONTROL [main]
mongos> show dbs
admin 0.000GB
config 0.000GB
mongos> use config
switched to db config
mongos> show collections
chunks
lockpings
locks
migrations
mongos
shards
tags
version
mongos> db.shards.find()
# 這裏沒有返回文檔,也說明分片集羣中並沒有添加可用分片集羣。
配置分片集羣:shard
mongos> sh.addShard(
"shard-a/test1.lan,test2.lan,test3.lan"
)
{
"shardAdded"
:
"shard-a"
,
"ok"
: 1 }
mongos> db.shards.find()
{
"_id"
:
"shard-a"
,
"host"
:
"shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017"
,
"state"
: 1 }
mongos> sh.addShard(
"shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017"
)
{
"shardAdded"
:
"shard-b"
,
"ok"
: 1 }
mongos> db.shards.find()
{
"_id"
:
"shard-a"
,
"host"
:
"shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017"
,
"state"
: 1 }
{
"_id"
:
"shard-b"
,
"host"
:
"shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017"
,
"state"
: 1 }
# 檢查分片集羣的分片副本集數量,方法一
mongos> db.getSiblingDB(
'config'
).shards.find()
{
"_id"
:
"shard-a"
,
"host"
:
"shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017"
,
"state"
: 1 }
{
"_id"
:
"shard-b"
,
"host"
:
"shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017"
,
"state"
: 1 }
# 檢查分片集羣的分片副本集數量,方法二
mongos> use admin
switched to db admin
mongos> db.runCommand({listshards: 1})
{
"shards"
: [
{
"_id"
:
"shard-a"
,
"host"
:
"shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017"
,
"state"
: 1
},
{
"_id"
:
"shard-b"
,
"host"
:
"shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017"
,
"state"
: 1
}
],
"ok"
: 1
}
# 檢查分片集羣的分片副本集數量,方法三
配置分片集合:接下來的步驟就是在數據庫上啓動分片。分片不會自動完成,而是需要在數據庫裏提前爲集合做好設置才行。
mongos> sh.enableSharding(
"test2_db"
)
# 該庫可以是已存在的,也可以是暫不存在的
{
"ok"
: 1 }
mongos> db.getSiblingDB(
"config"
).databases.find()
{
"_id"
:
"test2_db"
,
"primary"
:
"shard-a"
,
"partitioned"
:
true
}
# sharding 分片庫的配置庫 databases 集合已經有相應的配置記錄了。
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id"
: 1,
"minCompatibleVersion"
: 5,
"currentVersion"
: 6,
"clusterId"
: ObjectId(
"58fe17e90b3df66581ff6b09"
)
}
shards:
{
"_id"
:
"shard-a"
,
"host"
:
"shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017"
,
"state"
: 1 }
{
"_id"
:
"shard-b"
,
"host"
:
"shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017"
,
"state"
: 1 }
active mongoses:
"3.4.4"
: 1
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Balancer lock taken at Mon Apr 24 2017 23:21:13 GMT+0800 (CST) by ConfigServer:Balancer
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{
"_id"
:
"test2_db"
,
"primary"
:
"shard-a"
,
"partitioned"
:
true
}
# 分片狀態中更能方便的查看當前分片的狀態信息,包括分片集羣成員 以及 分片數據庫,分片機制等。
mongos> sh.shardCollection(
"test2_db.users"
, {username: 1, _id: 1})
{
"collectionsharded"
:
"test2_db.users"
,
"ok"
: 1 }
# 此處,我們選擇 username _id 作爲組合分片鍵,組合分片鍵必須是一個索引
# 如果集合爲空,那麼該行命令會自動在集合中創建該索引,如果集合已存在對應數據,且該組合鍵的索引沒有事先創建好,那麼這條語句就會拋出錯誤
# 需要手動到集合創建該組合的索引,之後才能作爲分片鍵
mongos> db.getSiblingDB(
"config"
).collections.find().pretty()
{
"_id"
:
"test2_db.users"
,
"lastmodEpoch"
: ObjectId(
"58fe21de224dc86230e9a8f7"
),
"lastmod"
: ISODate(
"1970-02-19T17:02:47.296Z"
),
"dropped"
:
false
,
"key"
: {
"username"
: 1,
"_id"
: 1
},
"unique"
:
false
}
# 配置完成後,config.collections 就存在了相應集合的分片鍵信息。
來看看分片集合在單獨分片副本集中的存在形式
首先需要找到該庫已經被分配到了哪個分片之上(由於該庫之前並沒有數據,所以創建分片鍵的時候,會自動插入索引數據,自動按照默認配置路由到其中一個分片鍵集羣之中)
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 | mongos> sh.status() --- Sharding Status --- sharding version: { "_id" : 1, "minCompatibleVersion" : 5, "currentVersion" : 6, "clusterId" : ObjectId( "58fe17e90b3df66581ff6b09" ) } shards: { "_id" : "shard-a" , "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017" , "state" : 1 } { "_id" : "shard-b" , "host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017" , "state" : 1 } active mongoses: "3.4.4" : 1 autosplit: Currently enabled: yes balancer: Currently enabled: yes Currently running: no Balancer lock taken at Mon Apr 24 2017 23:21:13 GMT+0800 (CST) by ConfigServer:Balancer Failed balancer rounds in last 5 attempts: 0 Migration Results for the last 24 hours: No recent migrations databases: { "_id" : "test2_db" , "primary" : "shard-a" , "partitioned" : true } test2_db.users shard key: { "username" : 1, "_id" : 1 } unique: false balancing: true chunks: shard-a 1 { "username" : { "$minKey" : 1 }, "_id" : { "$minKey" : 1 } } -->> { "username" : { "$maxKey" : 1 }, "_id" : { "$maxKey" : 1 } } on : shard-a Timestamp(1, 0) # 最後這行 databases 看到了,該庫的該數據塊(chunks) 被分配到了 shard-a 副本集中,那麼我們接下來就可以直接到 shard-a 中查看該庫中users集合的文檔信息。 # 登錄到 shard-a 副本集中進行查看 shard-a:PRIMARY> show dbs admin 0.000GB local 0.000GB test2_db 0.000GB shard-a:PRIMARY> use test2_db switched to db test2_db shard-a:PRIMARY> db.users.find() # 該集合暫時沒有文檔 shard-a:PRIMARY> db.users.getIndexes() # 查看該集合的索引配置信息 [ { "v" : 2, "key" : { "_id" : 1 }, "name" : "_id_" , "ns" : "test2_db.users" }, { "v" : 2, "key" : { "username" : 1, "_id" : 1 }, "name" : "username_1__id_1" , "ns" : "test2_db.users" } # 查看到了兩個索引,第一個索引 _id 爲系統默認添加的索引,第二個索引就是創建分片鍵的時候自動創建的組合鍵索引 ] |
寫入數據到分片集羣
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 | # 首先創建一個數據對象,用來填充文檔大小 mongos> data = new Array(2049).join( "abcd " ) mongos> data.length 10240 # data 大小爲 1MB mongos> for ( var i=0; i < 100; i++){ ... db.getSiblingDB( "test2_db" ).users.insert({ ... username: "Join" + i, ... age: i % 13 + 20, ... data: data } ... ) ... } WriteResult({ "nInserted" : 1 }) # 批量插入 100 條文檔,每個文檔約爲 1MB 大小。 # 接下來看看有了這麼多文檔過後,會怎麼分片。 mongos> db.getSiblingDB( "config" ).chunks.count() 3 # 插入這麼多數據以後,就會發現多了幾個數據塊。我們可以通過檢查集合中的數據庫的數量來驗證這個猜想 mongos> db.getSiblingDB( "config" ).chunks.find().pretty() { "_id" : "test2_db.users-username_MinKey_id_MinKey" , "lastmod" : Timestamp(2, 1), "lastmodEpoch" : ObjectId( "58fe21de224dc86230e9a8f7" ), "ns" : "test2_db.users" , "min" : { "username" : { "$minKey" : 1 }, "_id" : { "$minKey" : 1 } }, "max" : { "username" : "Join1" , "_id" : ObjectId( "58fe293756525c8a54e2a5af" ) }, "shard" : "shard-a" } { "_id" : "test2_db.users-username_\"Join1\"_id_ObjectId('58fe293756525c8a54e2a5af')" , "lastmod" : Timestamp(1, 2), "lastmodEpoch" : ObjectId( "58fe21de224dc86230e9a8f7" ), "ns" : "test2_db.users" , "min" : { "username" : "Join1" , "_id" : ObjectId( "58fe293756525c8a54e2a5af" ) }, "max" : { "username" : "Join2" , "_id" : ObjectId( "58fe293756525c8a54e2a5b0" ) }, "shard" : "shard-a" } { "_id" : "test2_db.users-username_\"Join2\"_id_ObjectId('58fe293756525c8a54e2a5b0')" , "lastmod" : Timestamp(2, 0), "lastmodEpoch" : ObjectId( "58fe21de224dc86230e9a8f7" ), "ns" : "test2_db.users" , "min" : { "username" : "Join2" , "_id" : ObjectId( "58fe293756525c8a54e2a5b0" ) }, "max" : { "username" : { "$maxKey" : 1 }, "_id" : { "$maxKey" : 1 } }, "shard" : "shard-b" } # 查看每個數據塊的詳細分片信息,發現有兩個塊被存儲在 shard-a 副本集中,還有一個數據塊被存儲在 
shard-b 副本集中 # 我們也可以通過 sh.status() 來更直觀的看到相關信息。 mongos> sh.status() --- Sharding Status --- sharding version: { "_id" : 1, "minCompatibleVersion" : 5, "currentVersion" : 6, "clusterId" : ObjectId( "58fe17e90b3df66581ff6b09" ) } shards: { "_id" : "shard-a" , "host" : "shard-a/test1.lan:27017,test2.lan:27017,test3.lan:27017" , "state" : 1 } { "_id" : "shard-b" , "host" : "shard-b/test1.lan:37017,test2.lan:37017,test3.lan:37017" , "state" : 1 } active mongoses: "3.4.4" : 1 autosplit: Currently enabled: yes balancer: Currently enabled: yes Currently running: no Balancer lock taken at Mon Apr 24 2017 23:21:13 GMT+0800 (CST) by ConfigServer:Balancer Failed balancer rounds in last 5 attempts: 0 Migration Results for the last 24 hours: 1 : Success databases: { "_id" : "test2_db" , "primary" : "shard-a" , "partitioned" : true } test2_db.users shard key: { "username" : 1, "_id" : 1 } unique: false balancing: true chunks: shard-a 2 shard-b 1 { "username" : { "$minKey" : 1 }, "_id" : { "$minKey" : 1 } } -->> { "username" : "Join1" , "_id" : ObjectId( "58fe293756525c8a54e2a5af" ) } on : shard-a Timestamp(2, 1) { "username" : "Join1" , "_id" : ObjectId( "58fe293756525c8a54e2a5af" ) } -->> { "username" : "Join2" , "_id" : ObjectId( "58fe293756525c8a54e2a5b0" ) } on : shard-a Timestamp(1, 2) { "username" : "Join2" , "_id" : ObjectId( "58fe293756525c8a54e2a5b0" ) } -->> { "username" : { "$maxKey" : 1 }, "_id" : { "$maxKey" : 1 } } on : shard-b Timestamp(2, 0) # 這個方法會打印所有的數據庫信息,並且包含範圍信息。 |
表象背後,MongoDB 底層依賴 2 個機制來保持集羣的平衡:分割與遷移。
分割是把一個大的數據庫分割爲 2 個更小的數據塊的過程。它只會在數據塊大小超過最大限制的時候纔會發生,目前的默認設置是 64MB。分割是必須的,因爲數據塊太大就難以在整個集合中分佈。
遷移就是在分片之間移動數據塊的過程。當某些分片服務器包含的數據塊數量大大超過其他分片服務器就會觸發遷移過程,這個觸發器叫做遷移回合(migration round)。在一個遷移回閤中,數據塊從某些分片服務器遷移到其他分片服務器,直到集羣看起來相對平衡爲止。我們可以想象一下這兩個操作,遷移比分割昂貴得多。
實際上,這些操作不應該影響我們,但是明白這一點非常有用,當遇到性能問題的時候就要想到可能它們正在遷移數據。如果插入的數據分佈均勻,各個分片上的數據集應該差不多以相同的速度增長,則遷移應該不會頻繁發生。
本文出自 “Professor哥” 博客,請務必保留此出處http://professor.blog.51cto.com/996189/1919016