千家信息网

57-4 数据库分片概念及mongodb sharding的实现

发表于:2025-01-21 作者:千家信息网编辑
千家信息网最后更新 2025年01月21日,04 数据库分片的概念及mongodb sharding的实现。配置环境:node1: 192.168.1.121 CentOS release 6.7;node2: 192.168.1.122 CentOS release 6.7
千家信息网最后更新 2025年01月21日。57-4 数据库分片概念及mongodb sharding的实现

04 数据库分片的概念及mongodb sharding的实现


配置环境:

node1: 192.168.1.121 CentOS release 6.7

node2: 192.168.1.122 CentOS release 6.7

node3: 192.168.1.123 CentOS release 6.7

[root@node1 ~]# vim /etc/hosts

添加

192.168.1.121 node1

192.168.1.122 node2

192.168.1.123 node3

[root@node1 ~]# scp /etc/hosts node2:/etc

[root@node1 ~]# scp /etc/hosts node3:/etc

[root@node1 ~]# service mongod stop

[root@node1 ~]# vim /etc/mongod.conf

修改

#replSet=setname

replSet=testSet

replIndexPrefetch=_id_only

[root@node1 ~]# service mongod start

[root@node1 ~]# mongo

MongoDB shell version: 2.6.4

connecting to: test

> show dbs

admin (empty)

local 0.078GB

testdb 0.078GB


> use local

switched to db local

> show collections

startup_log

system.indexes

> exit

bye


[root@node1 mongodb-2.6.4]# scp mongodb-org-server-2.6.4-1.x86_64.rpm mongodb-org-tools-2.6.4-1.x86_64.rpm mongodb-org-shell-2.6.4-1.x86_64.rpm node2:/root

[root@node1 mongodb-2.6.4]# scp mongodb-org-server-2.6.4-1.x86_64.rpm mongodb-org-tools-2.6.4-1.x86_64.rpm mongodb-org-shell-2.6.4-1.x86_64.rpm node3:/root


[root@node2 ~]# yum -y install *rpm

[root@node2 ~]# mkdir -p /mongodb/data

[root@node2 ~]# chown -R mongod.mongod /mongodb/


[root@node3 ~]# yum -y install *rpm

[root@node3 ~]# mkdir -p /mongodb/data

[root@node3 ~]# chown -R mongod.mongod /mongodb/


[root@node1 ~]# scp /etc/mongod.conf node2:/etc/

[root@node1 ~]# scp /etc/mongod.conf node3:/etc/


[root@node2 ~]# service mongod start

[root@node3 ~]# service mongod start


[root@node1 ~]# mongo

MongoDB shell version: 2.6.4

connecting to: test

> rs.status()

{

"startupStatus" : 3,

"info" : "run rs.initiate(...) if not yet done for the set",

"ok" : 0,

"errmsg" : "can't get local.system.replset config from self or any seed (EMPTYCONFIG)"

}


> rs.initiate()

{

"info2" : "no configuration explicitly specified -- making one",

"me" : "node1:27017",

"info" : "Config now saved locally. Should come online in about a minute.",

"ok" : 1

}


> rs.status()

{

"set" : "testSet",

"date" : ISODate("2017-01-08T14:33:14Z"),

"myState" : 1,

"members" : [

{

"_id" : 0,

"name" : "node1:27017",

"health" : 1,

"state" : 1,

"stateStr" : "PRIMARY",

"uptime" : 1316,

"optime" : Timestamp(1483885955, 1),

"optimeDate" : ISODate("2017-01-08T14:32:35Z"),

"electionTime" : Timestamp(1483885956, 1),

"electionDate" : ISODate("2017-01-08T14:32:36Z"),

"self" : true

}

],

"ok" : 1

}


#添加节点

testSet:PRIMARY> rs.add("192.168.1.122")

{ "ok" : 1 }

testSet:PRIMARY> rs.status()

{

"set" : "testSet",

"date" : ISODate("2017-01-08T14:38:50Z"),

"myState" : 1,

"members" : [

{

"_id" : 0,

"name" : "node1:27017",

"health" : 1,

"state" : 1,

"stateStr" : "PRIMARY",

"uptime" : 1652,

"optime" : Timestamp(1483886304, 1),

"optimeDate" : ISODate("2017-01-08T14:38:24Z"),

"electionTime" : Timestamp(1483885956, 1),

"electionDate" : ISODate("2017-01-08T14:32:36Z"),

"self" : true

},

{

"_id" : 1,

"name" : "192.168.1.122:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 26,

"optime" : Timestamp(1483886304, 1),

"optimeDate" : ISODate("2017-01-08T14:38:24Z"),

"lastHeartbeat" : ISODate("2017-01-08T14:38:48Z"),

"lastHeartbeatRecv" : ISODate("2017-01-08T14:38:48Z"),

"pingMs" : 1,

"syncingTo" : "node1:27017"

}

],

"ok" : 1

}


[root@node2 ~]# mongo

MongoDB shell version: 2.6.4

connecting to: test

Welcome to the MongoDB shell.

For interactive help, type "help".

For more comprehensive documentation, see

http://docs.mongodb.org/

Questions? Try the support group

http://groups.google.com/group/mongodb-user

testSet:SECONDARY> show dbs

admin (empty)

local 1.078GB

testdb 0.078GB

testSet:SECONDARY> use testdb;

switched to db testdb

testSet:SECONDARY> rs.slaveOk()

testSet:SECONDARY> rs.status()

{

"set" : "testSet",

"date" : ISODate("2017-01-09T12:02:14Z"),

"myState" : 2,

"syncingTo" : "node1:27017",

"members" : [

{

"_id" : 0,

"name" : "node1:27017",

"health" : 1,

"state" : 1,

"stateStr" : "PRIMARY",

"uptime" : 77028,

"optime" : Timestamp(1483886304, 1),

"optimeDate" : ISODate("2017-01-08T14:38:24Z"),

"lastHeartbeat" : ISODate("2017-01-09T12:02:13Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T12:02:13Z"),

"pingMs" : 1,

"electionTime" : Timestamp(1483885956, 1),

"electionDate" : ISODate("2017-01-08T14:32:36Z")

},

{

"_id" : 1,

"name" : "192.168.1.122:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 77851,

"optime" : Timestamp(1483886304, 1),

"optimeDate" : ISODate("2017-01-08T14:38:24Z"),

"self" : true

}

],

"ok" : 1

}

testSet:SECONDARY> rs.isMaster()

{

"setName" : "testSet",

"setVersion" : 2,

"ismaster" : false,

"secondary" : true,

"hosts" : [

"192.168.1.122:27017",

"node1:27017"

],

"primary" : "node1:27017",

"me" : "192.168.1.122:27017",

"maxBsonObjectSize" : 16777216,

"maxMessageSizeBytes" : 48000000,

"maxWriteBatchSize" : 1000,

"localTime" : ISODate("2017-01-09T12:03:59.702Z"),

"maxWireVersion" : 2,

"minWireVersion" : 0,

"ok" : 1

}


testSet:PRIMARY> rs.isMaster()

{

"setName" : "testSet",

"setVersion" : 2,

"ismaster" : true,

"secondary" : false,

"hosts" : [

"node1:27017",

"192.168.1.122:27017"

],

"primary" : "node1:27017",

"me" : "node1:27017",

"maxBsonObjectSize" : 16777216,

"maxMessageSizeBytes" : 48000000,

"maxWriteBatchSize" : 1000,

"localTime" : ISODate("2017-01-09T12:05:47.182Z"),

"maxWireVersion" : 2,

"minWireVersion" : 0,

"ok" : 1

}


#增加新节点

testSet:PRIMARY> rs.add("192.168.1.123")

{ "ok" : 1 }


[root@node3 ~]# mongo

MongoDB shell version: 2.6.4

connecting to: test

Welcome to the MongoDB shell.

For interactive help, type "help".

For more comprehensive documentation, see

http://docs.mongodb.org/

Questions? Try the support group

http://groups.google.com/group/mongodb-user

testSet:SECONDARY> rs.slaveOk()

testSet:SECONDARY> rs.status()

{

"set" : "testSet",

"date" : ISODate("2017-01-09T12:10:20Z"),

"myState" : 2,

"syncingTo" : "node1:27017",

"members" : [

{

"_id" : 0,

"name" : "node1:27017",

"health" : 1,

"state" : 1,

"stateStr" : "PRIMARY",

"uptime" : 78,

"optime" : Timestamp(1483963739, 1),

"optimeDate" : ISODate("2017-01-09T12:08:59Z"),

"lastHeartbeat" : ISODate("2017-01-09T12:10:18Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T12:10:19Z"),

"pingMs" : 1,

"electionTime" : Timestamp(1483885956, 1),

"electionDate" : ISODate("2017-01-08T14:32:36Z")

},

{

"_id" : 1,

"name" : "192.168.1.122:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 78,

"optime" : Timestamp(1483963739, 1),

"optimeDate" : ISODate("2017-01-09T12:08:59Z"),

"lastHeartbeat" : ISODate("2017-01-09T12:10:18Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T12:10:18Z"),

"pingMs" : 1,

"syncingTo" : "node1:27017"

},

{

"_id" : 2,

"name" : "192.168.1.123:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 78317,

"optime" : Timestamp(1483963739, 1),

"optimeDate" : ISODate("2017-01-09T12:08:59Z"),

"self" : true

}

],

"ok" : 1

}

testSet:SECONDARY> use testdb

switched to db testdb

testSet:SECONDARY> db.students.findOne()

{ "_id" : ObjectId("5871e94113222f399a5240a3"), "name" : "tom", "age" : 23 }


testSet:SECONDARY> rs.conf()

{

"_id" : "testSet",

"version" : 3,

"members" : [

{

"_id" : 0,

"host" : "node1:27017"

},

{

"_id" : 1,

"host" : "192.168.1.122:27017"

},

{

"_id" : 2,

"host" : "192.168.1.123:27017"

}

]

}


testSet:PRIMARY> use testdb

switched to db testdb

testSet:PRIMARY> db.classes.insert({class: "One",nostu: 40})

WriteResult({ "nInserted" : 1 })

testSet:PRIMARY> show collections;

classes

students

system.indexes


testSet:SECONDARY> db.classes.findOne()

{

"_id" : ObjectId("58737e8606a316aec46edfdc"),

"class" : "One",

"nostu" : 40

}


testSet:SECONDARY> db.classes.insert({class: "Two", nostu: 50})

WriteResult({ "writeError" : { "code" : undefined, "errmsg" : "not master" } })


testSet:SECONDARY> rs.conf()

{

"_id" : "testSet",

"version" : 3,

"members" : [

{

"_id" : 0,

"host" : "node1:27017"

},

{

"_id" : 1,

"host" : "192.168.1.122:27017"

},

{

"_id" : 2,

"host" : "192.168.1.123:27017"

}

]

}


#使主节点"下台"

testSet:PRIMARY> rs.stepDown()

2017-01-09T20:23:48.978+0800 DBClientCursor::init call() failed

2017-01-09T20:23:48.980+0800 Error: error doing query: failed at src/mongo/shell/query.js:81

2017-01-09T20:23:48.982+0800 trying reconnect to 127.0.0.1:27017 (127.0.0.1) failed

2017-01-09T20:23:48.984+0800 reconnect 127.0.0.1:27017 (127.0.0.1) ok

testSet:SECONDARY> rs.status()

{

"set" : "testSet",

"date" : ISODate("2017-01-09T12:24:27Z"),

"myState" : 2,

"syncingTo" : "192.168.1.123:27017",

"members" : [

{

"_id" : 0,

"name" : "node1:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 79989,

"optime" : Timestamp(1483964038, 1),

"optimeDate" : ISODate("2017-01-09T12:13:58Z"),

"infoMessage" : "syncing to: 192.168.1.123:27017",

"self" : true

},

{

"_id" : 1,

"name" : "192.168.1.122:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 78363,

"optime" : Timestamp(1483964038, 1),

"optimeDate" : ISODate("2017-01-09T12:13:58Z"),

"lastHeartbeat" : ISODate("2017-01-09T12:24:25Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T12:24:25Z"),

"pingMs" : 1,

"lastHeartbeatMessage" : "syncing to: node1:27017",

"syncingTo" : "node1:27017"

},

{

"_id" : 2,

"name" : "192.168.1.123:27017",

"health" : 1,

"state" : 1,

"stateStr" : "PRIMARY",

"uptime" : 928,

"optime" : Timestamp(1483964038, 1),

"optimeDate" : ISODate("2017-01-09T12:13:58Z"),

"lastHeartbeat" : ISODate("2017-01-09T12:24:26Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T12:24:25Z"),

"pingMs" : 1,

"electionTime" : Timestamp(1483964629, 1),

"electionDate" : ISODate("2017-01-09T12:23:49Z")

}

],

"ok" : 1

}


testSet:PRIMARY> db.printReplicationInfo()

configured oplog size: 990MB

log length start to end: 299secs (0.08hrs)

oplog first event time: Mon Jan 09 2017 20:08:59 GMT+0800 (CST)

oplog last event time: Mon Jan 09 2017 20:13:58 GMT+0800 (CST)

now: Mon Jan 09 2017 20:27:20 GMT+0800 (CST)



testSet:SECONDARY> db.printReplicationInfo()

configured oplog size: 990MB

log length start to end: 77734secs (21.59hrs)

oplog first event time: Sun Jan 08 2017 22:38:24 GMT+0800 (CST)

oplog last event time: Mon Jan 09 2017 20:13:58 GMT+0800 (CST)

now: Mon Jan 09 2017 20:28:01 GMT+0800 (CST)


testSet:SECONDARY> rs.status()

{

"set" : "testSet",

"date" : ISODate("2017-01-09T12:29:38Z"),

"myState" : 2,

"syncingTo" : "node1:27017",

"members" : [

{

"_id" : 0,

"name" : "node1:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 78672,

"optime" : Timestamp(1483964038, 1),

"optimeDate" : ISODate("2017-01-09T12:13:58Z"),

"lastHeartbeat" : ISODate("2017-01-09T12:29:37Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T12:29:37Z"),

"pingMs" : 1,

"syncingTo" : "192.168.1.123:27017"

},

{

"_id" : 1,

"name" : "192.168.1.122:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 79495,

"optime" : Timestamp(1483964038, 1),

"optimeDate" : ISODate("2017-01-09T12:13:58Z"),

"self" : true

},

{

"_id" : 2,

"name" : "192.168.1.123:27017",

"health" : 1,

"state" : 1,

"stateStr" : "PRIMARY",

"uptime" : 1238,

"optime" : Timestamp(1483964038, 1),

"optimeDate" : ISODate("2017-01-09T12:13:58Z"),

"lastHeartbeat" : ISODate("2017-01-09T12:29:37Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T12:29:37Z"),

"pingMs" : 1,

"electionTime" : Timestamp(1483964629, 1),

"electionDate" : ISODate("2017-01-09T12:23:49Z")

}

],

"ok" : 1

}


#node1节点下线

testSet:SECONDARY> exit

bye

[root@node1 ~]# service mongod stop

Stopping mongod: [ OK ]


#保存配置文件至cfg中(必须在主节点配置)

testSet:PRIMARY> cfg=rs.conf()

{

"_id" : "testSet",

"version" : 3,

"members" : [

{

"_id" : 0,

"host" : "node1:27017"

},

{

"_id" : 1,

"host" : "192.168.1.122:27017"

},

{

"_id" : 2,

"host" : "192.168.1.123:27017"

}

]

}


#设定节点优先级

testSet:PRIMARY> cfg.members[1].priority=2

2

#重读cfg配置文件

testSet:PRIMARY> rs.reconfig(cfg)

2017-01-09T21:08:58.403+0800 DBClientCursor::init call() failed

2017-01-09T21:08:58.404+0800 Error: error doing query: failed at src/mongo/shell/query.js:81

2017-01-09T21:08:58.406+0800 trying reconnect to 127.0.0.1:27017 (127.0.0.1) failed

2017-01-09T21:08:58.407+0800 reconnect 127.0.0.1:27017 (127.0.0.1) ok

testSet:SECONDARY>

testSet:SECONDARY> rs.status()

{

"set" : "testSet",

"date" : ISODate("2017-01-09T13:09:46Z"),

"myState" : 2,

"syncingTo" : "192.168.1.122:27017",

"members" : [

{

"_id" : 0,

"name" : "node1:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 98,

"optime" : Timestamp(1483967288, 1),

"optimeDate" : ISODate("2017-01-09T13:08:08Z"),

"lastHeartbeat" : ISODate("2017-01-09T13:09:45Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T13:09:45Z"),

"pingMs" : 1,

"lastHeartbeatMessage" : "syncing to: 192.168.1.122:27017",

"syncingTo" : "192.168.1.122:27017"

},

{

"_id" : 1,

"name" : "192.168.1.122:27017",

"health" : 1,

"state" : 1,

"stateStr" : "PRIMARY",

"uptime" : 98,

"optime" : Timestamp(1483967288, 1),

"optimeDate" : ISODate("2017-01-09T13:08:08Z"),

"lastHeartbeat" : ISODate("2017-01-09T13:09:45Z"),

"lastHeartbeatRecv" : ISODate("2017-01-09T13:09:46Z"),

"pingMs" : 1,

"electionTime" : Timestamp(1483967290, 1),

"electionDate" : ISODate("2017-01-09T13:08:10Z")

},

{

"_id" : 2,

"name" : "192.168.1.123:27017",

"health" : 1,

"state" : 2,

"stateStr" : "SECONDARY",

"uptime" : 81883,

"optime" : Timestamp(1483967288, 1),

"optimeDate" : ISODate("2017-01-09T13:08:08Z"),

"infoMessage" : "syncing to: 192.168.1.122:27017",

"self" : true

}

],

"ok" : 1

}


testSet:SECONDARY> rs.conf()

{

"_id" : "testSet",

"version" : 4,

"members" : [

{

"_id" : 0,

"host" : "node1:27017"

},

{

"_id" : 1,

"host" : "192.168.1.122:27017",

"priority" : 2

},

{

"_id" : 2,

"host" : "192.168.1.123:27017"

}

]

}


testSet:PRIMARY> cfg=rs.conf()

{

"_id" : "testSet",

"version" : 4,

"members" : [

{

"_id" : 0,

"host" : "node1:27017"

},

{

"_id" : 1,

"host" : "192.168.1.122:27017",

"priority" : 2

},

{

"_id" : 2,

"host" : "192.168.1.123:27017"

}

]

}


testSet:PRIMARY> cfg.members[2].arbiterOnly=true

true

testSet:PRIMARY> rs.reconfig(cfg)

{

"errmsg" : "exception: arbiterOnly may not change for members",

"code" : 13510,

"ok" : 0

}


testSet:PRIMARY> rs.conf()

{

"_id" : "testSet",

"version" : 4,

"members" : [

{

"_id" : 0,

"host" : "node1:27017"

},

{

"_id" : 1,

"host" : "192.168.1.122:27017",

"priority" : 2

},

{

"_id" : 2,

"host" : "192.168.1.123:27017"

}

]

}


testSet:PRIMARY> rs.printSlaveReplicationInfo()

source: node1:27017

syncedTo: Mon Jan 09 2017 21:08:08 GMT+0800 (CST)

0 secs (0 hrs) behind the primary

source: 192.168.1.123:27017

syncedTo: Mon Jan 09 2017 21:08:08 GMT+0800 (CST)

0 secs (0 hrs) behind the primary


[root@node1 ~]# service mongod stop

[root@node2 ~]# service mongod stop

[root@node1 ~]# rm -rf /mongodb/

[root@node2 ~]# rm -rf /mongodb/data/

[root@node3 ~]# rm -rf /mongodb/data/



[root@node3 ~]# scp *rpm node4:/root

[root@node4 ~]# mkdir -p /mongodb/data

[root@node4 ~]# yum -y install *rpm

[root@node4 ~]# chown -R mongod.mongod /mongodb/

[root@node2 ~]# vim /etc/mongod.conf

修改

replSet=testSet

replIndexPrefetch=_id_only

#replSet=testSet

#replIndexPrefetch=_id_only

添加

dbpath=/mongodb/data

configsvr=true

[root@node2 ~]# install -o mongod -g mongod -d /mongodb/data

[root@node2 ~]# ls -ld /mongodb/data/

drwxr-xr-x 2 mongod mongod 4096 Jan 9 22:13 /mongodb/data/

[root@node2 ~]# service mongod start


[root@node1 ~]# cd mongodb-2.6.4/

[root@node1 mongodb-2.6.4]# yum -y install mongodb-org-mongos-2.6.4-1.x86_64.rpm


[root@node2 ~]# service mongod stop

[root@node2 ~]# rm -rf /mongodb/data/*

[root@node2 ~]# service mongod start


[root@node1 mongodb-2.6.4]# mongos --configdb=192.168.1.122 --fork

[root@node1 mongodb-2.6.4]# mongos --configdb=192.168.1.122 --fork --logpath=/var/log/mongodb/mongod.log

2017-01-09T22:28:03.812+0800 warning: running with 1 config server should be done only for testing purposes and is not recommended for production

about to fork child process, waiting until server is ready for connections.

forked process: 18397

child process started successfully, parent exiting


[root@node1 mongodb-2.6.4]# mongo --host 192.168.1.121

MongoDB shell version: 2.6.4

connecting to: 192.168.1.121:27017/test


[root@node3 ~]# install -o mongod -g mongod -d /mongodb/data

[root@node3 ~]# vim /etc/mongod.conf

修改

replSet=testSet

replIndexPrefetch=_id_only

#replSet=testSet

#replIndexPrefetch=_id_only

[root@node3 ~]# service mongod start

[root@node4 ~]# vim /etc/mongod.conf

修改

dbpath=/var/lib/mongo

dbpath=/mongodb/data

修改

bind_ip=127.0.0.1

#bind_ip=127.0.0.1

[root@node4 ~]# service mongod start


mongos> sh.addShard("192.168.1.122")

{

"ok" : 0,

"errmsg" : "couldn't connect to new shard socket exception [CONNECT_ERROR] for 192.168.1.122:27017"

}


mongos> sh.status()

--- Sharding Status ---

sharding version: {

"_id" : 1,

"version" : 4,

"minCompatibleVersion" : 4,

"currentVersion" : 5,

"clusterId" : ObjectId("58739d7487c21f53b917098b")

}

shards:

databases:

{ "_id" : "admin", "partitioned" : false, "primary" : "config" }


mongos> sh.addShard("192.168.1.123")

{

"ok" : 0,

"errmsg" : "host is part of set testSet, use replica set url format /,,...."

}

100:33(91411)


0