Building a MongoDB sharded cluster with replica sets and elections on Docker Swarm

1. Environment preparation

Three servers form a Docker Swarm cluster: one Manager and two Workers.

  • Docker version: 17.09
  • MongoDB version: 3.6
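
Before going further, it can help to confirm that the Swarm really has one manager and two workers (a quick check, not part of the original steps; the hostnames manager, worker1 and worker2 are assumed by the stack file below):

docker node ls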

2. MongoDB cluster architecture

(Architecture diagram)

Full-size diagram: https://www.processon.com/view/link/5a3c7386e4b0bf89b8530376

3. Building the cluster

1. [Manager] Create the cluster network

docker network create -d overlay --attachable mongo

--attachable allows other containers to join this network
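
To confirm the overlay network exists (an optional check, not in the original steps):

docker network ls --filter name=mongo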

2. Create 9 data services, 3 config services, and 1 mongos service in global mode

2.1 [All nodes] Create the data directories

mkdir -p /root/mongo/config /root/mongo/shard1 /root/mongo/shard2 /root/mongo/shard3

2.2 [Manager] Create stack.yml

version: '3.3'
services:
  mongors1n1:
    # Docker China registry mirror
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard1 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard1:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        # run on the node whose hostname is manager
        constraints:
          - node.hostname==manager
  mongors2n1:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard2 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard2:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==manager
  mongors3n1:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard3 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard3:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==manager
  mongors1n2:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard1 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard1:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker1
  mongors2n2:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard2 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard2:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker1
  mongors3n2:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard3 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard3:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker1
  mongors1n3:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard1 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard1:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker2
  mongors2n3:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard2 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard2:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker2
  mongors3n3:
    image: registry.docker-cn.com/library/mongo
    command: mongod --shardsvr --replSet shard3 --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/shard3:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker2
  cfg1:
    image: registry.docker-cn.com/library/mongo
    command: mongod --configsvr --replSet cfgrs --smallfiles --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/config:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==manager
  cfg2:
    image: registry.docker-cn.com/library/mongo
    command: mongod --configsvr --replSet cfgrs --smallfiles --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/config:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker1
  cfg3:
    image: registry.docker-cn.com/library/mongo
    command: mongod --configsvr --replSet cfgrs --smallfiles --dbpath /data/db --port 27017
    networks:
      - mongo
    volumes:
      - /etc/localtime:/etc/localtime
      - /root/mongo/config:/data/db
    deploy:
      restart_policy:
        condition: on-failure
      replicas: 1
      placement:
        constraints:
          - node.hostname==worker2
  mongos:
    image: registry.docker-cn.com/library/mongo
    # MongoDB 3.6 binds to 127.0.0.1 by default; binding 0.0.0.0 lets other containers and hosts connect
    command: mongos --configdb cfgrs/cfg1:27017,cfg2:27017,cfg3:27017 --bind_ip 0.0.0.0 --port 27017
    networks:
      - mongo
    # publish port 27017 on the host
    ports:
      - 27017:27017
    volumes:
      - /etc/localtime:/etc/localtime
    depends_on:
      - cfg1
      - cfg2
      - cfg3
    deploy:
      restart_policy:
        condition: on-failure
      # run one container on every node in the swarm
      mode: global
networks:
  mongo:
    external: true

2.3 Start the services (run on the Manager)

docker stack deploy -c stack.yml mongo

2.4 [Manager] Check that the services started

docker service ls

If everything is healthy, the output looks like this:

[docker@manager ~]# docker service ls
ID                  NAME                MODE                REPLICAS            IMAGE                                         PORTS
z1l5zlghlfbi        mongo_cfg1          replicated          1/1                 registry.docker-cn.com/library/mongo:latest
lg9vbods29th        mongo_cfg2          replicated          1/1                 registry.docker-cn.com/library/mongo:latest
i6d6zwxsq0ss        mongo_cfg3          replicated          1/1                 registry.docker-cn.com/library/mongo:latest
o0lfdavd8kpj        mongo_mongors1n1    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
n85yeyod7mlu        mongo_mongors1n2    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
cwurdqng9tdk        mongo_mongors1n3    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
vu6al5kys28u        mongo_mongors2n1    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
xrjiep0vrf0w        mongo_mongors2n2    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
qqzifwcejjyk        mongo_mongors2n3    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
tddgw8hygv1b        mongo_mongors3n1    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
qrb6fjty03mw        mongo_mongors3n2    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
m8ikdzjssmhn        mongo_mongors3n3    replicated          1/1                 registry.docker-cn.com/library/mongo:latest
mnnlm49b7kyb        mongo_mongos        global              3/3                 registry.docker-cn.com/library/mongo:latest   *:27017->27017/tcp

3. Initialize the cluster

3.1 [Manager] Initialize the config server replica set

docker exec -it $(docker ps | grep "cfg1" | awk '{ print $1 }') bash -c "echo 'rs.initiate({_id: \"cfgrs\",configsvr: true, members: [{ _id : 0, host : \"cfg1\" },{ _id : 1, host : \"cfg2\" }, { _id : 2, host : \"cfg3\" }]})' | mongo"

3.2 [Manager] Initialize the three shard replica sets

docker exec -it $(docker ps | grep "mongors1n1" | awk '{ print $1 }') bash -c "echo 'rs.initiate({_id : \"shard1\", members: [{ _id : 0, host : \"mongors1n1\" },{ _id : 1, host : \"mongors1n2\" },{ _id : 2, host : \"mongors1n3\", arbiterOnly: true }]})' | mongo"

docker exec -it $(docker ps | grep "mongors2n1" | awk '{ print $1 }') bash -c "echo 'rs.initiate({_id : \"shard2\", members: [{ _id : 0, host : \"mongors2n1\" },{ _id : 1, host : \"mongors2n2\" },{ _id : 2, host : \"mongors2n3\", arbiterOnly: true }]})' | mongo"

docker exec -it $(docker ps | grep "mongors3n1" | awk '{ print $1 }') bash -c "echo 'rs.initiate({_id : \"shard3\", members: [{ _id : 0, host : \"mongors3n1\" },{ _id : 1, host : \"mongors3n2\" },{ _id : 2, host : \"mongors3n3\", arbiterOnly: true }]})' | mongo"

3.3 [Manager] Add the three replica sets to mongos as shards

docker exec -it $(docker ps | grep "mongos" | awk '{ print $1 }') bash -c "echo 'sh.addShard(\"shard1/mongors1n1:27017,mongors1n2:27017,mongors1n3:27017\")' | mongo"

docker exec -it $(docker ps | grep "mongos" | awk '{ print $1 }') bash -c "echo 'sh.addShard(\"shard2/mongors2n1:27017,mongors2n2:27017,mongors2n3:27017\")' | mongo"

docker exec -it $(docker ps | grep "mongos" | awk '{ print $1 }') bash -c "echo 'sh.addShard(\"shard3/mongors3n1:27017,mongors3n2:27017,mongors3n3:27017\")' | mongo"
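
To verify that all three shards were registered, the sharding status can be checked through mongos (an optional check, following the same pattern as the commands above):

docker exec -it $(docker ps | grep "mongos" | awk '{ print $1 }') bash -c "echo 'sh.status()' | mongo"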

4. Connecting to the cluster

4.1 Internal: containers on the mongo network connect via mongos:27017

4.2 External: connect via IP:27017, where IP is the address of any one of the three servers
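
After connecting through mongos, sharding still has to be enabled per database and per collection. A minimal sketch (the database name testdb and collection orders are only examples, not part of the original setup):

docker exec -it $(docker ps | grep "mongos" | awk '{ print $1 }') bash -c "echo 'sh.enableSharding(\"testdb\"); sh.shardCollection(\"testdb.orders\", {_id: \"hashed\"})' | mongo"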

Steps to install MongoDB on Linux

MongoDB is currently a very popular NoSQL database whose flexible storage model has made it a favorite among IT practitioners. It maps well to object-oriented thinking: every record in MongoDB is a Document object. Its biggest advantage is that persistence requires no hand-written SQL; CRUD operations are plain method calls. This article shows how to install MongoDB quickly, for reference.

1. Install and configure MongoDB

Step 1: Prepare the OS and make sure the default port 27017 is available

### Current environment
# cat /etc/issue
Red Hat Enterprise Linux Server release 6.5 (Santiago)

# vi /etc/selinux/config
SELINUX=disabled

Step 2: Download the installation package

Download page: https://www.mongodb.org/downloads
Or download directly with curl:
curl -O https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.0.6.tgz

Step 3: Extract the downloaded file

# pwd
/usr/local/src
# tar -xvf mongodb-linux-x86_64-rhel62-3.0.6.gz ### note: this file was downloaded directly from the website, hence the .gz name

Step 4: Copy the extracted files to the runtime directory

# mkdir -p /var/lib/mongodb
# cp -R -n /usr/local/src/mongodb-linux-x86_64-rhel62-3.0.6/. /var/lib/mongodb/

Step 5: Set environment variables

e.g. export PATH=<mongodb-install-directory>/bin:$PATH
# vi ~/.bash_profile 
 export PATH=/var/lib/mongodb/bin:$PATH
# source ~/.bash_profile

Step 6: Create the data directory

# mkdir -p /data/mongodata

2. Start and verify MongoDB

### Start mongod
# mongod --dbpath /data/mongodata

### Startup output:
2015-10-28T10:03:33.100+0800 I JOURNAL [initandlisten] journal dir=/data/mongodata/journal
2015-10-28T10:03:33.101+0800 I JOURNAL [initandlisten] recover : no journal files present, no recovery needed
2015-10-28T10:03:33.264+0800 I JOURNAL [initandlisten] preallocateIsFaster=true 2.18
2015-10-28T10:03:33.398+0800 I JOURNAL [durability] Durability thread started
2015-10-28T10:03:33.398+0800 I JOURNAL [journal writer] Journal writer thread started
2015-10-28T10:03:33.401+0800 I CONTROL [initandlisten] MongoDB starting : pid=10191 port=27017 dbpath=/data/mongodata 64-bit host=java_2
2015-10-28T10:03:33.401+0800 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-10-28T10:03:33.401+0800 I CONTROL [initandlisten] 
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] 
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] **    We suggest setting it to 'never'
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] 
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] **    We suggest setting it to 'never'
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] 
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] db version v3.0.6
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] git version: 1ef45a23a4c5e3480ac919b28afcba3c615488f2
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] build info: Linux ip-10-67-194-123 2.6.32-220.el6.x86_64 #1 SMP Wed Nov 9 08:03:13 EST 2011 x86_64 BOOST_LIB_VERSION=1_49
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] allocator: tcmalloc
2015-10-28T10:03:33.402+0800 I CONTROL [initandlisten] options: { storage: { dbPath: "/data/mongodata" } }
2015-10-28T10:03:33.404+0800 I INDEX  [initandlisten] allocating new ns file /data/mongodata/local.ns, filling with zeroes...
2015-10-28T10:03:33.491+0800 I STORAGE [FileAllocator] allocating new datafile /data/mongodata/local.0, filling with zeroes...
2015-10-28T10:03:33.491+0800 I STORAGE [FileAllocator] creating directory /data/mongodata/_tmp
2015-10-28T10:03:33.497+0800 I STORAGE [FileAllocator] done allocating datafile /data/mongodata/local.0, size: 64MB, took 0.001 secs
2015-10-28T10:03:33.511+0800 I NETWORK [initandlisten] waiting for connections on port 27017

### Stop mongod with Ctrl+C
^C2015-10-28T10:09:21.510+0800 I CONTROL [signalProcessingThread] got signal 2 (Interrupt), will terminate after current cmd ends
2015-10-28T10:09:21.511+0800 I CONTROL [signalProcessingThread] now exiting
2015-10-28T10:09:21.511+0800 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
2015-10-28T10:09:21.511+0800 I NETWORK [signalProcessingThread] closing listening socket: 5
2015-10-28T10:09:21.511+0800 I NETWORK [signalProcessingThread] closing listening socket: 6
2015-10-28T10:09:21.511+0800 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-27017.sock
2015-10-28T10:09:21.511+0800 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
2015-10-28T10:09:21.511+0800 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
2015-10-28T10:09:21.512+0800 I STORAGE [signalProcessingThread] shutdown: waiting for fs preallocator...
2015-10-28T10:09:21.512+0800 I STORAGE [signalProcessingThread] shutdown: final commit...
2015-10-28T10:09:21.512+0800 I JOURNAL [signalProcessingThread] journalCleanup...
2015-10-28T10:09:21.512+0800 I JOURNAL [signalProcessingThread] removeJournalFiles
2015-10-28T10:09:21.515+0800 I JOURNAL [signalProcessingThread] Terminating durability thread ...
2015-10-28T10:09:21.615+0800 I JOURNAL [journal writer] Journal writer thread stopped
2015-10-28T10:09:21.615+0800 I JOURNAL [durability] Durability thread stopped
2015-10-28T10:09:21.615+0800 I STORAGE [signalProcessingThread] shutdown: closing all files...
2015-10-28T10:09:21.618+0800 I STORAGE [signalProcessingThread] closeAllFiles() finished
2015-10-28T10:09:21.618+0800 I STORAGE [signalProcessingThread] shutdown: removing fs lock...
2015-10-28T10:09:21.618+0800 I CONTROL [signalProcessingThread] dbexit: rc: 0

### Fix the two startup warnings; ignore the root-user warning for now
# echo "never" > /sys/kernel/mm/transparent_hugepage/enabled
# echo "never" > /sys/kernel/mm/transparent_hugepage/defrag
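
These settings do not survive a reboot; one way to persist them (an assumption, not part of the original steps) is to reapply them at boot, e.g. via /etc/rc.local:

# echo 'echo never > /sys/kernel/mm/transparent_hugepage/enabled' >> /etc/rc.local
# echo 'echo never > /sys/kernel/mm/transparent_hugepage/defrag' >> /etc/rc.local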

### Restart, this time as a background process
# mongod --dbpath /data/mongodata &

### Check the running process
# ps -ef|grep mongo |grep -v grep
root   11115 27956 0 10:11 pts/2  00:00:00 mongod --dbpath /data/mongodata
# lsof -i:27017
COMMAND  PID USER  FD  TYPE  DEVICE SIZE/OFF NODE NAME
mongod 11115 root  5u IPv4 50567119   0t0 TCP *:27017 (LISTEN)

### Connect to mongod with the mongo shell
# mongo
MongoDB shell version: 3.0.6
connecting to: test
2015-10-28T10:14:30.685+0800 I NETWORK [initandlisten] connection accepted from 127.0.0.1:53907 #1 (1 connection now open)
Server has startup warnings: 
2015-10-28T10:11:49.217+0800 I CONTROL [initandlisten] ** WARNING: You are running this process as the root user, which is not recommended.
2015-10-28T10:11:49.217+0800 I CONTROL [initandlisten] 
> help
    db.help()          help on db methods
    db.mycoll.help()       help on collection methods
    sh.help()          sharding helpers
    rs.help()          replica set helpers
    help admin          administrative help
    help connect         connecting to a db help
    help keys          key shortcuts
    help misc          misc things to know
    help mr           mapreduce


    show dbs           show database names
    show collections       show collections in current database
    show users          show users in current database
    show profile         show most recent system.profile entries with time >= 1ms
    show logs          show the accessible logger names
    show log [name]       prints out the last segment of log in memory, 'global' is default
    use <db_name>        set current database
    db.foo.find()        list objects in collection foo
    db.foo.find( { a : 1 } )   list objects in foo where a == 1
    it              result of the last line evaluated; use to further iterate
    DBQuery.shellBatchSize = x  set default number of items to display on shell
    exit             quit the mongo shell
> db.getCollection("version");
test.version
> exit
bye

3. MongoDB-related tools

### The README in the installation directory describes the common MongoDB command-line tools
# more /usr/local/mongodb/README 
MongoDB README
Welcome to MongoDB!
COMPONENTS

 bin/mongod - The database process.
 bin/mongos - Sharding controller.
 bin/mongo - The database shell (uses interactive javascript).

UTILITIES

 bin/mongodump     - MongoDB dump tool - for backups, snapshots, etc..
 bin/mongorestore   - MongoDB restore a dump
 bin/mongoexport    - Export a single collection to test (JSON, CSV)
 bin/mongoimport    - Import from JSON or CSV
 bin/mongofiles    - Utility for putting and getting files from MongoDB GridFS
 bin/mongostat     - Show performance statistics

RUNNING

 For command line options invoke:

  $ ./mongod --help

 To run a single server database:

  $ mkdir /data/db
  $ ./mongod
  $
  $ # The mongo javascript shell connects to localhost and test database by default:
  $ ./mongo 
  > help

DRIVERS

 Client drivers for most programming languages are available at mongodb.org. Use the 
 shell ("mongo") for administrative tasks.

### Get usage for a single command
# <command> --help
# mongod --help|more
Options:

General options:
 -h [ --help ]        show this usage information
 --version          show version information

# mongod --version
db version v3.0.6
git version: 1ef45a23a4c5e3480ac919b28afcba3c615488f2

The difference between save and update in MongoDB

save: replaces the whole document (structure included);
update: updates only the specified data.

The MongoDB document is as follows (goal: change status.arr to [3,2,1]):

{
    "_id" : 0,
    "status" : {
        "id" : 1,
        "username" : "root",
        "pass" : "123456",
        "msg" : "no",
        "arr" : [1,2,3]
    }
}

Updating with save

var otherData = {
    "_id" : 0,
    "status" : {
        "id" : 1,
        "username" : "root",
        "pass" : "123456",
        "msg" : "no",
        "arr" : [3,2,1]
    }
}
// the save method replaces the document structure,
// so to update one field the whole document has to be written out
mongodb.save(otherData,function(err,result){

})

Updating with update

mongodb.update({"_id":"0"},{$set:{"status.arr":[3,2,1]}},function(err,result){

})

How to block external access to MongoDB and add accounts

I will harden the MongoDB database on two fronts.

1. Block external access to MongoDB

When starting the database, just add --bind_ip 127.0.0.1:

./mongod --bind_ip 127.0.0.1 --dbpath /data/db --auth

Alternatively, add a line to the /etc/mongod.conf file:

# listen on the local interface only; separate multiple interfaces with commas

bind_ip = 127.0.0.1

2. Require username/password authentication

To make the database require credentials for every connection, start it with the auth parameter:

./mongod --dbpath /data/db --auth

Alternatively, add a line to the /etc/mongod.conf file:

auth = true

With this, connecting to the database requires the matching username and password.

If the database has no accounts yet, first add an administrator account:

> use admin
switched to db admin
> db.createUser({user:"root",pwd:"123456",roles:["userAdminAnyDatabase"]})
Successfully added user: { "user" : "root", "roles" : [ "userAdminAnyDatabase" ] }

We have now created an administrator account (user root, password 123456). At this point, running show dbs produces the following error:

> show dbs
2017-12-03T22:14:58.418+0800 E QUERY  [thread1] Error: listDatabases failed:{
  "ok" : 0,
  "errmsg" : "not authorized on admin to execute command { listDatabases: 1.0 }",
  "code" : 13,
  "codeName" : "Unauthorized"
} .....

The message means the command is not authorized on the admin database, so log in and authenticate as the administrator:

> db.auth('root','123456')

The auth call returns 1, which means login succeeded, and show dbs now works. We are logged in as the administrator; if we switch to the test database and run db.blog.insert({name:1}), it fails again, meaning we are not authorized on that database either:

> use test
switched to db test
> db.blog.insert({name:1})
WriteResult({
  "writeError" : {
    "code" : 13,
    "errmsg" : "not authorized on test to execute command { insert: "blog", documents: [ { _id: ObjectId('5a240d8e2d43081ea4271cc8'), name: 1.0 } ], ordered: true }"
  }
})

So add a user to the test database and authenticate as that user before operating on its data.

> db.createUser({user:'use1',pwd:'123456',roles:["readWrite"]})
Successfully added user: { "user" : "use1", "roles" : [ "readWrite" ] }
> db.auth('use1','123456')
1
> db.blog.insert({name:1})
WriteResult({ "nInserted" : 1 })
>

At this point, access to and operations on the database are controlled by username and password.

The standard connection URI for the test database is:

mongodb://use1:123456@localhost:27017/test
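
For example, to open a mongo shell against the test database with this account (a sketch; the explicit-flag form is shown because it also works on older shells):

mongo localhost:27017/test -u use1 -p 123456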


Installing MongoDB on Ubuntu

Using MongoDB on Ubuntu 16.04

Installing MongoDB

You can follow the method given on the official site.

Official docs: https://docs.mongodb.com/manual/tutorial/install-mongodb-on-ubuntu/#install-mongodb-community-edition

Import the MongoDB public GPG key

$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 0C49F3730359A14518585931BC711F9BA15703C6

Create a list file for MongoDB

$ echo "deb [ arch=amd64,arm64 ] http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/3.4 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-3.4.list

Update the package index

$ sudo apt-get update

Install MongoDB

$ sudo apt-get install -y mongodb-org
  • Note: the MongoDB configuration file is /etc/mongodb.conf

Start MongoDB

$ sudo service mongod start
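
To check that the service came up (an optional check; on Ubuntu 16.04 the service is managed by systemd):

$ sudo service mongod status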

Check the MongoDB daemon process

$ ps aux | grep mongod

Stop MongoDB

$ sudo service mongod stop

Uninstalling MongoDB

Stop MongoDB

$ sudo service mongod stop

Remove all installed packages

$ sudo apt-get purge mongodb-org*

Remove the databases and log files (see the /etc/mongodb.conf configuration file for the exact paths)

$ sudo rm -r /var/log/mongodb
$ sudo rm -r /var/lib/mongodb

The /etc/mongodb.conf configuration file looks like this (excerpt):

# mongodb.conf

# Where to store the data.
dbpath=/var/lib/mongodb   # database path

# where to log
logpath=/var/log/mongodb/mongodb.log  # log path

logappend=true

bind_ip = 127.0.0.1
#port = 27017

# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
journal=true

# Enables periodic logging of CPU utilization and I/O wait
#cpu = true

Syncing sharded MySQL data to MongoDB under a microservice architecture

Background

Microservices have been a hot topic in recent years, and online debate between microservices and monoliths keeps growing. Facing ever-increasing business demand, many companies choose microservices when upgrading their architecture. My company also took this direction, to support more traffic and easier business expansion.

The problem

There are two main ways to split into microservices: split the business systems but keep one database, or split both the business systems and the database. If the data volume is small there is no need to split the database, because doing so means facing multi-dimensional queries across databases, cross-process transactions, and so on. As our business grew, a single database instance could no longer keep up, so we split both the business systems and the databases, and therefore face the problems above. This article focuses on the solution for real-time multi-dimensional queries. The current system architecture and storage structure are shown below:

(Diagram: current system architecture and storage layout)

Approach

  • To query data across multiple databases, the data first has to be synced into one place so it can be queried together
  • To handle large data volumes, a NoSQL database is the preferred sync target
  • NoSQL databases generally cannot do join queries, so the relational data must be stitched together and converted into a non-relational shape
  • The multi-dimensional business queries need to be real-time, so we choose the NoSQL database with relatively good real-time behavior: MongoDB

Based on this, the data integration architecture looks like the following diagram:

(Diagram: data integration architecture)

Solution

The data-sync approaches found online fall into two groups: MQ message-based sync and binlog-based sync.

First, MQ-based sync. My company tried it for a while and found these problems:

  • The data flow follows the business: MQ messages are sent on key business operations, so it depends heavily on the business systems
  • Existing (stock) data in the database has to be handled separately
  • Lookup/utility tables still have to be synced and maintained separately
  • Every new table requires adding new MQ logic

Given these problems, MQ-based sync is not the best option.

For binlog-based sync there are mature tools such as tungsten replicator, but they only do 1:1 replication; adding custom logic to the replication flow is awkward, and they do not support consolidating sharded databases and tables. So the best option is to read the binlog ourselves and handle the downstream logic. The most mature binlog reader available today is Alibaba's open-source canal.

canal

canal is Alibaba's component for incrementally subscribing to and consuming the MySQL binlog. Alibaba Cloud DRDS, Alibaba TDDL secondary indexes, and small-table replication are all built on canal, so it is widely used.

How canal works is fairly simple:

  • canal speaks the MySQL slave protocol, pretending to be a MySQL slave, and sends a dump request to the MySQL master
  • the MySQL master receives the dump request and starts pushing the binary log to the slave (i.e. canal)
  • canal parses the binary log objects (raw byte streams)

canal documentation: https://github.com/alibaba/canal/wiki

I use canal's HA mode, where ZooKeeper elects the active instance and there is one instance per database. The server-side configuration is as follows:

Directory layout:

conf
    database1
        -instance.properties
    database2
        -instance.properties
    canal.properties

instance.properties

canal.instance.mysql.slaveId = 1001
canal.instance.master.address = X.X.X.X:3306
canal.instance.master.journal.name = 
canal.instance.master.position = 
canal.instance.master.timestamp = 
canal.instance.dbUsername = canal
canal.instance.dbPassword = canal
canal.instance.defaultDatabaseName =
canal.instance.connectionCharset = UTF-8
canal.instance.filter.regex = .*\..*
canal.instance.filter.black.regex =  
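
On the MySQL side, canal needs an account with replication privileges matching the dbUsername/dbPassword above (a sketch of the usual grants; adjust the host pattern and password to your environment):

mysql -uroot -p -e "CREATE USER 'canal'@'%' IDENTIFIED BY 'canal'; GRANT SELECT, REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'canal'@'%'; FLUSH PRIVILEGES;"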

canal.properties

canal.id= 1
canal.ip=X.X.X.X
canal.port= 11111
canal.zkServers=X.X.X.X:2181,X.X.X.X:2181,X.X.X.X:2181
canal.zookeeper.flush.period = 1000
canal.file.data.dir = ${canal.conf.dir}
canal.file.flush.period = 1000
canal.instance.memory.buffer.size = 16384
canal.instance.memory.buffer.memunit = 1024 
canal.instance.memory.batch.mode = MEMSIZE
canal.instance.detecting.enable = true
canal.instance.detecting.sql = select 1
canal.instance.detecting.interval.time = 3
canal.instance.detecting.retry.threshold = 3
canal.instance.detecting.heartbeatHaEnable = false
canal.instance.transaction.size =  1024
canal.instance.fallbackIntervalInSeconds = 60
canal.instance.network.receiveBufferSize = 16384
canal.instance.network.sendBufferSize = 16384
canal.instance.network.soTimeout = 30
canal.instance.filter.query.dcl = true
canal.instance.filter.query.dml = false
canal.instance.filter.query.ddl = false
canal.instance.filter.table.error = false
canal.instance.filter.rows = false
canal.instance.binlog.format = ROW,STATEMENT,MIXED 
canal.instance.binlog.image = FULL,MINIMAL,NOBLOB
canal.instance.get.ddl.isolation = false
canal.destinations= example,p4-test
canal.conf.dir = ../conf
canal.auto.scan = true
canal.auto.scan.interval = 5
canal.instance.global.mode = spring 
canal.instance.global.lazy = false
canal.instance.global.spring.xml = classpath:spring/default-instance.xml

The deployment data flow is as follows:

(Diagram: deployment data flow)

tip:

Although canal supports both mixed and row binlog formats, the table name cannot be recovered from row data when the log is in mixed format, so for now this solution only supports the ROW binlog format.
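
So the source MySQL instances should run with row-based binlogging. A quick way to check (the my.cnf hints in the comment describe a typical setup and are an assumption, not part of the original article):

mysql -uroot -p -e "SHOW VARIABLES LIKE 'binlog_format'; SHOW VARIABLES LIKE 'log_bin';"
# expected: binlog_format = ROW and log_bin = ON; otherwise set log-bin, binlog-format=ROW and server-id in my.cnf and restart MySQL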

Data synchronization

Create a canal client application that subscribes to the binlog data read by canal.

1. Enable multi-instance subscription (subscribe to several instances)

public void initCanalStart() {
    List<String> destinations = canalProperties.getDestination();
    final List<CanalClient> canalClientList = new ArrayList<>();
    if (destinations != null && destinations.size() > 0) {
        for (String destination : destinations) {
            // obtain the canal server address dynamically from ZooKeeper and connect; if one server crashes, failover is supported
            CanalConnector connector = CanalConnectors.newClusterConnector(canalProperties.getZkServers(), destination, "", "");
            CanalClient client = new CanalClient(destination, connector);
            canalClientList.add(client);
            client.start();
        }
    }
    Runtime.getRuntime().addShutdownHook(new Thread() {
        public void run() {
            try {
                logger.info("## stop the canal client");
                for (CanalClient canalClient : canalClientList) {
                    canalClient.stop();
                }
            } catch (Throwable e) {
                logger.warn("##something goes wrong when stopping canal:", e);
            } finally {
                logger.info("## canal client is down.");
            }
        }
    });
}

Handling the subscribed messages

private void process() {
    int batchSize = 5 * 1024;
    while (running) {
        try {
            MDC.put("destination", destination);
            connector.connect();
            connector.subscribe();
            while (running) {
                Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId != -1 && size > 0) {
                    saveEntry(message.getEntries());
                }
                connector.ack(batchId); // acknowledge the batch
                // connector.rollback(batchId); // on failure, roll back the batch
            }
        } catch (Exception e) {
            logger.error("process error!", e);
        } finally {
            connector.disconnect();
            MDC.remove("destination");
        }
    }
}

Handle the messages according to the database event: filter the entry list and process the data changes. The fields used are:

  • insert: schemaName, tableName, afterColumnsList
  • update: schemaName, tableName, afterColumnsList
  • delete: schemaName, tableName, beforeColumnsList

RowChange rowChage = null;
    try {
        rowChage = RowChange.parseFrom(entry.getStoreValue());
    } catch (Exception e) {
        throw new RuntimeException("parse event has an error , data:" + entry.toString(), e);
    }
    EventType eventType = rowChage.getEventType();
    logger.info(row_format,
            entry.getHeader().getLogfileName(),
            String.valueOf(entry.getHeader().getLogfileOffset()), entry.getHeader().getSchemaName(),
            entry.getHeader().getTableName(), eventType,
            String.valueOf(entry.getHeader().getExecuteTime()), String.valueOf(delayTime));
    if (eventType == EventType.QUERY || rowChage.getIsDdl()) {
        logger.info(" sql ----> " + rowChage.getSql());
        continue;
    }
    DataService dataService = SpringUtil.getBean(DataService.class);
    for (RowData rowData : rowChage.getRowDatasList()) {
        if (eventType == EventType.DELETE) {
            dataService.delete(rowData.getBeforeColumnsList(), entry.getHeader().getSchemaName(), entry.getHeader().getTableName());
        } else if (eventType == EventType.INSERT) {
            dataService.insert(rowData.getAfterColumnsList(), entry.getHeader().getSchemaName(), entry.getHeader().getTableName());
        } else if (eventType == EventType.UPDATE) {
            dataService.update(rowData.getAfterColumnsList(), entry.getHeader().getSchemaName(), entry.getHeader().getTableName());
        } else {
            logger.info("unknown event type: {}", eventType);
        }
    }
}

Convert the column list into a DBObject usable by MongoTemplate, doing data-type conversion along the way:

public static DBObject columnToJson(List<CanalEntry.Column> columns) {
    DBObject obj = new BasicDBObject();
    try {
        for (CanalEntry.Column column : columns) {
            String mysqlType = column.getMysqlType();
            // int columns: display width up to 10 becomes Integer, wider becomes Long
            if (mysqlType.startsWith("int")) {
                int lenBegin = mysqlType.indexOf('(');
                int lenEnd = mysqlType.indexOf(')');
                if (lenBegin > 0 && lenEnd > 0) {
                    int length = Integer.parseInt(mysqlType.substring(lenBegin + 1, lenEnd));
                    if (length > 10) {
                        obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : Long.parseLong(column.getValue()));
                        continue;
                    }
                }
                obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : Integer.parseInt(column.getValue()));
            } else if (mysqlType.startsWith("bigint")) {
                obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : Long.parseLong(column.getValue()));
            } else if (mysqlType.startsWith("decimal")) {
                int lenBegin = mysqlType.indexOf('(');
                int lenCenter = mysqlType.indexOf(',');
                int lenEnd = mysqlType.indexOf(')');
                if (lenBegin > 0 && lenEnd > 0 && lenCenter > 0) {
                    int length = Integer.parseInt(mysqlType.substring(lenCenter + 1, lenEnd));
                    if (length == 0) {
                        obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : Long.parseLong(column.getValue()));
                        continue;
                    }
                }
                obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : Double.parseDouble(column.getValue()));
            } else if (mysqlType.equals("datetime") || mysqlType.equals("timestamp")) {
                obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : DATE_TIME_FORMAT.parse(column.getValue()));
            } else if (mysqlType.equals("date")) {
                obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : DATE_FORMAT.parse(column.getValue()));
            } else if (mysqlType.equals("time")) {
                obj.put(column.getName(), StringUtils.isBlank(column.getValue()) ? null : TIME_FORMAT.parse(column.getValue()));
            } else {
                obj.put(column.getName(), column.getValue());
            }
        }
    } catch (ParseException e) {
        e.printStackTrace();
    }
    return obj;
}

tip:

If the same DBObject is used both for the raw data and for composed (or other) data, deep-copy it first and work on the copy.

Data composition

After receiving the database data we compose it. Take two user tables as an example:

user_info:{id,user_no,user_name,user_password}
user_other_info:{id,user_no,idcard,realname}

After composition the Mongo document is:

user:{_id,user_no,userInfo:{id,user_no,user_name,user_password},userOtherInfo:{id,user_no,idcard,realname})

The incoming messages carry a lot of information; how can the composition logic be triggered simply?

Look at what we have: schemaName, tableName, DBObject, and the event (insert, update, delete).

String these identifiers together: /schemaName/tableName/Event(DBObject). That is exactly a standard RESTful path, so by implementing a simple Spring MVC-style dispatcher we can route the data to the right composition logic automatically.

First implement the @Controller counterpart, an annotation named Schema whose value corresponds to schemaName:

@Target({ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
@Component
public  @interface Schema {
 String value() default "";
}

Then implement the @RequestMapping counterpart, an annotation named Table, using canal's EventType where RequestMethod would be:

@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
@Documented
public  @interface Table {
    String value() default "";
    CanalEntry.EventType[] event() default {};
}

Then create a SpringUtil that implements ApplicationContextAware and, when the application starts, initializes two maps: instanceMap and handlerMap.

private static ApplicationContext applicationContext = null;
//map of schema name -> data-handling bean
private static Map<String, Object> instanceMap = new HashMap<String, Object>();
//map of path -> data-handling method
private static Map<String, Method> handlerMap = new HashMap<String, Method>();
@Override
public void setApplicationContext(ApplicationContext applicationContext) {
    if (SpringUtil.applicationContext == null) {
        SpringUtil.applicationContext = applicationContext;
        //populate instanceMap
        instanceMap();
        //populate handlerMap
        handlerMap();
    }
}
private void instanceMap() {
    Map<String, Object> beans = applicationContext.getBeansWithAnnotation(Schema.class);
    for (Object bean : beans.values()) {
        Class<?> clazz = bean.getClass();
        Object instance = applicationContext.getBean(clazz);
        Schema schema = clazz.getAnnotation(Schema.class);
        String key = schema.value();
        instanceMap.put(key, instance);
        logger.info("instanceMap [{}:{}]", key, bean == null ? "null" : clazz.getName());
    }
}
private void handlerMap() {
    if (instanceMap.size() <= 0)
        return;
    for (Map.Entry<String, Object> entry : instanceMap.entrySet()) {
        if (entry.getValue().getClass().isAnnotationPresent(Schema.class)) {
            Schema schema = entry.getValue().getClass().getAnnotation(Schema.class);
            String schemeName = schema.value();
            Method[] methods = entry.getValue().getClass().getMethods();
            for (Method method : methods) {
                if (method.isAnnotationPresent(Table.class)) {
                    Table table = method.getAnnotation(Table.class);
                    String tName = table.value();
                    CanalEntry.EventType[] events = table.event();
                    //skip methods that do not declare any event types
                    if (events.length < 1) {
                        continue;
                    }
                    //the same method can be registered under multiple paths
                    for (int i = 0; i < events.length; i++) {
                        String path = "/" + schemeName + "/" + tName + "/" + events[i].getNumber();
                        handlerMap.put(path, method);
                        logger.info("handlerMap [{}:{}]", path, method.getName());
                    }
                } else {
                    continue;
                }
            }
        } else {
            continue;
        }
    }
}

The dispatch method:

public static void doEvent(String path, DBObject obj) throws Exception {
    String[] pathArray = path.split("/");
    if (pathArray.length != 4) {
        logger.info("invalid path format: {}", path);
        return;
    }
    Method method = handlerMap.get(path);
    Object schema = instanceMap.get(pathArray[1]);
    //ignore paths with no mapped bean or method
    if (method == null || schema == null) {
        return;
    }
    try {
        long begin = System.currentTimeMillis();
        logger.info("integrate data:{},{}", path, obj);
        method.invoke(schema, new Object[]{obj});
        logger.info("integrate data consume: {}ms:", System.currentTimeMillis() - begin);
    } catch (Exception e) {
        logger.error("error invoking composition logic", e);
        throw new Exception(e.getCause());
    }
}

Handling the data-composition messages:

@Schema("demo_user")
public class UserService {
    @Table(value = "user_info", event = {CanalEntry.EventType.INSERT, CanalEntry.EventType.UPDATE})
    public void saveUser_UserInfo(DBObject userInfo) {
        String userNo = userInfo.get("user_no") == null ? null : userInfo.get("user_no").toString();
        DBCollection collection = completeMongoTemplate.getCollection("user");
        DBObject queryObject = new BasicDBObject("user_no", userNo);
        DBObject user = collection.findOne(queryObject);
        if (user == null) {
            user = new BasicDBObject();
            user.put("user_no", userNo);
            user.put("userInfo", userInfo);
            collection.insert(user);
        } else {
            DBObject updateObj = new BasicDBObject("userInfo", userInfo);
            DBObject update = new BasicDBObject("$set", updateObj);
            collection.update(queryObject, update);
        }
    }
}

Sample source code

https://github.com/zhangtr/canal-mongo

Discussion of the approach and corrections to the code are welcome.

Two MongoDB errors and how to fix them

Error:

rico@ubuntu:~$ mongo
MongoDB shell version: 3.2.17
connecting to: test
2017-11-16T23:48:59.362+0800 W NETWORK  [thread1] Failed to connect to 127.0.0.1:27017, in(checking socket for error after poll), reason: errno:111 Connection refused
2017-11-16T23:48:59.362+0800 E QUERY    [thread1] Error: couldn’t connect to server 127.0.0.1:27017, connection attempt failed :
connect@src/mongo/shell/mongo.js:229:14
@(connect):1:6

exception: connect failed

Fix:

sudo rm /var/lib/mongodb/mongod.lock
sudo mongod --repair --dbpath /var/lib/mongodb
sudo mongod --fork --logpath /var/lib/mongodb/mongodb.log --dbpath /var/lib/mongodb 
sudo service mongodb start

Error:

rico@ubuntu:/var/lib/mongodb$ sudo service mongodb start
Failed to start mongodb.service: Unit mongodb.service is masked.

Fix:

rico@ubuntu:/var/lib/mongodb$ sudo chown -R mongodb:mongodb /var/lib/mongodb
rico@ubuntu:/var/lib/mongodb$ sudo systemctl start mongod

MongoDB tips and caveats for daily use

Preface

This is a collection of solutions to problems encountered while using MongoDB day to day, shared for reference. Without further ado, here are the details.

1. Finding documents whose array field is not empty

Find the documents in which an array field is not empty.

For example, given the following documents:

{
 "id" : "581c060f2b436c05aafb1632",
 "commit_history" : [ 
 "581c20d52b436c05aafb1633", 
 "581c21c12b436c05aafb1634"
 ]
},
{
 "id" : "581c060f2b436c05aafb1633",
 "commit_history" : []
}

To find the documents where commit_history is not empty, either of the following works:

  Method 1: db.collection.find({commit_history: {$not: {$size: 0}}})

  Method 2: db.collection.find({'commit_history.0': {$exists: 1}})

2. Adding a user in MongoDB

To add a user for a database in MongoDB:

use collection_name    # switch to the target database

db.createUser(
 {
 user: "collection_name",
 pwd: "password",
 roles: [ "readWrite", "dbAdmin" ]
 }
)
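
To check the new account, connect and authenticate against that database (a sketch reusing the placeholder names from the snippet above):

mongo 127.0.0.1:27017/collection_name -u collection_name -p password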

3. Sometimes a specific field (column) needs to be removed; use an update operation

For example, to remove the name column:

query json:

{"name":{$exists:true}} 

update json:

{$unset:{"name":""}} 
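
Put together as a single shell command (a sketch; "collection" stands for your real collection name, and {multi: true} removes the field from every matching document):

mongo test --eval 'db.collection.update({"name": {$exists: true}}, {$unset: {"name": ""}}, {multi: true})'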

4. Exporting data: run mongoexport from MongoDB's bin directory with the appropriate parameters

For example:

./mongoexport -h 192.168.0.201 --port 27017 -d admin -u admin -p admin -c department -o /home/admin/department.dat 
  • -h: IP address of the database to connect to;
  • --port: port of the database to connect to;
  • -u: username for the database;
  • -p: password for the database;
  • -d: name of the database;
  • -c: collection to export;
  • -o: destination path for the exported data;

Notes: (1) the database being connected to must be up and running;
(2) I once had a case where user accounts existed in the database but the server had been started without auth; the export still only succeeded after I supplied the username and password. If you run into something similar, it is worth a try.

5. Importing data: run mongoimport from MongoDB's bin directory; the parameters are the same as above

For example:

./mongoimport --port 27017 -d admin -u admin -p admin -c department /home/common/mongodb305/bin/department.dat 

6. Authentication for users of non-admin databases:

To add a user to a database, run the following in the target database; for example, to add a user with read/write rights in the mongoTest database:

db.createUser({"user":"test","pwd":"123456","roles":["readWrite"]}) 

The user can also be added from the admin database:

db.createUser({"user":"test","pwd":"123456","roles":[{"role":"readWrite","db":"test"},"readWrite"]}) 

Note that these two approaches differ, and that difference once tripped me up:

With the first approach, we can go to MongoDB's bin directory and run the command below to enter the test database and do all reads and writes; the same username and password also work when connecting from mongoVUE:

./mongo 192.168.0.201:27017/test -u test -p 123456 

But if the user was created the second way, the same command fails authentication; it only works by first entering the mongo shell, authenticating against the admin database, and then switching to test. This is a small pit: the username and password are perfectly fine, yet the connection keeps failing for no obvious reason.
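
For the second case, the shell can also authenticate against admin in one step with --authenticationDatabase (a sketch of the idea; the flag has been available since MongoDB 2.4):

./mongo 192.168.0.201:27017/test -u test -p 123456 --authenticationDatabase admin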

7. MongoDB 3.0 still defaults to the same storage engine as 2.6. I tried switching to the new engine with the startup parameter below; note that this only works while the database contains no data yet, otherwise it reports an error:

./mongod -f /mongodb304/conf/mongodb.conf --storageEngine wiredTiger 

mongodb.conf holds the other startup parameters, such as dbpath and logpath.


MongoDB user and permission control

A reader asked about MongoDB auth in the comments, so I looked into it and wrote down the process.

Like Oracle, MongoDB has strict user and role permission control. See the official docs: https://docs.mongodb.com/manual/reference/method/db.createUser/

The syntax for creating users differs between MongoDB versions. I am using 3.0.6 here. Before 3.0 it was db.addUser(); from 3.0 on it is db.createUser(). Calling db.addUser() on 3.0+ fails with:

> db.addUser('dba','dba')
2017-11-17T13:17:08.001+0800 E QUERY    TypeError: Property 'addUser' of object admin is not a function

If the database has no users yet, then to create the first one you have to turn auth off (auth=false) before entering the database.

[root@MidApp mongodb]# cat mongodb.conf   # configuration file
dbpath=/data/db
logpath=/usr/local/mongodb/logs/mongodb.log
logappend=true
port=27000
fork=true
auth=false
nohttpinterface=false
bind_ip=192.168.221.161
journal=false
quiet=true

Log in to the database; only one library is visible, the admin database is not:

[root@MidApp mongodb]# mongo 192.168.221.161:27000 
MongoDB shell version: 3.0.6
connecting to: 192.168.221.161:27000/test
> show dbs
local  0.078GB

Now create an account with grant privileges, i.e. the right to administer accounts. Note that MongoDB accounts belong to a database: a user authorized in a given database must also authenticate (auth) in that database.

> use admin
switched to db admin
> db.createUser({user:"dba",pwd:"dba",roles:[{role:"userAdminAnyDatabase",db:"admin"}]})
Successfully added user: {
"user" : "dba",
"roles" : [
{
"role" : "userAdminAnyDatabase",
"db" : "admin"
}
]
}
> db.system.users.find()
{ "_id" : "admin.dba", "user" : "dba", "db" : "admin", "credentials" : { "SCRAM-SHA-1" : { "iterationCount" : 10000, "salt" : "MXvU7oJanxW7gPw+NwI7rw==", "storedKey" : "lTPmK31qbk1YKmx5stmYiphsQZE=", "serverKey" : "gVovcstiwC0nuU6LTXZAiWkucfA=" } }, "roles" : [ { "role" : "userAdminAnyDatabase", "db" : "admin" } ] }
> db.system.users.find().pretty()
{
"_id" : "admin.dba",
"user" : "dba",
"db" : "admin",
"credentials" : {
"SCRAM-SHA-1" : {
"iterationCount" : 10000,
"salt" : "MXvU7oJanxW7gPw+NwI7rw==",
"storedKey" : "lTPmK31qbk1YKmx5stmYiphsQZE=",
"serverKey" : "gVovcstiwC0nuU6LTXZAiWkucfA="
}
},
"roles" : [
{
"role" : "userAdminAnyDatabase",
"db" : "admin"
}
]
}

We have created a user dba with password dba, holding the userAdminAnyDatabase role on the admin database. Now let's look at MongoDB's built-in roles:

    1. Database user roles: read, readWrite;
    2. Database administration roles: dbAdmin, dbOwner, userAdmin;
    3. Cluster administration roles: clusterAdmin, clusterManager, clusterMonitor, hostManager;
    4. Backup and restore roles: backup, restore;
    5. All-database roles: readAnyDatabase, readWriteAnyDatabase, userAdminAnyDatabase, dbAdminAnyDatabase
    6. Superuser role: root  
    // several other roles grant superuser access directly or indirectly (dbOwner, userAdmin, userAdminAnyDatabase)
    7. Internal role: __system

The role definitions in detail:

read: allows the user to read the given database

readWrite: allows the user to read and write the given database

dbAdmin: allows the user to run administrative functions in the given database, such as creating and dropping indexes, viewing statistics, or accessing system.profile

userAdmin: allows the user to write to the system.users collection, i.e. to create, delete, and manage users in the given database

clusterAdmin: available only in the admin database; grants administrative rights over all sharding and replica set functions

readAnyDatabase: available only in the admin database; grants read access to all databases

readWriteAnyDatabase: available only in the admin database; grants read and write access to all databases

userAdminAnyDatabase: available only in the admin database; grants userAdmin rights on all databases

dbAdminAnyDatabase: available only in the admin database; grants dbAdmin rights on all databases.

root: available only in the admin database; the superuser account with full privileges

Now turn the auth parameter on and verify.

[root@MidApp mongodb]# mongo 192.168.221.161:27000 
MongoDB shell version: 3.0.6
connecting to: 192.168.221.161:27000/test
> show dbs   # not authenticated, so no permissions
2017-11-17T13:04:35.357-0800 E QUERY    Error: listDatabases failed:{
"ok" : 0,
"errmsg" : "not authorized on admin to execute command { listDatabases: 1.0 }",
"code" : 13
}
    at Error (<anonymous>)
    at Mongo.getDBs (src/mongo/shell/mongo.js:47:15)
    at shellHelper.show (src/mongo/shell/utils.js:630:33)
    at shellHelper (src/mongo/shell/utils.js:524:36)
    at (shellhelp2):1:1 at src/mongo/shell/mongo.js:47
> use admin   # the account was added in the admin database, so switch to admin to authenticate
switched to db admin
> db.auth('dba','dba')
1
> show dbs
admin  0.078GB
local  0.078GB

The dba user we created authenticates successfully. Next, create two more users to test the other role permissions: one read-only user and one read-write user.

> use test;
switched to db test
> db.createUser({user:"zduser",pwd:"zduser",roles:[{role:"read",db:"test"}]})
Successfully added user: {
"user" : "zduser",
"roles" : [
{
"role" : "read",
"db" : "test"
}
]
}
> db.createUser({user:"dxuser",pwd:"dxuser",roles:[{role:"readWrite",db:"test"}]})
Successfully added user: {
"user" : "dxuser",
"roles" : [
{
"role" : "readWrite",
"db" : "test"
}
]
}
> show users;
{
"_id" : "test.zduser",
"user" : "zduser",
"db" : "test",
"roles" : [
{
"role" : "read",
"db" : "test"
}
]
}
{
"_id" : "test.dxuser",
"user" : "dxuser",
"db" : "test",
"roles" : [
{
"role" : "readWrite",
"db" : "test"
}
]
}
>

Create a collection in the test database and verify the permissions of these two users:

> show tables;   # the userAdminAnyDatabase role only covers user management, nothing else
2017-11-17T13:47:39.845-0800 E QUERY    Error: listCollections failed: {
"ok" : 0,
"errmsg" : "not authorized on test to execute command { listCollections: 1.0 }",
"code" : 13
}
    at Error (<anonymous>)
    at DB._getCollectionInfosCommand (src/mongo/shell/db.js:646:15)
    at DB.getCollectionInfos (src/mongo/shell/db.js:658:20)
    at DB.getCollectionNames (src/mongo/shell/db.js:669:17)
    at shellHelper.show (src/mongo/shell/utils.js:625:12)
    at shellHelper (src/mongo/shell/utils.js:524:36)
    at (shellhelp2):1:1 at src/mongo/shell/db.js:646
> exit
bye
[root@MidApp mongodb]# mongo 192.168.221.161:27000   # log in again
MongoDB shell version: 3.0.6
connecting to: 192.168.221.161:27000/test
> use test
switched to db test
> db.tb1.insert({"a":1,"b":2})   # first try inserting data
WriteResult({
"writeError" : {
"code" : 13,
"errmsg" : "not authorized on test to execute command { insert: "tb1", documents: [ { _id: ObjectId('5a0f595b3b6523dcb81d4f76'), a: 1.0, b: 2.0 } ], ordered: true }"
}
})
> db.auth('dxuser','dxuser')   # authenticate as the read-write user
1
> db.tb1.insert({"a":1,"b":2})   # inserts are allowed
WriteResult({ "nInserted" : 1 })
> db.tb1.insert({"a":11,"b":22})
WriteResult({ "nInserted" : 1 })
> db.tb1.insert({"a":111,"b":222})
WriteResult({ "nInserted" : 1 })
> db.tb1.find()
{ "_id" : ObjectId("5a0f597f3b6523dcb81d4f77"), "a" : 1, "b" : 2 }
{ "_id" : ObjectId("5a0f59933b6523dcb81d4f78"), "a" : 11, "b" : 22 }
{ "_id" : ObjectId("5a0f59983b6523dcb81d4f79"), "a" : 111, "b" : 222 }
> db.auth('zduser','zduser')   # switch to the read-only user
1
> db.tb1.insert({"a":1111,"b":2222})   # not authorized to insert
WriteResult({
"writeError" : {
"code" : 13,
"errmsg" : "not authorized on test to execute command { insert: "tb1", documents: [ { _id: ObjectId('5a0f59c63b6523dcb81d4f7a'), a: 1111.0, b: 2222.0 } ], ordered: true }"
}
})
> db.tb1.find()   # reads are allowed
{ "_id" : ObjectId("5a0f597f3b6523dcb81d4f77"), "a" : 1, "b" : 2 }
{ "_id" : ObjectId("5a0f59933b6523dcb81d4f78"), "a" : 11, "b" : 22 }
{ "_id" : ObjectId("5a0f59983b6523dcb81d4f79"), "a" : 111, "b" : 222 }
>

Note: MongoDB users are closely tied to their database; a user authorized in a particular database must authenticate against that same database.

MongoDB basic concepts, installation and configuration

Basic concepts

MongoDB stores JSON directly.

With a NoSQL database, the business layer can store data directly in whatever structure it needs.

MongoDB is commonly paired with Node.js (Taobao uses Node.js).

Collection-oriented storage, with index support and short-term retention, built on BSON; drivers are available for Python, .NET, PHP and more.

MongoDB is often combined with traditional MySQL or other relational databases.

Installation and configuration

Install MongoDB to drive E:, create the mongod.cfg configuration file, register the MongoDB service, start MongoDB, and stop the MongoDB service.

systemLog:
 destination: file
 path: E:\MongoDB\data\log\mongod.log
storage:
 dbPath: E:\MongoDB\data\db
sc.exe create MongoDB binPath= "\"E:\MongoDB\Server\bin\mongod.exe\" --service --config=\"E:\MongoDB\mongod.cfg\"" DisplayName= "MongoDB" start= "auto" 
net start MongoDB 
net stop MongoDB  
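
If the service ever needs to be removed again, the matching command is (not part of the original steps):

sc.exe delete MongoDB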

A more complete configuration example

systemLog:  
    quiet: false  
    path: E:\MongoDB\data\log\mongod.log  
    logAppend: false  
    destination: file  
processManagement:  
    fork: true  
    pidFilePath: E:\MongoDB\data\mongod.pid  
net:  
    bindIp: 127.0.0.1  
    port: 27017  
    maxIncomingConnections: 65536  
    wireObjectCheck: true  
    ipv6: false   
storage:  
    dbPath: E:\MongoDB\data\db 
    indexBuildRetry: true  
    journal:  
        enabled: true  
    directoryPerDB: false  
    engine: mmapv1  
    syncPeriodSecs: 60   
    mmapv1:  
        quota:  
            enforced: false  
            maxFilesPerDB: 8  
        smallFiles: true      
        journal:  
            commitIntervalMs: 100  
    wiredTiger:  
        engineConfig:  
            cacheSizeGB: 8  
            journalCompressor: snappy  
            directoryForIndexes: false    
        collectionConfig:  
            blockCompressor: snappy  
        indexConfig:  
            prefixCompression: true  
operationProfiling:  
    slowOpThresholdMs: 100  
    mode: "off"  


Once the MongoDB service is running, you can connect with the mongo command.

mongo


In a browser you can check the default port, 27017:

http://127.0.0.1:27017/


Test the connection with a GUI client.

Show the databases

show databases;

Start the mongo shell with a specific port number

mongo --port=27017