[Diagram: without a proxy, the client talks directly to Server 1, Server 2, Server 3, ... and the database]
[Diagram: with Nginx in front, the client only talks to Nginx (Client ==> Nginx), and Nginx forwards the requests to Server 1, Server 2, Server 3, ... and the database]
cd /opt/
mkdir docker_nginx
cd docker_nginx/
vi docker-compose.yml
version: '3.1'
services:
  nginx:
    restart: always
    image: daocloud.io/library/nginx:1.9.1
    container_name: nginx
    ports:
      - 80:80
user nginx;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
#The lines above make up the global block
#The larger worker_processes is, the more concurrent requests Nginx can handle
#error_log is where Nginx stores its error log

events {
    worker_connections 1024;
}
#events block
#The larger worker_connections is, the more concurrent connections Nginx can handle

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    server {
        listen 80;
        server_name localhost;

        location / {
            root /usr/share/nginx/html;
            index index.html index.htm;
        }
        #location block
        #root: look up the requested static resource under /usr/share/nginx/html
        #index: by default serve index.html or index.htm from that path
    }
    #server block
    #listen: the port Nginx listens on
    #server_name: the host name/IP on which Nginx accepts requests
    #location: matches the request path and decides how the request is handled
}
#http block
#include pulls in an external file => /etc/nginx/mime.types contains a long list of media types
#include /etc/nginx/conf.d/*.conf => pulls in every .conf file under the conf.d directory
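If the container defined above is already running (docker-compose up -d), the configuration discussed here can be syntax-checked in place. A minimal check, using the container_name nginx from the compose file:

docker exec nginx nginx -t   #validates /etc/nginx/nginx.conf together with every file it includes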
version: '3.1'
services:
  nginx:
    restart: always
    image: daocloud.io/library/nginx:1.9.1
    container_name: nginx
    ports:
      - 80:80
    volumes:
      - /opt/docker_nginx/conf.d/:/etc/nginx/conf.d
docker-compose down
cd /opt/docker_nginx/
vi docker-compose.yml
docker-compose build
docker-compose up -d
ls
cd conf.d/
vi default.conf   #add the content below
server {
    listen 80;
    server_name localhost;

    location / {
        root /usr/share/nginx/html;
        index index.html index.htm;
    }
}
docker-compose restart
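At this point the mounted default.conf still only serves the image's static welcome page, which is easy to verify from the host. A quick check, assuming the host IP 192.168.59.129 used throughout this article:

curl -I http://192.168.59.129/   #expect HTTP/1.1 200 OK for the default Nginx page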
server {
    listen 80;
    server_name localhost;

    #Reverse-proxy requests to the Tomcat server
    location / {
        proxy_pass http://192.168.59.129:8080/;
    }
}
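After editing default.conf this way and running docker-compose restart again, port 80 should be answered by Tomcat rather than the welcome page. A rough check, assuming Tomcat really is listening on 192.168.59.129:8080 as configured above:

curl -s http://192.168.59.129/ | head -n 5   #should show the start of the Tomcat page instead of "Welcome to nginx!"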
#1. Exact match
location = / {
    #exact match: nothing may follow the host name
}

#2. Prefix (generic) match
location /xxx {
    #matches every path that starts with /xxx
}

#3. Regex match
location ~ /xxx {
    #case-sensitive regex match; without a ^ anchor this matches every URI that contains /xxx
}

#4. Prefix match with priority over regexes
location ^~ /images/ {
    #matches every path that starts with /images/ and, when it wins, skips the regex locations
}

#5. Case-insensitive regex match
location ~* \.(gif|jpg|png)$ {
    #matches every path that ends with .gif, .jpg or .png
}
server {
    listen 80;
    server_name localhost;

    location = /index/ {
        proxy_pass http://192.168.59.129:8082/;      #Tomcat home page
    }

    location ^~ /ssm/ {
        proxy_pass http://192.168.59.129:8082/ssm1/; #SSM home page
    }

    #Reverse-proxy everything else to the Tomcat server
    location / {
        proxy_pass http://192.168.59.129:8080/;      #Hello Nginx!
    }
}
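To watch the three rules resolve differently, one request per path is enough; the IPs, ports and context paths below are simply the ones assumed in the config above:

curl http://192.168.59.129/index/   #exact match => Tomcat home page on port 8082
curl http://192.168.59.129/ssm/     #^~ prefix   => the /ssm1/ application on port 8082
curl http://192.168.59.129/         #fallback /  => the service on port 8080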
Nginx provides us with three load-balancing strategies: round robin (the default), weight, and ip_hash.
#1. Round robin (the default): requests are handed to the backends in turn
upstream my-server {
    server 192.168.59.129:8080;
    server 192.168.59.129:8082;
}

server {
    listen 80;
    server_name localhost;

    location / {
        proxy_pass http://my-server/;
    }
}
#2. Weight: requests are distributed in proportion to each server's weight
upstream my-server {
    server 192.168.59.129:8080 weight=10;   #Hello
    server 192.168.59.129:8082 weight=2;    #Tomcat
}

server {
    listen 80;
    server_name localhost;

    location / {
        proxy_pass http://my-server/;
    }
}
#3. ip_hash: requests from the same client IP always go to the same server
upstream my-server {
    ip_hash;
    server 192.168.59.129:8080 weight=10;   #Hello
    server 192.168.59.129:8082 weight=2;    #Tomcat
}

server {
    listen 80;
    server_name localhost;

    location / {
        proxy_pass http://my-server/;
    }
}
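After a docker-compose restart, each strategy can be observed by firing a handful of requests from the host: with round robin the two backends alternate, with weight the 8080 backend should answer roughly 10:2 more often, and with ip_hash every response should come from the same backend. A rough sketch:

for i in $(seq 1 12); do
    curl -s http://192.168.59.129/ | head -n 1   #print only the first line of each response to tell the two backends apart
done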
#Configuration pattern:
location / {
    proxy_pass <target URL>;
}
#Configuration pattern:
location / {
    root <path to the static resources>;
    index <resource to serve by default under that path>;
    autoindex on;   #list the full contents of the static directory as an index page
}
#First edit docker-compose.yml and add data volumes mapped into directories inside the Nginx container
#Then add the static resources index.html and 1.png
#Finally edit the Nginx configuration
version: '3.1'
services:
  nginx:
    restart: always
    image: daocloud.io/library/nginx:1.9.1
    container_name: nginx
    ports:
      - 80:80
    volumes:
      - /opt/docker_nginx/conf.d/:/etc/nginx/conf.d
      - /opt/docker_nginx/img/:/data/img
      - /opt/docker_nginx/html/:/data/html
docker-compose down
docker-compose up -d
ls
cd html/
vi index.html   #<h1>Hello Static Resource!</h1>
#Then drag an image onto the Linux box under /root
cd ~
mv Thread.png 1.png
mv 1.png /opt/docker_nginx/img/
cd /opt/docker_nginx/img/
ls
cd ../conf.d/
vi default.conf   #configure as below
upstream my-server {
    ip_hash;
    server 192.168.59.129:8080 weight=10;   #Hello
    server 192.168.59.129:8082 weight=2;    #Tomcat
}

server {
    listen 80;
    server_name localhost;

    #Serve the html static resources
    location /html {
        root /data;
        index index.html;
    }

    #Serve the img static resources
    location /img {
        root /data;
        autoindex on;
    }
}
cd ../
docker-compose restart
#Then visit the following URLs:
#http://192.168.59.129/html/
#http://192.168.59.129/html/index.html
#http://192.168.59.129/img/
#http://192.168.59.129/img/1.png
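The same URLs can be checked from the shell instead of a browser; the paths are the ones created above:

curl http://192.168.59.129/html/index.html   #should return <h1>Hello Static Resource!</h1>
curl http://192.168.59.129/img/              #autoindex on => an HTML directory listing that includes 1.png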
FROM nginx:1.13.5-alpine

RUN apk update && apk upgrade
RUN apk add --no-cache bash curl ipvsadm iproute2 openrc keepalived

COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

CMD ["/entrypoint.sh"]
#!/bin/bash

#/usr/sbin/keepalived -n -l -D -f /etc/keepalived/keepalived.conf --dont-fork --log-console &
/usr/sbin/keepalived -D -f /etc/keepalived/keepalived.conf
nginx -g "daemon off;"
version: '3.1'
services:
  nginx_master:
    build:                         #build a custom image
      context: ./                  #directory that contains the Dockerfile
      dockerfile: ./Dockerfile     #name of the Dockerfile
    ports:
      - 8081:80
    volumes:
      - ./index-master.html:/usr/share/nginx/html/index.html
      - ./favicon.ico:/usr/share/nginx/html/favicon.ico
      - ./keepalived-master.conf:/etc/keepalived/keepalived.conf
    networks:
      static-network:
        ipv4_address: 172.20.128.2
    cap_add:
      - NET_ADMIN
  nginx_slave:
    build:
      context: ./
      dockerfile: ./Dockerfile
    ports:
      - 8082:80
    volumes:
      - ./index-slave.html:/usr/share/nginx/html/index.html
      - ./favicon.ico:/usr/share/nginx/html/favicon.ico
      - ./keepalived-slave.conf:/etc/keepalived/keepalived.conf
    networks:
      static-network:
        ipv4_address: 172.20.128.3
    cap_add:
      - NET_ADMIN
  proxy:
    image: haproxy:1.7-alpine
    ports:
      - 80:6301
    volumes:
      - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
    networks:
      - static-network

networks:
  static-network:
    ipam:
      config:
        - subnet: 172.20.0.0/16
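Once this file is on the server (see the upload step further below), it can be validated before anything is started; docker-compose prints the fully resolved configuration or the first syntax error it finds:

cd /opt/docker_nginx_cluster
docker-compose config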
vrrp_script chk_nginx {
    script "pidof nginx"
    interval 2
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0          #network interface inside the container
    virtual_router_id 33
    priority 200            #priority
    advert_int 1

    authentication {
        auth_type PASS
        auth_pass letmein
    }

    virtual_ipaddress {
        172.20.128.50       #virtual IP (VIP)
    }

    track_script {
        chk_nginx
    }
}
vrrp_script chk_nginx {
    script "pidof nginx"
    interval 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0          #network interface inside the container
    virtual_router_id 33
    priority 100            #priority
    advert_int 1

    authentication {
        auth_type PASS
        auth_pass letmein
    }

    virtual_ipaddress {
        172.20.128.50       #virtual IP (VIP)
    }

    track_script {
        chk_nginx
    }
}
global
    log 127.0.0.1 local0
    maxconn 4096
    daemon
    nbproc 4

defaults
    log 127.0.0.1 local3
    mode http
    option dontlognull
    option redispatch
    retries 2
    maxconn 2000
    balance roundrobin
    timeout connect 5000ms
    timeout client 5000ms
    timeout server 5000ms

frontend main
    bind *:6301
    default_backend webserver

backend webserver
    server nginx_master 172.20.128.50:80 check inter 2000 rise 2 fall 5
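The HAProxy file can also be syntax-checked on its own, without starting the whole stack, by reusing the image from the compose file; the host path assumes the files end up under /opt/docker_nginx_cluster/ as in the steps below:

docker run --rm -v /opt/docker_nginx_cluster/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg haproxy:1.7-alpine haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg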
<h1>master</h1>
<h1>slave</h1>
cd /opt/
mkdir docker_nginx_cluster
cd docker_nginx_cluster
#Drag the 8 files above into /opt/docker_nginx_cluster/ (e.g. via Xterm's sftp)
docker-compose up -d
docker ps -a
#Visit http://192.168.59.129:8081/ to see "master" and http://192.168.59.129:8082/ to see "slave"
#Visit http://192.168.59.129 to see "master" (the default, because the master holds the VIP)
#Now stop the container that exposes port 8081 (the master)
docker stop <container ID>
#Visit http://192.168.59.129 again and you will see "slave"
#Even when the Nginx server on port 8081 goes down, the one on port 8082 keeps serving requests
#The Nginx cluster is now up and running
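To see the failover mechanism itself rather than only its effect, check which container currently holds the VIP. The container names below follow docker-compose's default <project>_<service>_1 naming and are therefore an assumption; use the names shown by docker ps:

#the MASTER should list the VIP 172.20.128.50 on eth0, the BACKUP should not
docker exec docker_nginx_cluster_nginx_master_1 ip addr show eth0 | grep 172.20.128.50
docker exec docker_nginx_cluster_nginx_slave_1 ip addr show eth0 | grep 172.20.128.50
#after stopping the master, keepalived on the slave takes over the VIP within a few seconds
docker stop docker_nginx_cluster_nginx_master_1
docker exec docker_nginx_cluster_nginx_slave_1 ip addr show eth0 | grep 172.20.128.50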
#After running docker-compose up -d I found that the nginx_master and nginx_slave containers exited right after starting, so I checked the logs
docker ps -a
docker logs -f <container ID>   #-f: follow the log output (like tail -f)
#The logs showed the following error:
#standard_init_linux.go:228: exec user process caused: no such file or directory
#A quick search showed this is a DOS vs. Unix line-ending problem
#I had edited the files with Notepad++ and uploaded them to the server; by default they were created in DOS text format, so their line endings need to be converted
Solution
yum -y install dos2unix
dos2unix Dockerfile   #convert entrypoint.sh and the other uploaded text files the same way if they were also created on Windows
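If you are not sure which of the uploaded files still carry DOS line endings, file reports them explicitly:

file * | grep CRLF   #every file listed here still uses DOS (CRLF) line endings and needs dos2unix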
docker-compose build
#If rebuilding alone still does not fix it, stop and remove all containers, then bring the stack up again
docker stop $(docker ps -qa)
docker rm $(docker ps -qa)
docker-compose up -d
docker network ls                      #list the docker networks
docker network rm <network ID>         #remove a docker network
docker network inspect <network ID>    #show detailed information about a docker network
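For this article's stack the interesting network is the one docker-compose created for static-network; its name is derived from the project directory by default, so the name below is an assumption:

docker network inspect docker_nginx_cluster_static-network   #shows the 172.20.0.0/16 subnet and the attached containers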