
6/19/2022 · Knowledge

# c1000k: Testing One Million Concurrent Connections on a Single Machine

The server is an epoll-based reactor running in a single process with multiple listening ports; multiple client IPs, each using many local ports, connect to it.

1. Hardware: a laptop with 16 GB of RAM.
2. Four virtual machines: one server and three clients.
3. The server is the epoll-based reactor whose source is at the bottom.
4. The client is a multi-port epoll client; its source is also at the bottom.

Concurrency is the number of clients a server can serve at the same time, and the number of connections it can hold open simultaneously is the foundation of that. The target here is to hold 1,000,000 connections and answer within 200 ms.

The echo server keeps the test free of the usual extra costs:

1. database access 2. network bandwidth 3. memory operations 4. logging

# 1. What Defines a Connection

# 1.1 Theoretical maximum concurrency of a server

A connection is identified by a five-tuple: source IP (sip), destination IP (dip), source port (sport), destination port (dport), and the protocol.

2^32 (source IPs) × 2^16 (source ports) × 2^32 (destination IPs) × 2^16 (destination ports) = 2^96, roughly 8 × 10^28 possible connections.

(Each connection consumes server memory, though, so in practice the theoretical figure can never be reached.)

# 1.2 Test configuration

The setup here is one server IP × 100 server ports, with three client machines each using up to 60,000 local ports. In theory that is enough tuples for 2,000,000 connections; in practice the laptop simply does not have enough memory for that.

- sip: the client IPs, here three virtual machines (three IPs)
- dip: the server IP, one virtual machine (one IP)
- sport: the client ports, up to 60,000 per client
- dport: the server ports, 100 of them
- proto: TCP

# 1.3 Default Linux settings related to connections

Here are the defaults on Ubuntu 20.04:

```bash
root@luo:~# ulimit -n
1024
root@luo:~# sysctl -a |grep mem
...
net.ipv4.tcp_mem = 92880   123843  185760   # pages of 4 KB: ~360 MB / ~480 MB / ~725 MB; above the middle value the stack starts to economize, above the max it stops allocating new buffers
net.ipv4.tcp_rmem = 4096   131072  6291456
net.ipv4.tcp_wmem = 4096   16384   4194304
...
net.ipv4.ip_local_port_range = 32768    60999
fs.file-max = 9223372036854775807
net.nf_conntrack_max = 65536
```

# 1.4 What these settings do

1. net.ipv4.tcp_mem is the number of memory pages the whole TCP stack may use; net.ipv4.tcp_rmem is the memory reserved per TCP socket for the receive buffer and net.ipv4.tcp_wmem the memory reserved for the send buffer (each as min / default / max).

Raise tcp_wmem / tcp_rmem when the server transfers large files, and shrink them when it only exchanges short messages (if they get too small, even ssh becomes sluggish). Each fd essentially costs its tcp_rmem plus tcp_wmem, so smaller buffers mean less memory per connection.

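The sysctls above set the system-wide defaults; an individual socket can also request smaller buffers for itself with SO_SNDBUF / SO_RCVBUF. The test below relies only on the sysctls; this is just a sketch of the per-socket variant (Linux doubles the requested value and enforces its own floor):

```c
/* Sketch: shrink one socket's buffers and read back what the kernel granted. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void) {
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    if (fd < 0) { perror("socket"); return 1; }

    int snd = 2048, rcv = 2048;             /* matches the tcp_wmem/tcp_rmem used later */
    setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
    setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));

    socklen_t len = sizeof(snd);
    getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, &len);  /* effective (doubled) size */
    len = sizeof(rcv);
    getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, &len);
    printf("effective buffers: snd=%d rcv=%d\n", snd, rcv);

    close(fd);
    return 0;
}
```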
2. ulimit -n and fs.file-max limit the number of open file descriptors. By default a single process can open only around 1024 fds; raise the limit with `ulimit -n 1048576` (temporary) or in /etc/security/limits.conf (permanent), otherwise accept() eventually fails with:

```
accept: Too many open files
```
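Besides raising the shell limit with `ulimit`, a process can lift its own soft fd limit up to the hard limit when it starts. The reactor below does not do this (it relies on the shell setting); a minimal sketch with setrlimit:

```c
/* Sketch: raise RLIMIT_NOFILE for this process up to the hard limit. */
#include <stdio.h>
#include <sys/resource.h>

int main(void) {
    struct rlimit rl;
    if (getrlimit(RLIMIT_NOFILE, &rl) < 0) { perror("getrlimit"); return 1; }

    rl.rlim_cur = rl.rlim_max;              /* soft limit -> hard limit */
    if (setrlimit(RLIMIT_NOFILE, &rl) < 0) { perror("setrlimit"); return 1; }

    getrlimit(RLIMIT_NOFILE, &rl);
    printf("RLIMIT_NOFILE: soft=%llu hard=%llu\n",
           (unsigned long long)rl.rlim_cur, (unsigned long long)rl.rlim_max);
    return 0;
}
```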

3. net.ipv4.ip_local_port_range: by default outgoing connections only get local ports from roughly 32768 upward, so the range has to be widened by hand. When connecting, the kernel walks the local ephemeral ports one by one until it finds a free one; the default range of 32768-60999 offers only about 28,000 ports, and once they are exhausted the client stalls:

```
connections: 27999, sockfd:28002, time_used:3193
connect: Cannot assign requested address
error : Cannot assign requested address
```
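A quick way to see this limit is to read the range straight from /proc; a small sketch:

```c
/* Sketch: report how many ephemeral ports the current range allows. */
#include <stdio.h>

int main(void) {
    FILE *f = fopen("/proc/sys/net/ipv4/ip_local_port_range", "r");
    if (!f) { perror("fopen"); return 1; }

    int lo, hi;
    if (fscanf(f, "%d %d", &lo, &hi) != 2) { fclose(f); return 1; }
    fclose(f);

    printf("ephemeral ports: %d-%d (%d usable)\n", lo, hi, hi - lo + 1);
    return 0;
}
```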

4. net.nf_conntrack_max: the maximum number of connections (SYNs establishing connections) that netfilter tracks on the server.

iptables is an application built on top of netfilter and calls netfilter's interfaces; every packet (sk_buff) coming off the NIC passes through netfilter on its way into the protocol stack, so each connection occupies a conntrack entry. Once the table is full, new connections are dropped and the client only sees timeouts:

```
connection timeout
```
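If the conntrack module is loaded, the live entry count can be compared against the limit before and during the test. A small sketch; the /proc paths assume nf_conntrack is present:

```c
/* Sketch: compare the current conntrack entry count with nf_conntrack_max. */
#include <stdio.h>

static long read_long(const char *path) {
    FILE *f = fopen(path, "r");
    if (!f) return -1;
    long v = -1;
    if (fscanf(f, "%ld", &v) != 1) v = -1;
    fclose(f);
    return v;
}

int main(void) {
    long count = read_long("/proc/sys/net/netfilter/nf_conntrack_count");
    long max   = read_long("/proc/sys/net/netfilter/nf_conntrack_max");
    printf("conntrack entries: %ld of %ld\n", count, max);
    return 0;
}
```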

# 2. Running the Experiment

# 2.1 Temporary changes with sysctl (lost on reboot)

```bash
# Send/receive buffers: enlarge for bulk transfers, shrink for short messages
# (too small and even ssh gets sluggish). Each fd costs roughly its
# tcp_wmem + tcp_rmem, so smaller buffers mean less memory per connection.
sysctl -w net.ipv4.tcp_wmem="2048 2048 4096"
sysctl -w net.ipv4.tcp_rmem="2048 2048 4096"

# 262144 pages x 4 KB = 1 GB / 2 GB / 3 GB: above 2 GB the stack enters memory
# pressure mode; above 3 GB it stops allocating.
sysctl -w net.ipv4.tcp_mem="262144 524288 786432"
sysctl -w net.ipv4.ip_local_port_range="1025 64000"
ulimit -n 1048576
modprobe ip_conntrack
sysctl -w net.nf_conntrack_max=1048576
```
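These `sysctl -w` changes vanish on reboot. To keep them, the equivalent entries can go into /etc/sysctl.conf (applied with `sysctl -p`; the conntrack line only takes effect once the module is loaded) plus a nofile entry in /etc/security/limits.conf. A sketch with the same values:

```bash
# /etc/sysctl.conf
net.ipv4.tcp_wmem = 2048 2048 4096
net.ipv4.tcp_rmem = 2048 2048 4096
net.ipv4.tcp_mem = 262144 524288 786432
net.ipv4.ip_local_port_range = 1025 64000
net.nf_conntrack_max = 1048576

# /etc/security/limits.conf
* soft nofile 1048576
* hard nofile 1048576
```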

# 2.2 Compile the sources below and run the client/server echo test
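Both programs build with plain gcc; something like the following works (binary names and flags are my own choice). The server opens 100 listening ports starting at its base port, 8888 by default; each client VM is pointed at the server's IP and that base port:

```bash
gcc -O2 -o reactor reactor.c
gcc -O2 -o mul_port_client mul_port_client_epoll.c

# on the server VM (listens on 8888-8987)
./reactor 8888

# on each of the three client VMs
./mul_port_client <server_ip> 8888
```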

Once both sides are running, the server prints echo traffic like:

```
send[fd=980328], [32]Hello Server: client --> 329478
```

1. Memory right after the server starts:

```
root@luo:~# free
              total        used        free      shared  buff/cache   available
Mem:        8117280     2376860     5343540        1620      396880     5485128
Swap:       4001788           0     4001788
```

2. Memory while the server holds 980,000 connections:

```
root@luo:~# free
              total        used        free      shared  buff/cache   available
Mem:        8117280     6973192      103992           0     1040096      913852
Swap:       4001788     1090996     2910792
```


3. Memory per connection:

6973192 − 2376860 = 4596332 KB consumed by 980,000 connections, i.e. about 4.7 KB each.

Because the laptop was running all four VMs at once, a VM crashed every time the count got past 900,000 or so; 980,000 was the most it held. So a connection costs roughly 4-5 KB on average (swap usage also grew by about 1 GB, so the true figure is somewhat higher).

# 3. Problems Hit During the Test and Directions for Optimization

- Not enough file descriptors (fd).
- Not enough ports.
- The accept (fully established connection) queue filling up.
- Clients timing out while the server does not ACK: the connection queue being full?

The half-open (SYN) queue and the fully established queue are both finite; once they fill up, new connections have to wait, which shows up as network jitter. Contention on accept across a thread pool points toward multi-threaded accept.

Multi-threaded network models (redis, memcached, nginx):

1. Keep the accept fd separate from the recv/send fds.
2. accept in multiple threads (see the sketch below).

The nginx / memcached style of multiple processes suits business processing better: a fixed set of threads accepts connections while other threads handle the business logic.

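One common way to get contention-free multi-threaded accept is SO_REUSEPORT (Linux 3.9+): every accept thread opens its own listening socket on the same port and the kernel spreads incoming connections across them. The reactor above does not do this; the following is only a sketch of the idea (build with `gcc -pthread`):

```c
/* Sketch: per-thread listeners sharing one port via SO_REUSEPORT. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define PORT     8888
#define NTHREADS 4

static void *acceptor(void *arg) {
    (void)arg;
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int on = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    addr.sin_port = htons(PORT);

    if (bind(fd, (struct sockaddr*)&addr, sizeof(addr)) < 0 || listen(fd, 128) < 0) {
        perror("bind/listen");
        return NULL;
    }

    while (1) {
        int c = accept(fd, NULL, NULL);   /* the kernel load-balances across listeners */
        if (c < 0) continue;
        /* hand c off to this thread's epoll instance / worker here */
        close(c);
    }
    return NULL;
}

int main(void) {
    pthread_t tid[NTHREADS];
    for (int i = 0; i < NTHREADS; i++) pthread_create(&tid[i], NULL, acceptor, NULL);
    for (int i = 0; i < NTHREADS; i++) pthread_join(tid[i], NULL);
    return 0;
}
```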
# Source Code

reactor.c

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/epoll.h>
#include <arpa/inet.h>

#include <fcntl.h>
#include <unistd.h>
#include <errno.h>


#define BUFFER_LENGTH       1024
#define MAX_EPOLL_EVENTS    1024*1024   // maximum connections the reactor can track
#define MAX_EPOLL_ITEM      102400      // events fetched per epoll_wait call
#define SERVER_PORT         8888

#define LISTEN_PORT_COUNT   100

typedef int NCALLBACK(int ,int, void*);

struct ntyevent {
    int fd;
    int events;
    void *arg;
    int (*callback)(int fd, int events, void *arg);
    
    int status;
    char buffer[BUFFER_LENGTH];
    int length;
    long last_active;
};



struct ntyreactor {
    int epfd;
    struct ntyevent *events; // 1024 * 1024
};


int recv_cb(int fd, int events, void *arg);
int send_cb(int fd, int events, void *arg);


void nty_event_set(struct ntyevent *ev, int fd, NCALLBACK callback, void *arg) {

    ev->fd = fd;
    ev->callback = callback;
    ev->events = 0;
    ev->arg = arg;
    ev->last_active = time(NULL);

    return ;
    
}


int nty_event_add(int epfd, int events, struct ntyevent *ev) {

    struct epoll_event ep_ev = {0, {0}};
    ep_ev.data.ptr = ev;
    ep_ev.events = ev->events = events;

    int op;
    if (ev->status == 1) {
        op = EPOLL_CTL_MOD;
    } else {
        op = EPOLL_CTL_ADD;
        ev->status = 1;
    }

    if (epoll_ctl(epfd, op, ev->fd, &ep_ev) < 0) {
        printf("event add failed [fd=%d], events[%d]\n", ev->fd, events);
        return -1;
    }

    return 0;
}

int nty_event_del(int epfd, struct ntyevent *ev) {

    struct epoll_event ep_ev = {0, {0}};

    if (ev->status != 1) {
        return -1;
    }

    ep_ev.data.ptr = ev;
    ev->status = 0;
    epoll_ctl(epfd, EPOLL_CTL_DEL, ev->fd, &ep_ev);

    return 0;
}

int recv_cb(int fd, int events, void *arg) {

    struct ntyreactor *reactor = (struct ntyreactor*)arg;
    struct ntyevent *ev = reactor->events+fd;

    int len = recv(fd, ev->buffer, BUFFER_LENGTH - 1, 0); // leave room for the '\0' written below
    nty_event_del(reactor->epfd, ev);

    if (len > 0) {
        
        ev->length = len;
        ev->buffer[len] = '\0';

        printf("C[%d]:%s\n", fd, ev->buffer);

        nty_event_set(ev, fd, send_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLOUT, ev);
        
        
    } else if (len == 0) {

        close(ev->fd);
        printf("[fd=%d] pos[%ld], closed\n", fd, ev-reactor->events);
         
    } else {

        close(ev->fd);
        printf("recv[fd=%d] error[%d]:%s\n", fd, errno, strerror(errno));
        
    }

    return len;
}


int send_cb(int fd, int events, void *arg) {

    struct ntyreactor *reactor = (struct ntyreactor*)arg;
    struct ntyevent *ev = reactor->events+fd;

    int len = send(fd, ev->buffer, ev->length, 0);
    if (len > 0) {
        printf("send[fd=%d], [%d]%s\n", fd, len, ev->buffer);

        nty_event_del(reactor->epfd, ev);
        
        nty_event_set(ev, fd, recv_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLIN, ev);
        
    } else {

        close(ev->fd);

        nty_event_del(reactor->epfd, ev);
        printf("send[fd=%d] error %s\n", fd, strerror(errno));

    }

    return len;
}

int accept_cb(int fd, int events, void *arg) {

    struct ntyreactor *reactor = (struct ntyreactor*)arg;
    if (reactor == NULL) return -1;

    struct sockaddr_in client_addr;
    socklen_t len = sizeof(client_addr);

    int clientfd;

    if ((clientfd = accept(fd, (struct sockaddr*)&client_addr, &len)) == -1) {
        if (errno != EAGAIN && errno != EINTR) {
            
        }
        printf("accept: %s\n", strerror(errno));
        return -1;
    }

    int i = 0;
    do {
#if 0
        for (i = 0;i < MAX_EPOLL_EVENTS;i ++) {
            if (reactor->events[i].status == 0) {
                break;
            }
        }
        if (i == MAX_EPOLL_EVENTS) {
            printf("%s: max connect limit[%d]\n", __func__, MAX_EPOLL_EVENTS);
            break;
        }
#endif
        int flag = 0;
        if ((flag = fcntl(clientfd, F_SETFL, O_NONBLOCK)) < 0) {
            printf("%s: fcntl nonblocking failed, %d\n", __func__, MAX_EPOLL_EVENTS);
            break;
        }

        nty_event_set(&reactor->events[clientfd], clientfd, recv_cb, reactor);
        nty_event_add(reactor->epfd, EPOLLIN, &reactor->events[clientfd]);

    } while (0);

    printf("new connect [%s:%d][time:%ld], pos[%d]\n", 
        inet_ntoa(client_addr.sin_addr), ntohs(client_addr.sin_port), reactor->events[i].last_active, i);

    return 0;

}

int init_sock(short port) {

    int fd = socket(AF_INET, SOCK_STREAM, 0);
    fcntl(fd, F_SETFL, O_NONBLOCK);

    struct sockaddr_in server_addr;
    memset(&server_addr, 0, sizeof(server_addr));
    server_addr.sin_family = AF_INET;
    server_addr.sin_addr.s_addr = htonl(INADDR_ANY);
    server_addr.sin_port = htons(port);

    bind(fd, (struct sockaddr*)&server_addr, sizeof(server_addr));

    if (listen(fd, 20) < 0) {
        printf("listen failed : %s\n", strerror(errno));
    }

    printf("listen port : %d\n", port);

    return fd;
}


int ntyreactor_init(struct ntyreactor *reactor) {

    if (reactor == NULL) return -1;
    memset(reactor, 0, sizeof(struct ntyreactor));

    reactor->epfd = epoll_create(1);
    if (reactor->epfd <= 0) {
        printf("create epfd in %s err %s\n", __func__, strerror(errno));
        return -2;
    }

    reactor->events = (struct ntyevent*)malloc((MAX_EPOLL_EVENTS) * sizeof(struct ntyevent));
    if (reactor->events == NULL) {
        printf("create epfd in %s err %s\n", __func__, strerror(errno));
        close(reactor->epfd);
        return -3;
    }

    return 0;
}

int ntyreactor_destory(struct ntyreactor *reactor) {

    close(reactor->epfd);
    free(reactor->events);

    return 0;
}



int ntyreactor_addlistener(struct ntyreactor *reactor, int sockfd, NCALLBACK *acceptor) {

    if (reactor == NULL) return -1;
    if (reactor->events == NULL) return -1;

    nty_event_set(&reactor->events[sockfd], sockfd, acceptor, reactor);
    nty_event_add(reactor->epfd, EPOLLIN, &reactor->events[sockfd]);

    return 0;
}



int ntyreactor_run(struct ntyreactor *reactor) {
    if (reactor == NULL) return -1;
    if (reactor->epfd < 0) return -1;
    if (reactor->events == NULL) return -1;
    
    struct epoll_event events[MAX_EPOLL_ITEM];
    
    int checkpos = 0, i;

    while (1) {
#if 0
        long now = time(NULL);
        for (i = 0;i < 100;i ++, checkpos ++) {
            if (checkpos == MAX_EPOLL_EVENTS) {
                checkpos = 0;
            }

            if (reactor->events[checkpos].status != 1) {
                continue;
            }

            long duration = now - reactor->events[checkpos].last_active;

            if (duration >= 60) {
                close(reactor->events[checkpos].fd);
                printf("[fd=%d] timeout\n", reactor->events[checkpos].fd);
                nty_event_del(reactor->epfd, &reactor->events[checkpos]);
            }
        }
#endif
        // fetch at most MAX_EPOLL_ITEM ready events per call, with a 1000 ms timeout
        int nready = epoll_wait(reactor->epfd, events, MAX_EPOLL_ITEM, 1000);
        if (nready < 0) {
            printf("epoll_wait error, exit\n");
            continue;
        }

        for (i = 0;i < nready;i ++) {

            struct ntyevent *ev = (struct ntyevent*)events[i].data.ptr;

            if ((events[i].events & EPOLLIN) && (ev->events & EPOLLIN)) {
                ev->callback(ev->fd, events[i].events, ev->arg);
            }
            if ((events[i].events & EPOLLOUT) && (ev->events & EPOLLOUT)) {
                ev->callback(ev->fd, events[i].events, ev->arg);
            }
            
        }

    }
}

int main(int argc, char *argv[]) {

    unsigned short port = SERVER_PORT;
    if (argc == 2) {
        port = atoi(argv[1]);
    }
    struct ntyreactor *reactor = (struct ntyreactor*)malloc(sizeof(struct ntyreactor));
    ntyreactor_init(reactor);
    

    int listenfd[LISTEN_PORT_COUNT] = {0};
    int i = 0;

    for (i = 0;i < LISTEN_PORT_COUNT;i ++) {
        listenfd[i] = init_sock(port+i);
        ntyreactor_addlistener(reactor, listenfd[i], accept_cb);
    }
    
    ntyreactor_run(reactor);

    ntyreactor_destory(reactor);

    for (i = 0;i < LISTEN_PORT_COUNT;i ++) {
        close(listenfd[i]);
    }
    

    return 0;
}
```

mul_port_client_epoll.c

```c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/epoll.h>
#include <errno.h>
#include <netinet/tcp.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <fcntl.h>
#include <unistd.h>


#define MAX_BUFFER      128
#define MAX_EPOLLSIZE   (384*1024)
#define MAX_PORT        100

#define TIME_SUB_MS(tv1, tv2)  ((tv1.tv_sec - tv2.tv_sec) * 1000 + (tv1.tv_usec - tv2.tv_usec) / 1000)

int isContinue = 0;

static int ntySetNonblock(int fd) {
    int flags;

    flags = fcntl(fd, F_GETFL, 0);
    if (flags < 0) return flags;
    flags |= O_NONBLOCK;
    if (fcntl(fd, F_SETFL, flags) < 0) return -1;
    return 0;
}

static int ntySetReUseAddr(int fd) {
    int reuse = 1;
    return setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *)&reuse, sizeof(reuse));
}



int main(int argc, char **argv) {
    if (argc <= 2) {
        printf("Usage: %s ip port\n", argv[0]);
        exit(0);
    }

    const char *ip = argv[1];
    int port = atoi(argv[2]);
    int connections = 0;
    char buffer[128] = {0};
    int i = 0, index = 0;

    struct epoll_event events[MAX_EPOLLSIZE];
    
    int epoll_fd = epoll_create(MAX_EPOLLSIZE);
    
    strcpy(buffer, " Data From MulClient\n");
        
    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(struct sockaddr_in));
    
    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = inet_addr(ip);

    struct timeval tv_begin;
    gettimeofday(&tv_begin, NULL);

    while (1) {
        if (++index >= MAX_PORT) index = 0;
        
        struct epoll_event ev;
        int sockfd = 0;

        if (connections < 500000 && !isContinue) {
            sockfd = socket(AF_INET, SOCK_STREAM, 0);
            if (sockfd == -1) {
                perror("socket");
                goto err;
            }

            //ntySetReUseAddr(sockfd);
            addr.sin_port = htons(port+index);

            if (connect(sockfd, (struct sockaddr*)&addr, sizeof(struct sockaddr_in)) < 0) {
                perror("connect");
                goto err;
            }
            ntySetNonblock(sockfd);
            ntySetReUseAddr(sockfd);

            sprintf(buffer, "Hello Server: client --> %d\n", connections);
            send(sockfd, buffer, strlen(buffer), 0);

            ev.data.fd = sockfd;
            ev.events = EPOLLIN | EPOLLOUT;
            epoll_ctl(epoll_fd, EPOLL_CTL_ADD, sockfd, &ev);
        
            connections ++;
        }
        //connections ++;
        if (connections % 1000 == 999 || connections >= 340000) {
            struct timeval tv_cur;
            memcpy(&tv_cur, &tv_begin, sizeof(struct timeval));
            
            gettimeofday(&tv_begin, NULL);

            int time_used = TIME_SUB_MS(tv_begin, tv_cur);
            printf("connections: %d, sockfd:%d, time_used:%d\n", connections, sockfd, time_used);

            int nfds = epoll_wait(epoll_fd, events, connections, 100);
            for (i = 0;i < nfds;i ++) {
                int clientfd = events[i].data.fd;

                if (events[i].events & EPOLLOUT) {
                    sprintf(buffer, "data from %d\n", clientfd);
                    send(clientfd, buffer, strlen(buffer), 0);
                } else if (events[i].events & EPOLLIN) {
                    char rBuffer[MAX_BUFFER] = {0};             
                    ssize_t length = recv(clientfd, rBuffer, MAX_BUFFER, 0);
                    if (length > 0) {
                        printf(" RecvBuffer:%s\n", rBuffer);

                        if (!strcmp(rBuffer, "quit")) {
                            isContinue = 0;
                        }
                        
                    } else if (length == 0) {
                        printf(" Disconnect clientfd:%d\n", clientfd);
                        connections --;
                        close(clientfd);
                    } else {
                        if (errno == EINTR) continue;

                        printf(" Error clientfd:%d, errno:%d\n", clientfd, errno);
                        close(clientfd);
                    }
                } else {
                    printf(" clientfd:%d, errno:%d\n", clientfd, errno);
                    close(clientfd);
                }
            }
        }

        usleep(1 * 1000);
    }

    return 0;

err:
    printf("error : %s\n", strerror(errno));
    return 0;
    
}
```
