I have the following socket server code for Linux using Pthreads.
If I start stress testing this with:
for ((;;)); do echo STAT | nc 127.0.0.1 5555 ; done
The memory increases to about 270MB then the server becomes unresponsive:
271688 ./test
On the ARMv7 architecture this takes less time. If I stop the requests from the for loop, the memory is not freed, meaning that this server would sooner or later crash in production anyway.
Compiles with:
$ gcc -lpthread -o test test.c
What can be the problem in the following code?
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdint.h>
#include <fcntl.h>
#include <termios.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <pthread.h>

char *Match(const char *instr, const char *pattern)
{
    const char *p, *q;
    for (; *instr; instr++) {
        for (p = instr, q = pattern; *p && *q; p++, q++)
            if (*p != *q)
                break;
        if (p == q || *q == 0)
            return (char *)instr;
    }
    return NULL;
}

void *connection_handler2(void *socket_desc)
{
    int sock = *(int*)socket_desc;
    int comm, i;
    int read_size;
    char *message, client_message[500], data[50];
    char buf[256];

    strcpy(data, "Test message back");
    message = "Hello this is test Server\n";
    write(sock, message, strlen(message));

    while ((read_size = recv(sock, client_message, 500, 0)) > 0) {
        if (Match(client_message, "STAT") != NULL) {
            write(sock, data, strlen(data));
        }
        if (Match(client_message, "QUIT") != NULL)
            break;
    }

    if (read_size == 0) {
        puts("Client disconnected");
        fflush(stdout);
    } else if (read_size == -1) {
        perror("recv failed");
    }

    close(sock);
    //Free the socket pointer
    free(socket_desc);
    return 0;
}

void *s2_thread()
{
    int n, i;
    int opt = 1;
    int socket_desc, client_sock, c, *new_sock;
    struct sockaddr_in server, client;
    struct termios toptions;

    //Create socket
    socket_desc = socket(AF_INET, SOCK_STREAM, 0);
    if (socket_desc == -1) {
        printf(" Could not create socket");
    }

    //Prepare the sockaddr_in structure
    server.sin_family = AF_INET;
    server.sin_addr.s_addr = INADDR_ANY;
    server.sin_port = htons(5555);

    //bind the socket to the address
    setsockopt(socket_desc, SOL_SOCKET, SO_REUSEADDR, (const char *)&opt, sizeof(int));

    //Bind
    if (bind(socket_desc, (struct sockaddr *)&server, sizeof(server)) < 0) {
        //print the error message
        perror("port 5000 bind failed");
        return;
    }

    //Listen
    listen(socket_desc, 3);

    //Accept
    c = sizeof(struct sockaddr_in);

    //Endless main loop
    while ((client_sock = accept(socket_desc, (struct sockaddr *)&client, (socklen_t*)&c))) {
        pthread_t t2;
        new_sock = malloc(1);
        *new_sock = client_sock;

        if (pthread_create(&t2, NULL, connection_handler2, (void*)new_sock) < 0) {
            return;
        }
    }

    if (client_sock < 0) {
        return;
    }
    return;
}

int main(int argc, char *argv[])
{
    int n, i;
    int opt = 1;
    int socket_desc, client_sock, c, *new_sock;
    struct sockaddr_in server, client;
    pthread_t ss2t;

    if (pthread_create(&ss2t, NULL, s2_thread, NULL) < 0) {
        return 1;
    }

    puts("Telnet server started");

    while (1)
        sleep(5);

    return 0;
}
Answer
The memory increases to about 270MB then the server becomes unresponsive
You want to either detach the threads created or join them.
As it stands, each thread that has ended lingers as a zombie, with all of its resources (stack and bookkeeping data) still allocated. This eats up the server's memory.
To have a thread detach, have it call pthread_detach(pthread_self()); before it ends.
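For the code in the question, the smallest change is to have the handler detach itself. A minimal sketch, showing only the top of connection_handler2() (the rest of the handler stays as it is):

void *connection_handler2(void *socket_desc)
{
    int sock = *(int *)socket_desc;

    /* Detach this thread: its stack and bookkeeping data are released
       automatically as soon as the thread function returns, so no
       pthread_join() is needed. */
    pthread_detach(pthread_self());

    /* ... rest of the handler unchanged ... */
}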
To join a thread, have any other thread call pthread_join(), passing the thread ID as obtained from pthread_create().
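For completeness, joining in the accept loop of the question would look roughly like this, but note that it blocks the loop until the handler returns, so only one client would be served at a time:

pthread_t t2;
if (pthread_create(&t2, NULL, connection_handler2, new_sock) == 0) {
    /* Wait for the handler to finish; this also releases its resources. */
    pthread_join(t2, NULL);
}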
As the program does not seem to be interested in the threads’ results, detaching them might be the preferred solution.
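If you would rather not touch the handler, the thread can also be created in the detached state right away via a thread attribute. A sketch, not taken from the question's code:

pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

if (pthread_create(&t2, &attr, connection_handler2, (void *)new_sock) != 0) {
    perror("pthread_create failed");
    free(new_sock);
    close(client_sock);
}

pthread_attr_destroy(&attr);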
There are other issues with the code, which are unrelated to the resource leak:
- how the code handles the result of recv(): read_size is never used to null-terminate client_message, so Match() operates on a buffer that may lack a terminator or still contain stale data from a previous message (see the sketch below).
- error checking for most relevant function calls is missing.
- new_sock = malloc(1); allocates too little memory for the int that new_sock points to; it should be malloc(sizeof *new_sock).
- c should be of type socklen_t. Using the casting hammer on its address does not make things better.
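A sketch of how those points might look when fixed, keeping the names from the question (the error handling shown is only one possible choice):

/* In connection_handler2(): leave room for a terminator and
   null-terminate what recv() actually returned before matching on it. */
while ((read_size = recv(sock, client_message, sizeof(client_message) - 1, 0)) > 0) {
    client_message[read_size] = '\0';   /* recv() does not null-terminate */

    if (Match(client_message, "STAT") != NULL)
        write(sock, data, strlen(data));
    if (Match(client_message, "QUIT") != NULL)
        break;
}

/* In s2_thread(): use socklen_t for the address length and allocate
   enough memory for a whole int. */
socklen_t addrlen = sizeof(client);

while ((client_sock = accept(socket_desc, (struct sockaddr *)&client, &addrlen)) >= 0) {
    pthread_t t2;
    int *new_sock = malloc(sizeof *new_sock);   /* not malloc(1) */

    if (new_sock == NULL) {
        close(client_sock);
        continue;
    }
    *new_sock = client_sock;

    if (pthread_create(&t2, NULL, connection_handler2, new_sock) != 0) {
        perror("pthread_create failed");
        free(new_sock);
        close(client_sock);
    }
}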