allocator_thread: fix segfault with weechat 2.0

it was reported that weechat 2.0 on ubuntu 16.04 LTS x86_64 segfaulted like this:

#4 0x00007f6bf0e7e0c0 in __stack_chk_fail () at stack_chk_fail.c:28
#5 0x00007f6bf2536bce in at_get_ip_for_host (host=0x339c4d0 "abcdefghijklmnop.onion", len=22) at src/allocator_thread.c:290
        readbuf = {octet = "irc.", as_int = 778269289}
        msg = {msgtype = ATM_GETNAME, datalen = 13}

what happened was that weechat forked, so the forked process got its own
private copy of the address space, and with it a private copy of the mutex
that is supposed to prevent parallel use of at_get_ip_for_host() & friends.
therefore the following race was possible:
- process A writes a message of type ATM_GETIP into the server pipe
- process B writes a message of type ATM_GETNAME into the server pipe
- process A's write transaction finishes, and it goes into receive mode
- the server thread reads process B's message and responds with an ATM_GETNAME msg
- process A reads the response that was intended for process B into its 4 byte
  ip address buffer, but ATM_GETNAME responses are much larger than ATM_GETIP
  responses, resulting in stack corruption (see the standalone sketch below).
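
the root of the problem can be shown outside of proxychains with a standalone
sketch (not part of this commit): once fork() duplicates a mutex that lives in
ordinary process-private memory, locking it no longer excludes the other
process, so both of them happily sit in the "critical section" at the same time.

/* sketch: a process-private mutex does not synchronize across fork() */
/* build: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* ordinary global, duplicated by fork */

int main(void) {
	pid_t pid = fork();
	pthread_mutex_lock(&lock);   /* each process locks its own private copy... */
	printf("%s entered the critical section\n", pid ? "parent" : "child");
	sleep(1);                    /* ...so both are inside at the same time */
	pthread_mutex_unlock(&lock);
	if(pid > 0) wait(0);
	return 0;
}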

to prevent this issue, the storage of the mutex must reside in shared memory,
which we achieve via an anonymous shared mmap: a mapping created with
MAP_ANON|MAP_SHARED before the fork is inherited by the child and keeps
pointing at the same pages, so parent and child really operate on the same
mutex. alternatively, shm_open() or sysv ipc shared memory could be used.
the mmap approach requires the mapping to be created before the fork, while
the named variants do not; however, they need a named object (in /dev/shm in
the shm_open() case), which means generating a unique name per proxychains
instance and cleaning it up afterwards (a rough sketch of that route follows
below). so in the end, the mmap is easier to deal with, and we can be
reasonably certain that our constructor runs before the hooked application forks.
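
for comparison, a rough sketch of the rejected shm_open() route (not from this
commit; the name scheme and error handling are made up for illustration):
create a uniquely named object, size it, map it shared, and remember to
shm_unlink() it again, otherwise it lingers in /dev/shm.

/* hypothetical shm_open() variant, for illustration only; link with -lrt on older glibc */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void *named_shared_page(char *name, size_t namelen) {
	/* hypothetical per-instance name; a real scheme would have to avoid collisions */
	snprintf(name, namelen, "/proxychains.%ld", (long) getpid());
	int fd = shm_open(name, O_CREAT|O_EXCL|O_RDWR, 0600);
	if(fd == -1) return 0;
	if(ftruncate(fd, 4096) == -1) { close(fd); shm_unlink(name); return 0; }
	void *p = mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd); /* the mapping stays valid after the fd is closed */
	if(p == MAP_FAILED) { shm_unlink(name); return 0; }
	return p;  /* caller must eventually munmap() and shm_unlink(name) */
}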
rofl0r 2017-12-19 00:10:13 +00:00
parent 3b5f41028b
commit cc7bc891ff
1 changed file with 14 additions and 10 deletions

src/allocator_thread.c

@@ -14,6 +14,7 @@
 #include <stdint.h>
 #include <stddef.h>
 #include <errno.h>
+#include <sys/mman.h>
 #include "allocator_thread.h"
 #include "debug.h"
 #include "ip_type.h"
@@ -39,9 +40,8 @@ static void *dumpstring(char* s, size_t len) {
 	return p;
 }
 
-pthread_mutex_t internal_ips_lock;
-internal_ip_lookup_table *internal_ips = NULL;
-internal_ip_lookup_table internal_ips_buf;
+static pthread_mutex_t *internal_ips_lock;
+static internal_ip_lookup_table *internal_ips;
 
 uint32_t index_from_internal_ip(ip_type4 internalip) {
 	PFUNC();
@@ -280,7 +280,7 @@ static void* threadfunc(void* x) {
 ip_type4 at_get_ip_for_host(char* host, size_t len) {
 	ip_type4 readbuf;
-	MUTEX_LOCK(&internal_ips_lock);
+	MUTEX_LOCK(internal_ips_lock);
 	if(len > MSG_LEN_MAX) goto inv;
 	struct at_msghdr msg = {.msgtype = ATM_GETIP, .datalen = len + 1 };
 	if(sendmessage(ATD_SERVER, &msg, host) &&
@@ -289,19 +289,19 @@ ip_type4 at_get_ip_for_host(char* host, size_t len) {
 		inv:
 		readbuf = ip_type_invalid.addr.v4;
 	}
-	MUTEX_UNLOCK(&internal_ips_lock);
+	MUTEX_UNLOCK(internal_ips_lock);
 	return readbuf;
 }
 
 size_t at_get_host_for_ip(ip_type4 ip, char* readbuf) {
 	struct at_msghdr msg = {.msgtype = ATM_GETNAME, .datalen = sizeof(ip_type4) };
 	size_t res = 0;
-	MUTEX_LOCK(&internal_ips_lock);
+	MUTEX_LOCK(internal_ips_lock);
 	if(sendmessage(ATD_SERVER, &msg, &ip) && getmessage(ATD_CLIENT, &msg, readbuf)) {
 		if((ptrdiff_t) msg.datalen <= 0) res = 0;
 		else res = msg.datalen - 1;
 	}
-	MUTEX_UNLOCK(&internal_ips_lock);
+	MUTEX_UNLOCK(internal_ips_lock);
 	return res;
 }
@@ -326,8 +326,12 @@ static void initpipe(int* fds) {
  * be used to place responses and arguments */
 void at_init(void) {
 	PFUNC();
-	MUTEX_INIT(&internal_ips_lock);
-	internal_ips = &internal_ips_buf;
+	void *shm = mmap(0, 4096, PROT_WRITE|PROT_READ, MAP_ANON|MAP_SHARED, -1, 0);
+	assert(shm);
+	internal_ips_lock = shm;
+	internal_ips = (void*)((char*)shm + 2048);
+	MUTEX_INIT(internal_ips_lock);
+	memset(internal_ips, 0, sizeof *internal_ips);
 	initpipe(req_pipefd);
 	initpipe(resp_pipefd);
@@ -347,5 +351,5 @@ void at_close(void) {
 	close(req_pipefd[1]);
 	close(resp_pipefd[0]);
 	close(resp_pipefd[1]);
-	MUTEX_DESTROY(&internal_ips_lock);
+	MUTEX_DESTROY(internal_ips_lock);
 }
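
one assumption worth spelling out about the at_init() hunk above: the single
4096-byte anonymous mapping is split in half, mutex at offset 0 and lookup
table at offset 2048, so each object must fit into 2048 bytes. a compile-time
check along these lines (not part of the commit, requires C11) would make that
explicit:

/* sketch of the size assumption behind the fixed 2048-byte offset */
#include <assert.h>
#include <pthread.h>
static_assert(sizeof(pthread_mutex_t) <= 2048,
              "mutex must fit in the first half of the shared page");
/* likewise sizeof(internal_ip_lookup_table) <= 2048 must hold for the second half */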