buftee.c
/*-
 * Copyright (c) 2013 Emil Mikulic <emikulic@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * [ http://www.openbsd.org/cgi-bin/cvsweb/src/share/misc/license.template ]
 */

// buftee: like tee(1) but buffers in memory.
//
// Read from stdin into memory whenever data is available, so that whatever
// is writing to us never blocks. Our memory usage is unbounded.
//
// Write to a number of file descriptors whenever we're able to.
// Because write() can take a long time even with O_NONBLOCK set on the
// fd, we have to do the writing from a separate kernel scheduling entity
// (we use pthreads).
//
// We use a thread per output file descriptor so that one slow output (e.g.
// disk) doesn't block another (e.g. stdout to a terminal), or vice-versa if
// you use flow control on the terminal!
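//
// Example usage (illustrative; the command and filename are arbitrary):
//
//   $ some_long_running_command | buftee /tmp/command.log
//
// copies the command's output to both stdout and /tmp/command.log, and keeps
// reading even while one of the outputs stalls. Note that each named file is
// created with O_EXCL, so it must not already exist.
//
// Build sketch (assumes gcc or clang on Linux):
//
//   $ cc -O2 -pthread buftee.c -o buftee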
#define _GNU_SOURCE // for clock_gettime()

#include <sys/queue.h>
#include <sys/select.h>
#include <sys/syscall.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

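// How much to read() from stdin at a time, and the threshold (in nanoseconds)
// above which warn_time() reports a read() or write() as slow.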
#define READ_BUF_SIZE 4096
#define SLOW_NSEC 4000

// *** GLOBALS *****************************************************************

// All queues are locked through one global mutex.
static pthread_mutex_t shared_queue_mutex = PTHREAD_MUTEX_INITIALIZER;

// When a writer runs out of work to do, it sleeps on this global cond.
static pthread_cond_t shared_wakeup_cond = PTHREAD_COND_INITIALIZER;

// Asserted on receipt of SIGTERM, SIGINT.
static volatile int stopping = 0;

// *** (end globals) ***********************************************************

static void sig_stopping(int _ignored_ __attribute__((__unused__))) {
  stopping = 1;
}

// Reference-counted buffer, contains data that was read in the main thread.
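// Once a buf has been handed to the writer queues, its refcount is only
// touched while holding shared_queue_mutex.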
struct buf {
  char* data;
  int len;
  int refcount;
};

// Queue of buffers.
struct buf_queue_elem {
  struct buf* buf;
  STAILQ_ENTRY(buf_queue_elem) entries;
};
STAILQ_HEAD(buf_queue, buf_queue_elem); // struct buf_queue

// Context for a writer thread.
struct writer_thread {
  pthread_t thread;
  int fd;

  // Each writer has its own queue.
  struct buf_queue queue;

  STAILQ_ENTRY(writer_thread) entries;
};

// A list of writer threads.
STAILQ_HEAD(writer_thread_list, writer_thread); // struct writer_thread_list

static struct buf* alloc_buf(const char* const data, const int len) {
  assert(len > 0);
  struct buf* buf = malloc(sizeof(*buf));
  buf->data = malloc((size_t)len);
  memcpy(buf->data, data, (size_t)len);
  buf->len = len;
  buf->refcount = 0;
  return buf;
}

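// Drop a reference, freeing the buffer when the last reference goes away.
// Caller must hold shared_queue_mutex.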
static void unref_buf(struct buf* buf) {
  assert(buf->refcount > 0);
  if ((--buf->refcount) == 0) {
    free(buf->data);
    free(buf);
  }
}

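// Append buf to the queue, taking a new reference to it.
// Caller must hold shared_queue_mutex.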
static void enqueue(struct buf_queue* restrict queue,
                    struct buf* restrict buf) {
  struct buf_queue_elem* elem = malloc(sizeof(*elem));
  elem->buf = buf;
  buf->refcount++;
  STAILQ_INSERT_TAIL(queue, elem, entries);
}

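// Pop the head of the queue; the queue's reference to the buf is handed over
// to the caller. Caller must hold shared_queue_mutex.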
static struct buf* dequeue(struct buf_queue* const queue) {
  assert(!STAILQ_EMPTY(queue));
  struct buf_queue_elem* head;
  head = STAILQ_FIRST(queue);
  STAILQ_REMOVE_HEAD(queue, entries);
  struct buf* buf = head->buf;
  free(head);
  return buf;
}

static void xpthread_create(pthread_t* thread,
                            void* (*start_routine)(void*),
                            void* arg) {
  int ret = pthread_create(thread, NULL, start_routine, arg);
  if (ret == 0) return;
  errno = ret;
  err(1, "pthread_create(%p) failed", thread);
}

static void set_nonblocking(const int fd) {
  int flags;
  if ((flags = fcntl(fd, F_GETFL)) == -1)
    err(1, "fcntl(fd = %d, F_GETFL) failed", fd);
  if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
    err(1, "fcntl(fd = %d, F_SETFL, O_NONBLOCK) failed", fd);
}

static int make_file(const char* filename) {
  int fd = open(filename, O_CREAT | O_EXCL | O_NONBLOCK | O_WRONLY, 0666);
  if (fd == -1)
    err(1, "failed to open(\"%s\")", filename);
  return fd;
}

static void get_mono_time(struct timespec* t) {
  if (clock_gettime(CLOCK_MONOTONIC, t) == -1)
    err(1, "clock_gettime(CLOCK_MONOTONIC) failed");
}

static void time_diff(const struct timespec* restrict start,
                      const struct timespec* restrict end,
                      struct timespec* restrict out) {
  out->tv_sec = end->tv_sec - start->tv_sec;
  out->tv_nsec = end->tv_nsec - start->tv_nsec;
  if (out->tv_nsec < 0) {
    out->tv_sec -= 1;
    out->tv_nsec += 1000000000;
  }
  assert(out->tv_sec >= 0);
  assert(out->tv_nsec >= 0);
  assert(out->tv_nsec < 1000000000);
}

// glibc 2.30 and later declare their own gettid() in <unistd.h> under
// _GNU_SOURCE, so give our raw-syscall wrapper a different name.
static int get_tid(void) {
  return (int)syscall(SYS_gettid);
}

static void warn_time(const char* desc,
                      const struct timespec* restrict start,
                      const struct timespec* restrict end) {
  struct timespec diff;
  time_diff(start, end, &diff);
  if (diff.tv_sec > 0 || diff.tv_nsec > SLOW_NSEC) {
    char buf[128];
    extern char *__progname; // This is where glibc stashes argv[0].
    snprintf(buf, sizeof(buf), "%s:tid %d: %s took %d.%09d secs\n",
             __progname, get_tid(), desc, (int)diff.tv_sec, (int)diff.tv_nsec);
    // Best effort write to a non-blocking stderr.
    (void)write(STDERR_FILENO, buf, strlen(buf));
  }
}

static int xread(const int fd, char* const restrict buf, const int count) {
  ssize_t read_ret;
  int saved_errno;
  struct timespec t0;
  struct timespec t1;

  get_mono_time(&t0);
  read_ret = read(fd, buf, (size_t)count);
  saved_errno = errno;
  get_mono_time(&t1);
  warn_time("read()", &t0, &t1);

  errno = saved_errno;
  if (read_ret == -1)
    err(1, "read(fd = %d, count = %d) failed", fd, count);
    //FIXME: EAGAIN?

  assert(read_ret >= 0);
  return (int)read_ret;
}

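// Block in select() until fd is writable; used after write() returns EAGAIN.
// Returns early if select() is interrupted by one of our stop signals.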
static void wait_until_writable(const int fd) {
  fd_set write_fds;
  FD_ZERO(&write_fds);
  FD_SET(fd, &write_fds);
  int select_ret = select(fd + 1, NULL, &write_fds, NULL, NULL);
  if (select_ret == -1) {
    if (errno == EINTR) {
      assert(stopping); // that should have been SIGTERM
      return;
    }
    err(1, "select(write fd = %d) failed", fd);
  }
  if (!FD_ISSET(fd, &write_fds))
    errx(1, "select() did not return writable fd = %d", fd);
}

static int xwrite(const int fd, struct buf* const buf) {
  ssize_t write_ret;
  int saved_errno;
  struct timespec t0;
  struct timespec t1;

  for (;;) {
    get_mono_time(&t0);
    write_ret = write(fd, buf->data, (size_t)buf->len);
    saved_errno = errno;
    get_mono_time(&t1);
    warn_time("write()", &t0, &t1);

    errno = saved_errno;
    if (write_ret == -1) {
      if (errno == EAGAIN) {
        warn("write(fd = %d) got EAGAIN, sleeping and retrying", fd);
        wait_until_writable(fd);
        continue;
      }
      err(1, "write(fd = %d, count = %d) failed", fd, buf->len);
    }
    if (write_ret == 0)
      return 0; // EOF
    assert(write_ret >= 0);
    if (write_ret < buf->len)
      errx(1, "write(fd = %d, count = %d) stopped short (returned %d)",
           fd, buf->len, (int)write_ret);
      // FIXME: handle this
    assert(write_ret == buf->len);
    return (int)write_ret;
  }
}

static void wait_until_readable(const int fd) {
  fd_set read_fds;
  FD_ZERO(&read_fds);
  FD_SET(fd, &read_fds);
  int select_ret = select(fd + 1, &read_fds, NULL, NULL, NULL);
  if (select_ret == -1) {
    if (errno == EINTR) {
      assert(stopping); // that should have been SIGTERM
      return;
    }
    err(1, "select(read fd = %d) failed", fd);
  }
  if (!FD_ISSET(fd, &read_fds))
    errx(1, "select() did not return readable fd = %d", fd);
}

static void lock(pthread_mutex_t* mutex) {
  int ret = pthread_mutex_lock(mutex);
  if (ret == 0) return;
  errno = ret;
  err(1, "pthread_mutex_lock(%p) failed", mutex);
}

static void unlock(pthread_mutex_t* mutex) {
  int ret = pthread_mutex_unlock(mutex);
  if (ret == 0) return;
  errno = ret;
  err(1, "pthread_mutex_unlock(%p) failed", mutex);
}

static void* writer_routine(void *arg) {
  struct writer_thread* my = arg;
  struct buf* buf = NULL;
  lock(&shared_queue_mutex);
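  // Invariant: shared_queue_mutex is held at the top of every iteration of
  // this loop; it is dropped around the (possibly slow) write() and
  // re-acquired before unref_buf() at the bottom.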
  for (;;) {
    while (!stopping && STAILQ_EMPTY(&my->queue)) {
      // Sleep.
      pthread_cond_wait(&shared_wakeup_cond, &shared_queue_mutex);
    }
    if (!STAILQ_EMPTY(&my->queue))
      buf = dequeue(&my->queue);
    unlock(&shared_queue_mutex);

    if (stopping) break;
    assert(buf != NULL);

    // Write.
    int write_ret = xwrite(my->fd, buf);
    if (write_ret == 0) {
      errx(1, "fd %d hit EOF", my->fd);
    }
    assert(write_ret == buf->len);

    // Unreference buffer, freeing it if we have to.
    lock(&shared_queue_mutex);
    unref_buf(buf);
  }
  warnx("thread exiting cleanly");
  return NULL;
}

static void add_writer_thread(struct writer_thread_list* list, const int fd) {
  set_nonblocking(fd);
  struct writer_thread* writer = malloc(sizeof(*writer));
  writer->fd = fd;
  STAILQ_INIT(&(writer->queue));
  STAILQ_INSERT_TAIL(list, writer, entries);
  xpthread_create(&(writer->thread), writer_routine, writer);
}

static void xpthread_cond_broadcast(pthread_cond_t* cond) {
  int ret = pthread_cond_broadcast(cond);
  if (ret == 0) return;
  errno = ret;
  err(1, "pthread_cond_broadcast(%p) failed", cond);
}

static void xpthread_join(pthread_t thread) {
  int ret = pthread_join(thread, NULL);
  if (ret == 0) return;
  errno = ret;
  err(1, "pthread_join(%lu) failed", thread);
}

static void sig_continue(int _ignored_ __attribute__((__unused__))) {
  set_nonblocking(STDERR_FILENO);
}

int main(int argc, char **argv) {
  struct writer_thread_list writers;
  STAILQ_INIT(&writers);

  if (signal(SIGINT, sig_stopping) == SIG_ERR) err(1, "signal() failed");
  if (signal(SIGTERM, sig_stopping) == SIG_ERR) err(1, "signal() failed");
  if (signal(SIGCONT, sig_continue) == SIG_ERR) err(1, "signal() failed");
  //if (signal(SIGPIPE, SIG_IGN) == SIG_ERR) err(1, "signal() failed");
  sig_continue(0);

  // On Linux, making STDOUT non-blocking has the side-effect of
  // also making STDIN nonblocking.
  add_writer_thread(&writers, STDOUT_FILENO);

  // Process cmdline args.
  for (int i = 1; i < argc; i++) {
    add_writer_thread(&writers, make_file(argv[i]));
  }

  // Reader loop.
  while (!stopping) {
    wait_until_readable(STDIN_FILENO);
    if (stopping) {
      warnx("stopping after select()");
      break;
    }

    // Read.
    char data[READ_BUF_SIZE];
    int read_ret = xread(STDIN_FILENO, data, sizeof(data));
    if (read_ret == 0) {
      warnx("stdin hit EOF");
      break;
    }
    struct buf* buf = alloc_buf(data, read_ret);

    // Enqueue.
    lock(&shared_queue_mutex);
    struct writer_thread* writer;
    STAILQ_FOREACH(writer, &writers, entries) enqueue(&(writer->queue), buf);
    xpthread_cond_broadcast(&shared_wakeup_cond);
    unlock(&shared_queue_mutex);
  }

  // Wake and join threads.
  stopping = 1;
  lock(&shared_queue_mutex);
  xpthread_cond_broadcast(&shared_wakeup_cond);
  unlock(&shared_queue_mutex);
  {
    struct writer_thread* writer;
    STAILQ_FOREACH(writer, &writers, entries) xpthread_join(writer->thread);
  }

  // Free writer list.
  while (!STAILQ_EMPTY(&writers)) {
    struct writer_thread* writer = STAILQ_FIRST(&writers);
    STAILQ_REMOVE_HEAD(&writers, entries);
    // FIXME: free its queue?
    free(writer);
  }

  warnx("exiting cleanly");
  return 0;
}
// vim:set ts=2 sw=2 tw=80 et: