diff src/io/tx_deflate.c @ 0:d39e1d0d75b6

initial add
author paulo@hit-nxdomain.opendns.com
date Sat, 20 Feb 2010 21:18:28 -0800
parents
children
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/io/tx_deflate.c	Sat Feb 20 21:18:28 2010 -0800
     1.3 @@ -0,0 +1,638 @@
     1.4 +/*
     1.5 + * $Id: tx_deflate.c,v 1.15 2004/05/02 08:55:00 hipnod Exp $
     1.6 + *
     1.7 + * Copyright (C) 2004 giFT project (gift.sourceforge.net)
     1.8 + *
     1.9 + * This program is free software; you can redistribute it and/or modify it
    1.10 + * under the terms of the GNU General Public License as published by the
    1.11 + * Free Software Foundation; either version 2, or (at your option) any
    1.12 + * later version.
    1.13 + *
    1.14 + * This program is distributed in the hope that it will be useful, but
    1.15 + * WITHOUT ANY WARRANTY; without even the implied warranty of
    1.16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    1.17 + * General Public License for more details.
    1.18 + */
    1.19 +
    1.20 +#include "gt_gnutella.h"
    1.21 +#include "gt_packet.h"       /* packet manipulation macros */
    1.22 +
    1.23 +#include "io/tx_stack.h"
    1.24 +#include "io/tx_layer.h"
    1.25 +#include "io/io_buf.h"
    1.26 +
    1.27 +#include <zlib.h>
    1.28 +
    1.29 +/*****************************************************************************/
    1.30 +
    1.31 +#define DEFLATE_DEBUG    0
    1.32 +
    1.33 +#if DEFLATE_DEBUG
    1.34 +#define DEFLATE_TRACEFN(tx) \
    1.35 +	GT->DBGSOCK (GT, tx->stack->c, "entered")
    1.36 +
    1.37 +#define DEFLATE_DUMP(tx_deflate) \
    1.38 +{ \
    1.39 +	if (DEFLATE_DEBUG) \
    1.40 +	{ \
    1.41 +		float percent = ((float)tx_deflate->nbytes_in - \
    1.42 +		                 tx_deflate->nbytes_out - tx_deflate->nbytes_unflushed) / \
    1.43 +		                 (float)tx_deflate->nbytes_in; \
    1.44 +\
    1.45 +		GT->DBGSOCK (GT, tx->stack->c, "in %lu out %lu flushed %lu unflushed %lu (flushing %d) " \
    1.46 +		             "ratio %.2f%% avg %.2f", \
    1.47 +		             (long)tx_deflate->nbytes_in, (long)tx_deflate->nbytes_out, \
    1.48 +		             (long)tx_deflate->nbytes_flushed, \
    1.49 +		             (long)tx_deflate->nbytes_unflushed, \
    1.50 +		             (long)tx_deflate->flushing, percent * 100.0, \
    1.51 +		             (double)tx_deflate->nbytes_out / \
    1.52 +		                     difftime (time (NULL), tx->stack->start_time)); \
    1.53 +	} \
    1.54 +}
    1.55 +#else /* !DEFLATE_DEBUG */
    1.56 +#define DEFLATE_TRACEFN(tx)
    1.57 +#define DEFLATE_DUMP(tx_deflate)
    1.58 +#endif /* DEFLATE_DEBUG */
    1.59 +
    1.60 +/*****************************************************************************/
    1.61 +
    1.62 +#define TX_DEFLATE_BUFSIZE  (1024 - 1)      /* -1 for auto-nullification */
    1.63 +
    1.64 +#define FLUSH_AFTER         (4096)          /* flush after this many bytes */
    1.65 +
    1.66 +#define NAGLE_TIMEOUT       (200 * MSEC)    /* 200 milliseconds */
    1.67 +
    1.68 +/*****************************************************************************/
    1.69 +
/*
 * Per-connection state for the deflate TX layer: a zlib stream, one
 * staging buffer of compressed output, a Nagle timer, and counters used
 * for flush bookkeeping and debug statistics.
 */
struct tx_deflate
{
	/* zlib data */
	z_stream       z;

	/* compressed output staging buffer (allocated lazily; NULL when empty) */
	struct io_buf *buf;

	/* Nagle timer that sends stored data after NAGLE_TIMEOUT milliseconds */
	timer_id       nagle_timer;

	size_t         nbytes_in;        /* total uncompressed bytes consumed */
	size_t         nbytes_out;       /* total compressed bytes produced */
	size_t         nbytes_flushed;   /* total bytes written to lower layer */
	size_t         nbytes_unflushed; /* bytes currently waiting in z_stream */

	/*
	 * Whether the zstream is currently being flushed, and so whether deflate
	 * must receive a Z_SYNC_FLUSH parameter to continue flushing.  The flush
	 * ends when deflate returns with avail_out > 0.
	 */
	BOOL           flushing;

	/*
	 * When doing a flush, it's possible that there will be a partially
	 * filled buffer leftover.  If there's no new data that comes in, the data
	 * will be delayed again until more data comes from the upper layer.  This
	 * flag is set when this happens, so we know that we should flush the
	 * buffer to the lower layer as soon as possible, even if it isn't
	 * completely full.
	 */
	BOOL           delayed;
};
   1.103 +
   1.104 +/*****************************************************************************/
   1.105 +
   1.106 +static void start_nagle_timer (struct tx_layer *tx, struct tx_deflate *deflate);
   1.107 +static void stop_nagle_timer  (struct tx_layer *tx, struct tx_deflate *deflate);
   1.108 +
   1.109 +/*****************************************************************************/
   1.110 +
/* Enable hook for the tx stack; not yet implemented for this layer. */
static void tx_deflate_enable (struct tx_layer *tx)
{
	/* TODO */
}
   1.115 +
/* Disable hook for the tx stack; not yet implemented for this layer. */
static void tx_deflate_disable (struct tx_layer *tx)
{
	/* TODO */
}
   1.120 +
   1.121 +/*****************************************************************************/
   1.122 +
/*
 * Flow-control toggle.  Deliberately a no-op: this layer never holds on to
 * whole packets, it only compresses and passes data along.
 */
static void tx_deflate_toggle (struct tx_layer *tx, BOOL stop)
{
	/* nothing, we do not consume packets, only pass along */
}
   1.127 +
   1.128 +/*****************************************************************************/
   1.129 +
   1.130 +static BOOL alloc_buffer (struct tx_deflate *tx_deflate)
   1.131 +{
   1.132 +	if (tx_deflate->buf)
   1.133 +		return TRUE;
   1.134 +
   1.135 +	if (!(tx_deflate->buf = io_buf_new (TX_DEFLATE_BUFSIZE)))
   1.136 +		return FALSE;
   1.137 +
   1.138 +	return TRUE;
   1.139 +}
   1.140 +
   1.141 +static void finish_flush (struct tx_deflate *tx_deflate)
   1.142 +{
   1.143 +	tx_deflate->nbytes_unflushed = 0;
   1.144 +	tx_deflate->flushing         = FALSE;
   1.145 +}
   1.146 +
   1.147 +static tx_status_t flush_buffer (struct tx_layer *tx,
   1.148 +                                 struct tx_deflate *tx_deflate)
   1.149 +{
   1.150 +	tx_status_t ret;
   1.151 +	size_t      n;
   1.152 +
   1.153 +	DEFLATE_TRACEFN(tx);
   1.154 +
   1.155 +	n = io_buf_read_avail (tx_deflate->buf);
   1.156 +
   1.157 +	/*
   1.158 +	 * The buffer filled up.  Try to send again until the lower
   1.159 +	 * layer is saturated.
   1.160 +	 */
   1.161 +	ret = gt_tx_layer_queue (tx, tx_deflate->buf);
   1.162 +	assert (ret != TX_EMPTY);
   1.163 +
   1.164 +	if (ret == TX_ERROR || ret == TX_FULL)
   1.165 +		return ret;
   1.166 +
   1.167 +	tx_deflate->nbytes_flushed += n;
   1.168 +	assert (ret == TX_OK);
   1.169 +
   1.170 +	stop_nagle_timer (tx, tx_deflate);
   1.171 +
   1.172 +	tx_deflate->buf     = NULL;
   1.173 +	tx_deflate->delayed = FALSE;
   1.174 +
   1.175 +	return TX_OK;
   1.176 +}
   1.177 +
   1.178 +/*
   1.179 + * Try to flush the data inside the z_stream and send it to the layer beneath
   1.180 + * this one.
   1.181 + */
   1.182 +static tx_status_t flush_stream (struct tx_layer *tx,
   1.183 +                                 struct tx_deflate *tx_deflate)
   1.184 +{
   1.185 +	z_stream   *z   = &tx_deflate->z;
   1.186 +	tx_status_t ret;
   1.187 +	int         zret;
   1.188 +	size_t      wlen, old_avail;
   1.189 +
   1.190 +	DEFLATE_TRACEFN(tx);
   1.191 +
   1.192 +	if (!alloc_buffer (tx_deflate))
   1.193 +		return TX_ERROR;
   1.194 +
   1.195 +	old_avail = io_buf_write_avail (tx_deflate->buf);
   1.196 +
   1.197 +	z->avail_in  = 0;
   1.198 +	z->next_in   = NULL;  /* don't disrupt anything else */
   1.199 +	z->next_out  = io_buf_write_ptr (tx_deflate->buf);
   1.200 +	z->avail_out = old_avail;
   1.201 +
   1.202 +	zret = deflate (z, Z_SYNC_FLUSH);
   1.203 +
   1.204 +	/*
   1.205 +	 * If this is true we've already flushed all possible data.
   1.206 +	 */
   1.207 +	if (zret == Z_BUF_ERROR)
   1.208 +	{
   1.209 +		tx_deflate->flushing = FALSE;
   1.210 +
   1.211 +		/* send the stored data */
   1.212 +		if (io_buf_read_avail (tx_deflate->buf) > 0)
   1.213 +			return flush_buffer (tx, tx_deflate);
   1.214 +
   1.215 +		return TX_EMPTY;
   1.216 +	}
   1.217 +
   1.218 +	if (zret != Z_OK)
   1.219 +		return TX_ERROR;
   1.220 +
   1.221 +	wlen = old_avail - z->avail_out;
   1.222 +
   1.223 +	io_buf_push (tx_deflate->buf, wlen);
   1.224 +	tx_deflate->nbytes_out += wlen;
   1.225 +
   1.226 +	tx_deflate->flushing = TRUE;
   1.227 +
   1.228 +	/* if there is space, the flush completed successfully */
   1.229 +	if (z->avail_out > 0)
   1.230 +		finish_flush (tx_deflate);
   1.231 +
   1.232 +	if ((ret = flush_buffer (tx, tx_deflate) != TX_OK))
   1.233 +		return ret;
   1.234 +
   1.235 +	/* stop when the flush completes */
   1.236 +	if (!tx_deflate->flushing)
   1.237 +		return TX_OK;
   1.238 +
   1.239 +	/* tail recurse until the flush completes */
   1.240 +	return flush_stream (tx, tx_deflate);
   1.241 +}
   1.242 +
/*
 * Nagle timer callback: force out data that has been sitting in the
 * z_stream/buffer for NAGLE_TIMEOUT ms without filling a whole buffer.
 * Always returns FALSE so the timer does not re-arm.
 */
static BOOL deflate_nagle_timeout (struct tx_layer *tx)
{
	struct tx_deflate *tx_deflate = tx->udata;
	tx_status_t        ret;

	DEFLATE_TRACEFN(tx);

	/* this assertion means we have to disarm the timer when sending the
	 * buffer */
	assert (tx_deflate->buf != NULL);

	ret = flush_stream (tx, tx_deflate);

	/* no matter what, we disable the Nagle timer after this */
	stop_nagle_timer (tx, tx_deflate);

	/* tearing down the stack frees this layer; must not touch it after */
	if (ret == TX_ERROR)
	{
		gt_tx_stack_abort (tx->stack);
		return FALSE;
	}

	if (DEFLATE_DEBUG)
		GT->DBGSOCK (GT, tx->stack->c, "buffer delayed?: %d", tx_deflate->delayed);

	return FALSE;
}
   1.270 +
   1.271 +static void start_nagle_timer (struct tx_layer *tx,
   1.272 +                               struct tx_deflate *tx_deflate)
   1.273 +{
   1.274 +	if (DEFLATE_DEBUG)
   1.275 +		GT->DBGSOCK (GT, tx->stack->c, "nagle timer=%d", tx_deflate->nagle_timer);
   1.276 +
   1.277 +	if (tx_deflate->nagle_timer != 0)
   1.278 +		return;
   1.279 +
   1.280 +	tx_deflate->nagle_timer = timer_add (NAGLE_TIMEOUT,
   1.281 +	                                     (TimerCallback)deflate_nagle_timeout,
   1.282 +	                                     tx);
   1.283 +}
   1.284 +
/* Disarm the Nagle timer (and zero the id); safe to call when not armed. */
static void stop_nagle_timer (struct tx_layer *tx,
                              struct tx_deflate *tx_deflate)
{
	if (DEFLATE_DEBUG)
		GT->DBGSOCK (GT, tx->stack->c, "nagle timer=%d", tx_deflate->nagle_timer);

	timer_remove_zero (&tx_deflate->nagle_timer);
}
   1.293 +
   1.294 +/*****************************************************************************/
   1.295 +
   1.296 +/*
   1.297 + * The upper layer has sent us a buffer to process.
   1.298 + */
   1.299 +static tx_status_t tx_deflate_queue (struct tx_layer *tx, struct io_buf *msg)
   1.300 +{
   1.301 +	struct tx_deflate *tx_deflate      = tx->udata;
   1.302 +	z_stream          *z               = &tx_deflate->z;
   1.303 +	BOOL               flush_completed = FALSE;
   1.304 +	int                ret;
   1.305 +
   1.306 +	DEFLATE_TRACEFN(tx);
   1.307 +
   1.308 +	/*
   1.309 +	 * Deflate the incoming message, adding it to the buffer.
   1.310 +	 *
   1.311 +	 * If our buffer is currently full, return TX_FULL.
   1.312 +	 */
   1.313 +
   1.314 +	if (!alloc_buffer (tx_deflate))
   1.315 +	{
   1.316 +		io_buf_free (msg);
   1.317 +		return TX_ERROR;
   1.318 +	}
   1.319 +
   1.320 +	z->next_in   = io_buf_read_ptr    (msg);
   1.321 +	z->avail_in  = io_buf_read_avail  (msg);
   1.322 +	z->next_out  = io_buf_write_ptr   (tx_deflate->buf);
   1.323 +	z->avail_out = io_buf_write_avail (tx_deflate->buf);
   1.324 +
   1.325 +	if (z->avail_out == 0)
   1.326 +		return TX_FULL;
   1.327 +
   1.328 +	while (io_buf_read_avail (msg) > 0 && z->avail_out > 0)
   1.329 +	{
   1.330 +		size_t rlen, wlen;
   1.331 +
   1.332 +		assert (z->next_in == io_buf_read_ptr (msg));
   1.333 +		assert (z->next_out == io_buf_write_ptr (tx_deflate->buf));
   1.334 +
   1.335 +		/* begin flushing after a certain amount */
   1.336 +		if (tx_deflate->nbytes_unflushed >= FLUSH_AFTER)
   1.337 +			tx_deflate->flushing = TRUE;
   1.338 +
   1.339 +		ret = deflate (z, tx_deflate->flushing ? Z_SYNC_FLUSH : 0);
   1.340 +
   1.341 +		if (ret != Z_OK)
   1.342 +		{
   1.343 +			GT->DBGFN (GT, "deflate: error %d", ret);
   1.344 +			io_buf_free (msg);
   1.345 +			return TX_ERROR;
   1.346 +		}
   1.347 +
   1.348 +		rlen = io_buf_read_avail (msg) - z->avail_in;
   1.349 +		wlen = io_buf_write_avail (tx_deflate->buf) - z->avail_out;
   1.350 +		assert (rlen > 0 || wlen > 0); /* hmm, is this true when flushing? */
   1.351 +#if 0
   1.352 +		assert (wlen > 0);
   1.353 +#endif
   1.354 +
   1.355 +		tx_deflate->nbytes_in        += rlen;
   1.356 +		tx_deflate->nbytes_unflushed += rlen;
   1.357 +		tx_deflate->nbytes_out       += wlen;
   1.358 +
   1.359 +		DEFLATE_DUMP(tx_deflate);
   1.360 +
   1.361 +		/* update the buffer lengths */
   1.362 +		io_buf_push (tx_deflate->buf, wlen);
   1.363 +		io_buf_pop  (msg, rlen);
   1.364 +
   1.365 +		if (z->avail_out == 0)
   1.366 +			break;
   1.367 +
   1.368 +		/*
   1.369 +		 * If we have available output space and no more input space,
   1.370 +		 * we know the flush completed, so unset flush mode.
   1.371 +		 *
   1.372 +		 * NOTE: there might be a bug here.  The flush may fit exactly
   1.373 +		 * everytime, causing us to never leave flush mode.  I think zlib may
   1.374 +		 * try to prevent this itself, though.
   1.375 +		 */
   1.376 +		if (tx_deflate->flushing && z->avail_in == 0)
   1.377 +		{
   1.378 +			flush_completed = TRUE;
   1.379 +			finish_flush (tx_deflate);
   1.380 +		}
   1.381 +	}
   1.382 +
   1.383 +	/*
   1.384 +	 * If we completed a flush, and the buffer isn't full, set the delayed
   1.385 +	 * flag so that service_deflate() will write the buffer immediately to
   1.386 +	 * reduce latency, as it has already endured a Nagle timeout period.
   1.387 +	 */
   1.388 +	if (flush_completed &&
   1.389 +	    io_buf_read_avail (tx_deflate->buf) < TX_DEFLATE_BUFSIZE)
   1.390 +	{
   1.391 +		if (DEFLATE_DEBUG)
   1.392 +		{
   1.393 +			GT->DBGSOCK (GT, tx->stack->c, "setting ->delayed flag on buf(%d)",
   1.394 +			           io_buf_read_avail (tx_deflate->buf));
   1.395 +		}
   1.396 +
   1.397 +		tx_deflate->delayed = TRUE;
   1.398 +	}
   1.399 +
   1.400 +	/*
   1.401 +	 * If the message buffer was only partially emptied, don't free
   1.402 +	 * it and let tx_layer.c know to handle it specially.
   1.403 +	 */
   1.404 +	if (io_buf_read_avail (msg) > 0)
   1.405 +		return TX_PARTIAL;
   1.406 +
   1.407 +	io_buf_free (msg);
   1.408 +
   1.409 +	return TX_OK;
   1.410 +}
   1.411 +
   1.412 +/*****************************************************************************/
   1.413 +
   1.414 +/*
   1.415 + * Get more data to write.
   1.416 + */
   1.417 +static tx_status_t get_buffers (struct tx_layer *tx,
   1.418 +                                struct tx_deflate *tx_deflate)
   1.419 +{
   1.420 +	if (tx_deflate->buf && io_buf_write_avail (tx_deflate->buf) == 0)
   1.421 +		return TX_OK;
   1.422 +
   1.423 +	return gt_tx_layer_ready (tx);
   1.424 +}
   1.425 +
   1.426 +/*
   1.427 + * This is the most complicated part of the whole stack:
   1.428 + *
   1.429 + * [1] Call upper layer's ready routine to grab a buffer (gt_tx_layer_ready).
   1.430 + *
   1.431 + * [2] That function will call tx_deflate_queue, which compresses the data to
   1.432 + *     a buffer, as many times as it can while there's more data to process.
   1.433 + *
   1.434 + * [3] If we didn't fill the buffer, or there was no data, return TX_EMPTY
   1.435 + *     telling the lower layer there is no data.
   1.436 + *
   1.437 + * [4] If there's no data in the upper layer, but we're in flush mode, call
   1.438 + *     flush_stream() to send whatever data is stored inside the z_stream,
   1.439 + *     and stop.
   1.440 + *
   1.441 + * [5] If we filled the buffer, or if we have a paritally filled buffer that
   1.442 + *     was delayed in deflate_nagle_timeout(), send it to the lower layer with
   1.443 + *     flush_buffer().  If the lower layer returns TX_FULL, stop and return
   1.444 + *     TX_OK.  Otherwise, continue by calling this function recursively.
   1.445 + *
   1.446 + *     NOTE: The buffer is filled in tx_deflate_queue but sent in this
   1.447 + *     function (or from the Nagle timer if the buffer isn't full).
   1.448 + *
   1.449 + * The caller of this function has to setup a Nagle timer if any data was
   1.450 + * written and TX_FULL was not encountered.
   1.451 + */
   1.452 +static tx_status_t service_deflate (struct tx_layer *tx,
   1.453 +                                    struct tx_deflate *tx_deflate)
   1.454 +{
   1.455 +	tx_status_t ret;
   1.456 +
   1.457 +	DEFLATE_TRACEFN(tx);
   1.458 +
   1.459 +	/* [1] + [2] */
   1.460 +	ret = get_buffers (tx, tx_deflate);
   1.461 +
   1.462 +	if (ret == TX_ERROR)
   1.463 +		return TX_ERROR;
   1.464 +
   1.465 +	/* [3] */
   1.466 +	if (ret == TX_EMPTY)
   1.467 +	{
   1.468 +		assert (ret == TX_EMPTY);
   1.469 +
   1.470 +		/* [4]: continue flush even if no data avail */
   1.471 +		if (tx_deflate->flushing)
   1.472 +			ret = flush_stream (tx, tx_deflate);
   1.473 +
   1.474 +		return ret;
   1.475 +	}
   1.476 +
   1.477 +	assert (tx_deflate->buf != NULL);
   1.478 +
   1.479 +	if (DEFLATE_DEBUG)
   1.480 +	{
   1.481 +		if (tx_deflate->delayed)
   1.482 +		{
   1.483 +			GT->DBGSOCK (GT, tx->stack->c, "flushing delayed buf(%d)",
   1.484 +			             io_buf_read_avail (tx_deflate->buf));
   1.485 +		}
   1.486 +	}
   1.487 +
   1.488 +	assert (ret == TX_OK);
   1.489 +
   1.490 +	/*
   1.491 +	 * [5]
   1.492 +	 *
   1.493 +	 * flush_buffer will stop the Nagle timer if the buffer was
   1.494 +	 * successfully sent.
   1.495 +	 *
   1.496 +	 * We must also flush the buffer if it contains partial data from a
   1.497 +	 * previous flush that was delayed in the Nagle timer due to having no
   1.498 +	 * space.
   1.499 +	 */
   1.500 +	if (tx_deflate->delayed || io_buf_write_avail (tx_deflate->buf) == 0)
   1.501 +		ret = flush_buffer (tx, tx_deflate);
   1.502 +
   1.503 +	if (ret != TX_OK)
   1.504 +		return ret;
   1.505 +
   1.506 +	/* tail recurse until the lower layer is saturated */
   1.507 +	return service_deflate (tx, tx_deflate);
   1.508 +}
   1.509 +
   1.510 +/*
   1.511 + * The lower layer is ready to write.
   1.512 + */
/*
 * The lower layer is ready to write.
 *
 * Drives service_deflate(), arms the Nagle timer for any partially-filled
 * buffer left behind, and reports TX_OK if anything actually reached the
 * lower layer, TX_EMPTY otherwise (so the caller can remove its input).
 */
static tx_status_t tx_deflate_ready (struct tx_layer *tx)
{
	struct tx_deflate *tx_deflate  = tx->udata;
	size_t             old_flushed;
	tx_status_t        ret;

	/* keep track of how much was previously flushed */
	old_flushed = tx_deflate->nbytes_flushed;

	ret = service_deflate (tx, tx_deflate);

	if (ret == TX_ERROR || ret == TX_FULL)
	{
		if (ret == TX_FULL)
		{
			/* flush_buffer should have deactivated the Nagle timer */
			assert (tx_deflate->nagle_timer == 0);

			/* we wrote something -- let caller know it's ok */
			ret = TX_OK;
		}

		return ret;
	}

	assert (ret == TX_OK || ret == TX_EMPTY);

	/*
	 * If the lower layer was not saturated (evidenced by _not_ returning
	 * TX_FULL), and there is a partially completed buffer, the Nagle
	 * timer must be armed.  This ensures the data waiting in this layer will
	 * go out in a timely manner.  If the lower layer was saturated, we don't
	 * need to arm the timer because there is no buffer space to flush to
	 * anyway, and when the lower layer unsaturates it will reinvoke this
	 * layer to write more data.
	 *
	 * TODO: Still need to flush if there is some urgent data waiting.  So,
	 * should add a ->flush callback.
	 *
	 * XXX: Using tx_deflate->buf != NULL as a hacky way to recognize that
	 * some data was written to the z_stream.
	 */
	if (tx_deflate->buf != NULL)
		start_nagle_timer (tx, tx_deflate);

	if (DEFLATE_DEBUG)
	{
		GT->DBGSOCK (GT, tx->stack->c, "buf waiting=[%d] ret=%s",
		             tx_deflate->buf ? io_buf_read_avail (tx_deflate->buf) : 0,
		             ret == TX_EMPTY ? "TX_EMPTY" : "TX_OK");
	}

	DEFLATE_DUMP(tx_deflate);

	/*
	 * For the return value from this function, decipher whether
	 * service_deflate() wrote some data.
	 *
	 * If nothing was written, then we should stop sending now, by returning
	 * TX_EMPTY.  That will remove the input in tx_link.c that's calling this
	 * layer, which kind of sucks, because this could be the case a lot of the
	 * time when the whole buffer hasn't been filled up, leading to a removing
	 * and adding the input a lot.
	 *
	 * Otherwise, return TX_OK if something was sent to the lower layer.
	 */
	if (old_flushed == tx_deflate->nbytes_flushed)
		return TX_EMPTY;

	return TX_OK;
}
   1.584 +
   1.585 +/*****************************************************************************/
   1.586 +
   1.587 +static BOOL tx_deflate_init (struct tx_layer *tx)
   1.588 +{
   1.589 +	struct tx_deflate *tx_deflate;
   1.590 +
   1.591 +	if (!(tx_deflate = malloc (sizeof(*tx_deflate))))
   1.592 +		return FALSE;
   1.593 +
   1.594 +	/* zlib documents these variables as needing initialization before
   1.595 +	 * deflateInit() */
   1.596 +	tx_deflate->z.zalloc = Z_NULL;
   1.597 +	tx_deflate->z.zfree  = Z_NULL;
   1.598 +	tx_deflate->z.opaque = Z_NULL;
   1.599 +
   1.600 +	if (deflateInit (&tx_deflate->z, Z_DEFAULT_COMPRESSION) != Z_OK)
   1.601 +	{
   1.602 +		FREE (tx_deflate);
   1.603 +		return FALSE;
   1.604 +	}
   1.605 +
   1.606 +	tx_deflate->buf              = NULL;
   1.607 +	tx_deflate->nagle_timer      = 0;
   1.608 +	tx_deflate->nbytes_in        = 0;
   1.609 +	tx_deflate->nbytes_out       = 0;
   1.610 +	tx_deflate->nbytes_flushed   = 0;
   1.611 +	tx_deflate->nbytes_unflushed = 0;
   1.612 +	tx_deflate->flushing         = FALSE;
   1.613 +	tx_deflate->delayed          = FALSE;
   1.614 +
   1.615 +	tx->udata = tx_deflate;
   1.616 +	return TRUE;
   1.617 +}
   1.618 +
   1.619 +static void tx_deflate_destroy (struct tx_layer *tx)
   1.620 +{
   1.621 +	struct tx_deflate *tx_deflate = tx->udata;
   1.622 +
   1.623 +	io_buf_free (tx_deflate->buf);
   1.624 +	timer_remove (tx_deflate->nagle_timer);
   1.625 +
   1.626 +	deflateEnd (&tx_deflate->z);
   1.627 +	FREE (tx_deflate);
   1.628 +}
   1.629 +
   1.630 +/*****************************************************************************/
   1.631 +
/*
 * Virtual-method table wiring this deflate layer into the tx stack.
 * Slot order must match struct tx_layer_ops (declared in io/tx_layer.h);
 * the slot names below are inferred from the handlers -- verify against
 * that header.
 */
struct tx_layer_ops gt_tx_deflate_ops =
{
	tx_deflate_init,      /* init */
	tx_deflate_destroy,   /* destroy */
	tx_deflate_toggle,    /* toggle */
	tx_deflate_queue,     /* queue */
	tx_deflate_ready,     /* ready */
	tx_deflate_enable,    /* enable */
	tx_deflate_disable,   /* disable */
};