+/* Populate a sum_struct with values from the socket. This is
+ * called by both the sender and the receiver. */
+void read_sum_head(int f, struct sum_struct *sum)
+{
+ sum->count = read_int(f);
+ sum->blength = read_int(f);
+ if (sum->blength < 0 || sum->blength > MAX_BLOCK_SIZE) {
+ rprintf(FERROR, "Invalid block length %ld [%s]\n",
+ (long)sum->blength, who_am_i());
+ exit_cleanup(RERR_PROTOCOL);
+ }
+ sum->s2length = protocol_version < 27 ? csum_length : (int)read_int(f);
+ if (sum->s2length < 0 || sum->s2length > MD4_SUM_LENGTH) {
+ rprintf(FERROR, "Invalid checksum length %d [%s]\n",
+ sum->s2length, who_am_i());
+ exit_cleanup(RERR_PROTOCOL);
+ }
+ sum->remainder = read_int(f);
+ if (sum->remainder < 0 || sum->remainder > sum->blength) {
+ rprintf(FERROR, "Invalid remainder length %ld [%s]\n",
+ (long)sum->remainder, who_am_i());
+ exit_cleanup(RERR_PROTOCOL);
+ }
+}
+
+/* Send the values from a sum_struct over the socket. Set sum to
+ * NULL if there are no checksums to send. This is called by both
+ * the generator and the sender. */
+void write_sum_head(int f, struct sum_struct *sum)
+{
+ static struct sum_struct null_sum;
+
+ if (sum == NULL)
+ sum = &null_sum;
+
+ write_int(f, sum->count);
+ write_int(f, sum->blength);
+ if (protocol_version >= 27)
+ write_int(f, sum->s2length);
+ write_int(f, sum->remainder);
+}
+
+
/**
 * Sleep after writing to limit I/O bandwidth usage.
 *
 * @todo Rather than sleeping after each write, it might be better to
 * use some kind of averaging.  The current algorithm seems to always
 * use a bit less bandwidth than specified, because it doesn't make up
 * for slow periods.  But arguably this is a feature.  In addition, we
 * ought to take the time used to write the data into account.
 *
 * During some phases of big transfers (file FOO is uptodate) this is
 * called with a small bytes_written every time.  As the kernel has to
 * round small waits up to guarantee that we actually wait at least the
 * requested number of microseconds, this can become grossly inaccurate.
 * We therefore keep track of the bytes we've written over time and only
 * sleep when the accumulated delay is at least 1 tenth of a second.
 **/
static void sleep_for_bwlimit(int bytes_written)
{
	static struct timeval prior_tv;	/* time of the previous call */
	static long total_written = 0;	/* byte "debt" not yet slept off */
	struct timeval tv, start_tv;
	long elapsed_usec, sleep_usec;

#define ONE_SEC	1000000L /* # of microseconds in a second */

	if (!bwlimit)
		return;

	total_written += bytes_written;

	gettimeofday(&start_tv, NULL);
	/* On all but the first call, credit the time elapsed since the
	 * previous call against the byte debt.  (The ONE_SEC/1024 scale
	 * factor suggests bwlimit is in KiB/sec -- NOTE(review): confirm
	 * against the option parsing.) */
	if (prior_tv.tv_sec) {
		elapsed_usec = (start_tv.tv_sec - prior_tv.tv_sec) * ONE_SEC
			+ (start_tv.tv_usec - prior_tv.tv_usec);
		total_written -= elapsed_usec * bwlimit / (ONE_SEC/1024);
		if (total_written < 0)
			total_written = 0;
	}

	/* How long we would have to sleep to pay off the current debt. */
	sleep_usec = total_written * (ONE_SEC/1024) / bwlimit;
	if (sleep_usec < ONE_SEC / 10) {
		/* Too short a wait to be accurate -- let the debt build up
		 * and just remember when we last accounted for it. */
		prior_tv = start_tv;
		return;
	}

	/* select() with no fd sets is used as a portable sub-second sleep. */
	tv.tv_sec = sleep_usec / ONE_SEC;
	tv.tv_usec = sleep_usec % ONE_SEC;
	select(0, NULL, NULL, NULL, &tv);

	/* Measure how long we actually slept and carry any over- or
	 * under-sleep forward as the new byte debt. */
	gettimeofday(&prior_tv, NULL);
	elapsed_usec = (prior_tv.tv_sec - start_tv.tv_sec) * ONE_SEC
		+ (prior_tv.tv_usec - start_tv.tv_usec);
	total_written = (sleep_usec - elapsed_usec) * bwlimit / (ONE_SEC/1024);
}
+
+
/* Write len bytes to the file descriptor fd, looping as necessary to get
 * the job done and also (in certain circumstances) reading any data on
 * msg_fd_in to avoid deadlock.
 *
 * This function underlies the multiplexing system.  The body of the
 * application never calls this function directly. */
static void writefd_unbuffered(int fd,char *buf,size_t len)
{
	size_t n, total = 0;
	fd_set w_fds, r_fds;
	int maxfd, count, ret, using_r_fds;
	struct timeval tv;

	/* Suppress io_flush() (which checks no_flush) for the duration of
	 * this routine so nothing re-enters the write path under us. */
	no_flush++;

	while (total < len) {
		FD_ZERO(&w_fds);
		FD_SET(fd,&w_fds);
		maxfd = fd;

		/* Also watch msg_fd_in for readability, but not once we are
		 * inside a sequence that must be written contiguously (see
		 * mplex_write() and contiguous_write_len). */
		if (msg_fd_in >= 0 && len-total >= contiguous_write_len) {
			FD_ZERO(&r_fds);
			FD_SET(msg_fd_in,&r_fds);
			if (msg_fd_in > maxfd)
				maxfd = msg_fd_in;
			using_r_fds = 1;
		} else
			using_r_fds = 0;

		tv.tv_sec = select_timeout;
		tv.tv_usec = 0;

		errno = 0;
		count = select(maxfd + 1, using_r_fds ? &r_fds : NULL,
			       &w_fds, NULL, &tv);

		if (count <= 0) {
			/* A bad descriptor is unrecoverable; otherwise check
			 * whether we have been idle too long, then retry. */
			if (count < 0 && errno == EBADF)
				exit_cleanup(RERR_SOCKETIO);
			check_timeout();
			continue;
		}

		/* Drain an incoming message first -- this is what avoids the
		 * deadlock mentioned above. */
		if (using_r_fds && FD_ISSET(msg_fd_in, &r_fds))
			read_msg_fd();

		if (!FD_ISSET(fd, &w_fds))
			continue;

		n = len - total;
		/* Cap each write so sleep_for_bwlimit() runs often enough to
		 * keep the rate limiting accurate. */
		if (bwlimit && n > bwlimit_writemax)
			n = bwlimit_writemax;
		ret = write(fd, buf + total, n);

		if (ret <= 0) {
			if (ret < 0) {
				if (errno == EINTR)
					continue;
				if (errno == EWOULDBLOCK || errno == EAGAIN) {
					/* Non-blocking fd wasn't ready after
					 * all; yield briefly and retry. */
					msleep(1);
					continue;
				}
			}

			/* Don't try to write errors back across the stream. */
			if (fd == sock_f_out)
				close_multiplexing_out();
			rsyserr(FERROR, errno,
				"writefd_unbuffered failed to write %ld bytes: phase \"%s\" [%s]",
				(long)len, io_write_phase, who_am_i());
			/* If the other side is sending us error messages, try
			 * to grab any messages they sent before they died. */
			while (fd == sock_f_out && io_multiplexing_in) {
				set_io_timeout(30);
				ignore_timeout = 0;
				readfd_unbuffered(sock_f_in, io_filesfrom_buf,
						  sizeof io_filesfrom_buf);
			}
			exit_cleanup(RERR_STREAMIO);
		}

		total += ret;

		/* Successful socket writes update the activity timestamp and
		 * get charged against the bandwidth limit. */
		if (fd == sock_f_out) {
			if (io_timeout || am_generator)
				last_io_out = time(NULL);
			sleep_for_bwlimit(ret);
		}
	}

	no_flush--;
}
+
+
/**
 * Write a message to a multiplexed stream.  If this fails then rsync
 * exits.
 **/
static void mplex_write(enum msgcode code, char *buf, size_t len)
{
	char buffer[4096];
	size_t n = len;

	/* The 4-byte header packs MPLEX_BASE+code into the top byte and the
	 * payload length into the low 24 bits. */
	SIVAL(buffer, 0, ((MPLEX_BASE + (int)code)<<24) + len);

	/* When the generator reads messages from the msg_fd_in pipe, it can
	 * cause output to occur down the socket. Setting contiguous_write_len
	 * prevents the reading of msg_fd_in once we actually start to write
	 * this sequence of data (though we might read it before the start). */
	if (am_generator && msg_fd_in >= 0)
		contiguous_write_len = len + 4;

	/* Send the header plus as much payload as fits alongside it in the
	 * local buffer in one writefd_unbuffered() call... */
	if (n > sizeof buffer - 4)
		n = sizeof buffer - 4;

	memcpy(&buffer[4], buf, n);
	writefd_unbuffered(sock_f_out, buffer, n+4);

	len -= n;
	buf += n;

	/* ...then write any remaining payload straight from the caller's
	 * buffer. */
	if (len)
		writefd_unbuffered(sock_f_out, buf, len);

	if (am_generator && msg_fd_in >= 0)
		contiguous_write_len = 0;
}
+
+
+void io_flush(int flush_it_all)
+{
+ msg_list_push(flush_it_all);
+
+ if (!iobuf_out_cnt || no_flush)
+ return;
+
+ if (io_multiplexing_out)
+ mplex_write(MSG_DATA, iobuf_out, iobuf_out_cnt);
+ else
+ writefd_unbuffered(sock_f_out, iobuf_out, iobuf_out_cnt);
+ iobuf_out_cnt = 0;
+}
+
+
+static void writefd(int fd,char *buf,size_t len)
+{
+ if (fd == msg_fd_out) {
+ rprintf(FERROR, "Internal error: wrong write used in receiver.\n");
+ exit_cleanup(RERR_PROTOCOL);
+ }
+
+ if (fd == sock_f_out)
+ stats.total_written += len;
+
+ if (fd == write_batch_monitor_out) {
+ if ((size_t)write(batch_fd, buf, len) != len)
+ exit_cleanup(RERR_FILEIO);
+ }
+
+ if (!iobuf_out || fd != sock_f_out) {
+ writefd_unbuffered(fd, buf, len);
+ return;
+ }
+
+ while (len) {
+ int n = MIN((int)len, IO_BUFFER_SIZE - iobuf_out_cnt);
+ if (n > 0) {
+ memcpy(iobuf_out+iobuf_out_cnt, buf, n);
+ buf += n;
+ len -= n;
+ iobuf_out_cnt += n;
+ }
+
+ if (iobuf_out_cnt == IO_BUFFER_SIZE)
+ io_flush(NORMAL_FLUSH);
+ }
+}
+
+
+void write_shortint(int f, int x)
+{
+ uchar b[2];
+ b[0] = x;
+ b[1] = x >> 8;
+ writefd(f, (char *)b, 2);
+}
+
+
+void write_int(int f,int32 x)
+{
+ char b[4];
+ SIVAL(b,0,x);
+ writefd(f,b,4);
+}
+
+
/* Like write_int(), but sets io_write_phase to the given name for the
 * duration of the write so that any error reported by the write path
 * (see writefd_unbuffered) identifies what was being sent. */
void write_int_named(int f, int32 x, const char *phase)
{
	io_write_phase = phase;
	write_int(f, x);
	io_write_phase = phase_unknown;
}
+
+
+/*
+ * Note: int64 may actually be a 32-bit type if ./configure couldn't find any
+ * 64-bit types on this platform.
+ */
+void write_longint(int f, int64 x)
+{
+ char b[8];
+
+ if (x <= 0x7FFFFFFF) {
+ write_int(f, (int)x);
+ return;
+ }
+
+#if SIZEOF_INT64 < 8
+ rprintf(FERROR, "Integer overflow: attempted 64-bit offset\n");
+ exit_cleanup(RERR_UNSUPPORTED);
+#else
+ write_int(f, (int32)0xFFFFFFFF);
+ SIVAL(b,0,(x&0xFFFFFFFF));
+ SIVAL(b,4,((x>>32)&0xFFFFFFFF));
+
+ writefd(f,b,8);