+ int r, n = 0;
+ char *p;
+
+ again:
+ if (feof (fp)) {
+ *ret = safe_realloc (g, *ret, n + 1);
+ (*ret)[n] = '\0';
+ return n;
+ }
+
+ *ret = safe_realloc (g, *ret, n + BUFSIZ);
+ p = &(*ret)[n];
+ r = fread (p, 1, BUFSIZ, fp);
+ if (ferror (fp)) {
+ perrorf (g, "read");
+ return -1;
+ }
+ n += r;
+ goto again;
+}
+
+/* Test whether qemu supports a command line option, simply by
+ * substring-searching the cached qemu help text.  Returns false if
+ * the help text was never captured.
+ */
+static int
+qemu_supports (guestfs_h *g, const char *option)
+{
+  if (g->qemu_help == NULL)
+    return 0;
+  return strstr (g->qemu_help, option) != NULL;
+}
+
+/* Block until the guest daemon announces that it has launched.
+ *
+ * Must be called while the handle is in the LAUNCHING state.
+ * Returns 0 on success (handle is then READY), or -1 on error
+ * (error() has already been called).
+ */
+int
+guestfs__wait_ready (guestfs_h *g)
+{
+ int r;
+ uint32_t size;
+ void *buf = NULL;
+
+ /* Already launched: nothing to wait for. */
+ if (g->state == READY) return 0;
+
+ if (g->state == BUSY) {
+ error (g, _("qemu has finished launching already"));
+ return -1;
+ }
+
+ /* Any other state (eg. CONFIG) means launch was never started. */
+ if (g->state != LAUNCHING) {
+ error (g, _("qemu has not been launched yet"));
+ return -1;
+ }
+
+ /* Wait for the daemon's message; only the size/flag word matters,
+ * any payload is discarded.
+ */
+ r = recv_from_daemon (g, &size, &buf);
+ free (buf);
+
+ if (r == -1) return -1;
+
+ if (size != GUESTFS_LAUNCH_FLAG) {
+ error (g, _("guestfs_wait_ready failed, see earlier error messages"));
+ return -1;
+ }
+
+ /* This is possible in some really strange situations, such as
+ * guestfsd starts up OK but then qemu immediately exits. Check for
+ * it because the caller is probably expecting to be able to send
+ * commands after this function returns.
+ */
+ if (g->state != READY) {
+ error (g, _("qemu launched and contacted daemon, but state != READY"));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Kill the qemu subprocess (and its recovery process, if any).
+ *
+ * Returns 0 on success, or -1 (having called error()) if there is no
+ * subprocess to kill.  The kill(2) results are deliberately ignored:
+ * the processes may already have exited.
+ */
+int
+guestfs__kill_subprocess (guestfs_h *g)
+{
+  if (g->state == CONFIG) {
+    error (g, _("no subprocess to kill"));
+    return -1;
+  }
+
+  if (g->verbose)
+    fprintf (stderr, "sending SIGTERM to process %d\n", g->pid);
+
+  kill (g->pid, SIGTERM);
+  /* Use the named constant SIGKILL instead of the magic number 9. */
+  if (g->recoverypid > 0) kill (g->recoverypid, SIGKILL);
+
+  return 0;
+}
+
+/* Access current state: true iff the handle is still being configured
+ * (no subprocess launched yet).
+ */
+int
+guestfs__is_config (guestfs_h *g)
+{
+  return g->state == CONFIG ? 1 : 0;
+}
+
+/* True iff the subprocess has been started but is not yet ready. */
+int
+guestfs__is_launching (guestfs_h *g)
+{
+  return g->state == LAUNCHING ? 1 : 0;
+}
+
+/* True iff the handle is ready to accept commands. */
+int
+guestfs__is_ready (guestfs_h *g)
+{
+  return g->state == READY ? 1 : 0;
+}
+
+/* True iff an RPC is currently in flight. */
+int
+guestfs__is_busy (guestfs_h *g)
+{
+  return g->state == BUSY ? 1 : 0;
+}
+
+/* Return the raw state value (one of the states used in this file:
+ * CONFIG, LAUNCHING, READY, BUSY, ...).
+ */
+int
+guestfs__get_state (guestfs_h *g)
+{
+ return g->state;
+}
+
+/* Register the callback that receives log messages (anything read
+ * from qemu stdout -- guestfsd stderr and guest kernel dmesg -- is
+ * passed up through this callback; see the protocol comment below).
+ * opaque is stored alongside and handed back to the callback.
+ * Replaces any previously registered callback.
+ */
+void
+guestfs_set_log_message_callback (guestfs_h *g,
+ guestfs_log_message_cb cb, void *opaque)
+{
+ g->log_message_cb = cb;
+ g->log_message_cb_data = opaque;
+}
+
+/* Register the callback invoked when the qemu subprocess dies (see
+ * child_cleanup below).  opaque is stored alongside and handed back
+ * to the callback.  Replaces any previously registered callback.
+ */
+void
+guestfs_set_subprocess_quit_callback (guestfs_h *g,
+ guestfs_subprocess_quit_cb cb, void *opaque)
+{
+ g->subprocess_quit_cb = cb;
+ g->subprocess_quit_cb_data = opaque;
+}
+
+/* Register the launch-done callback.  Presumably invoked when the
+ * appliance finishes launching -- the call site is not in this part
+ * of the file (TODO confirm).  opaque is stored alongside and handed
+ * back to the callback.  Replaces any previously registered callback.
+ */
+void
+guestfs_set_launch_done_callback (guestfs_h *g,
+ guestfs_launch_done_cb cb, void *opaque)
+{
+ g->launch_done_cb = cb;
+ g->launch_done_cb_data = opaque;
+}
+
+/*----------------------------------------------------------------------*/
+
+/* This is the code used to send and receive RPC messages and (for
+ * certain types of message) to perform file transfers. This code is
+ * driven from the generated actions (src/guestfs-actions.c). There
+ * are five different cases to consider:
+ *
+ * (1) A non-daemon function. There is no RPC involved at all, it's
+ * all handled inside the library.
+ *
+ * (2) A simple RPC (eg. "mount"). We write the request, then read
+ * the reply. The sequence of calls is:
+ *
+ * guestfs___set_busy
+ * guestfs___send
+ * guestfs___recv
+ * guestfs___end_busy
+ *
+ * (3) An RPC with FileIn parameters (eg. "upload").  We write the
+ * request, then write the file(s), then read the reply. The sequence
+ * of calls is:
+ *
+ * guestfs___set_busy
+ * guestfs___send
+ * guestfs___send_file (possibly multiple times)
+ * guestfs___recv
+ * guestfs___end_busy
+ *
+ * (4) An RPC with FileOut parameters (eg. "download").  We write the
+ * request, then read the reply, then read the file(s). The sequence
+ * of calls is:
+ *
+ * guestfs___set_busy
+ * guestfs___send
+ * guestfs___recv
+ * guestfs___recv_file (possibly multiple times)
+ * guestfs___end_busy
+ *
+ * (5) Both FileOut and FileIn parameters. There are no calls like
+ * this in the current API, but they would be implemented as a
+ * combination of cases (3) and (4).
+ *
+ * During all writes and reads, we also select(2) on qemu stdout
+ * looking for messages (guestfsd stderr and guest kernel dmesg), and
+ * anything received is passed up through the log_message_cb. This is
+ * also the reason why all the sockets are non-blocking. We also have
+ * to check for EOF (qemu died). All of this is handled by the
+ * functions send_to_daemon and recv_from_daemon.
+ */
+
+/* Move the handle from READY to BUSY at the start of an RPC.
+ * Returns 0 on success, or -1 (having called error()) if the handle
+ * was in any state other than READY.
+ */
+int
+guestfs___set_busy (guestfs_h *g)
+{
+  if (g->state == READY) {
+    g->state = BUSY;
+    return 0;
+  }
+
+  error (g, _("guestfs_set_busy: called when in state %d != READY"),
+         g->state);
+  return -1;
+}
+
+/* Return the handle to READY at the end of an RPC.  BUSY drops back
+ * to READY; CONFIG and READY are accepted silently; any other state
+ * (LAUNCHING, NO_HANDLE, ...) is an error.  Returns 0 on success,
+ * -1 on error.
+ */
+int
+guestfs___end_busy (guestfs_h *g)
+{
+  if (g->state == BUSY) {
+    g->state = READY;
+    return 0;
+  }
+
+  if (g->state == CONFIG || g->state == READY)
+    return 0;
+
+  /* LAUNCHING, NO_HANDLE, or any unexpected value. */
+  error (g, _("guestfs_end_busy: called when in state %d"), g->state);
+  return -1;
+}
+
+/* This is called if we detect EOF, ie. qemu died.  Reap the
+ * subprocess(es), close every file descriptor connected to qemu,
+ * reset the handle back to the CONFIG state, and notify the
+ * subprocess-quit callback if one is registered.
+ */
+static void
+child_cleanup (guestfs_h *g)
+{
+  if (g->verbose)
+    fprintf (stderr, "child_cleanup: %p: child process died\n", g);
+
+  /*kill (g->pid, SIGTERM);*/
+  /* Use the named constant SIGKILL instead of the magic number 9. */
+  if (g->recoverypid > 0) kill (g->recoverypid, SIGKILL);
+  waitpid (g->pid, NULL, 0);
+  if (g->recoverypid > 0) waitpid (g->recoverypid, NULL, 0);
+  close (g->fd[0]);
+  close (g->fd[1]);
+  close (g->sock);
+  /* Invalidate the stored descriptors/pids so later code cannot
+   * reuse them by accident.
+   */
+  g->fd[0] = -1;
+  g->fd[1] = -1;
+  g->sock = -1;
+  g->pid = 0;
+  g->recoverypid = 0;
+  g->start_t = 0;
+  g->state = CONFIG;
+  if (g->subprocess_quit_cb)
+    g->subprocess_quit_cb (g, g->subprocess_quit_cb_data);
+}
+
+static int
+read_log_message_or_eof (guestfs_h *g, int fd)
+{
+ char buf[BUFSIZ];