#include <sys/un.h>
#endif
+#include <arpa/inet.h>
+#include <netinet/in.h>
+
#include "guestfs.h"
#include "guestfs-internal-actions.h"
#include "guestfs_protocol.h"
static void default_error_cb (guestfs_h *g, void *data, const char *msg);
static int send_to_daemon (guestfs_h *g, const void *v_buf, size_t n);
static int recv_from_daemon (guestfs_h *g, uint32_t *size_rtn, void **buf_rtn);
+static int accept_from_daemon (guestfs_h *g);
+static int check_peer_euid (guestfs_h *g, int sock, uid_t *rtn);
static void close_handles (void);
#define UNIX_PATH_MAX 108
int tries;
char *path, *pelem, *pend;
char *kernel = NULL, *initrd = NULL;
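+  /* If we are using the "null vmchannel" implementation, this is
+   * the port number of the listening socket that the daemon will
+   * connect back to.  Otherwise it is 0.
+   */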
+ int null_vmchannel_sock;
char unixsock[256];
struct sockaddr_un addr;
if (test_qemu (g) == -1)
goto cleanup0;
- /* Make the vmchannel socket. */
- snprintf (unixsock, sizeof unixsock, "%s/sock", g->tmpdir);
- unlink (unixsock);
+ /* Choose which vmchannel implementation to use. */
+ if (qemu_supports (g, "-net user")) {
+ /* The "null vmchannel" implementation. Requires SLIRP (user mode
+ * networking in qemu) but no other vmchannel support. The daemon
+ * will connect back to a random port number on localhost.
+ */
+ struct sockaddr_in addr;
+ socklen_t addrlen = sizeof addr;
+
+ g->sock = socket (AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (g->sock == -1) {
+ perrorf (g, "socket");
+ goto cleanup0;
+ }
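+  /* Bind to a kernel-assigned ephemeral port (sin_port = 0) on the
+   * loopback interface; the getsockname call below retrieves the
+   * port number we were actually given.
+   */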
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons (0);
+ addr.sin_addr.s_addr = htonl (INADDR_LOOPBACK);
+ if (bind (g->sock, (struct sockaddr *) &addr, addrlen) == -1) {
+ perrorf (g, "bind");
+ goto cleanup0;
+ }
+
+ if (listen (g->sock, 256) == -1) {
+ perrorf (g, "listen");
+ goto cleanup0;
+ }
+
+ if (getsockname (g->sock, (struct sockaddr *) &addr, &addrlen) == -1) {
+ perrorf (g, "getsockname");
+ goto cleanup0;
+ }
+
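+  /* Make the listening socket non-blocking so that accept_from_daemon
+   * below can multiplex it with qemu's stdout using select(2).
+   */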
+ if (fcntl (g->sock, F_SETFL, O_NONBLOCK) == -1) {
+ perrorf (g, "fcntl");
+ goto cleanup0;
+ }
+
+ null_vmchannel_sock = ntohs (addr.sin_port);
+ if (g->verbose)
+ fprintf (stderr, "null_vmchannel_sock = %d\n", null_vmchannel_sock);
+ } else {
+    /* Using a real vmchannel implementation.  We need to create a
+     * local Unix domain socket for qemu to use.
+     */
+ snprintf (unixsock, sizeof unixsock, "%s/sock", g->tmpdir);
+ unlink (unixsock);
+ null_vmchannel_sock = 0;
+ }
if (pipe (wfd) == -1 || pipe (rfd) == -1) {
perrorf (g, "pipe");
if (qemu_supports (g, "-rtc-td-hack"))
add_cmdline (g, "-rtc-td-hack");
- if (qemu_supports (g, "-chardev") && qemu_supports (g, "guestfwd")) {
- /* New-style -net user,guestfwd=... syntax for guestfwd. See:
- *
- * http://git.savannah.gnu.org/cgit/qemu.git/commit/?id=c92ef6a22d3c71538fcc48fb61ad353f7ba03b62
- *
- * The original suggested format doesn't work, see:
- *
- * http://lists.gnu.org/archive/html/qemu-devel/2009-07/msg01654.html
- *
- * However Gerd Hoffman privately suggested to me using -chardev
- * instead, which does work.
- */
+  /* If qemu has SLIRP (user mode network) enabled then we can get
+   * away with "no vmchannel": the daemon simply connects back over
+   * SLIRP to a host port that we are listening on.
+   */
+ if (null_vmchannel_sock) {
+ add_cmdline (g, "-net");
+ add_cmdline (g, "user,vlan=0,net=10.0.2.0/8");
+
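+    /* Tell the daemon to connect back to us.  From the appliance's
+     * point of view, 10.0.2.2 is the standard SLIRP address of the
+     * host.
+     */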
+ snprintf (buf, sizeof buf,
+ "guestfs_vmchannel=tcp:10.0.2.2:%d ", null_vmchannel_sock);
+ vmchannel = strdup (buf);
+ }
+
+ /* New-style -net user,guestfwd=... syntax for guestfwd. See:
+ *
+ * http://git.savannah.gnu.org/cgit/qemu.git/commit/?id=c92ef6a22d3c71538fcc48fb61ad353f7ba03b62
+ *
+ * The original suggested format doesn't work, see:
+ *
+ * http://lists.gnu.org/archive/html/qemu-devel/2009-07/msg01654.html
+ *
+   * However Gerd Hoffmann privately suggested to me using -chardev
+   * instead, which does work.
+ */
+ else if (qemu_supports (g, "-chardev") && qemu_supports (g, "guestfwd")) {
snprintf (buf, sizeof buf,
"socket,id=guestfsvmc,path=%s,server,nowait", unixsock);
add_cmdline (g, "-net");
add_cmdline (g, buf);
- } else {
- /* Not guestfwd. HOPEFULLY this qemu uses the older -net channel
- * syntax, or if not then we'll get a quick failure.
- */
+
+ vmchannel = "guestfs_vmchannel=tcp:" GUESTFWD_ADDR ":" GUESTFWD_PORT " ";
+ }
+
+ /* Not guestfwd. HOPEFULLY this qemu uses the older -net channel
+ * syntax, or if not then we'll get a quick failure.
+ */
+ else {
snprintf (buf, sizeof buf,
"channel," GUESTFWD_PORT ":unix:%s,server,nowait", unixsock);
add_cmdline (g, buf);
add_cmdline (g, "-net");
add_cmdline (g, "user,vlan=0,net=10.0.2.0/8");
+
+ vmchannel = "guestfs_vmchannel=tcp:" GUESTFWD_ADDR ":" GUESTFWD_PORT " ";
}
add_cmdline (g, "-net");
add_cmdline (g, "nic,model=" NET_IF ",vlan=0");
- vmchannel = "guestfs_vmchannel=tcp:" GUESTFWD_ADDR ":" GUESTFWD_PORT " ";
#define LINUX_CMDLINE \
"panic=1 " /* force kernel to panic if daemon exits */ \
g->fd[0] = wfd[1]; /* stdin of child */
g->fd[1] = rfd[0]; /* stdout of child */
- /* Open the Unix socket. The vmchannel implementation that got
- * merged with qemu sucks in a number of ways. Both ends do
- * connect(2), which means that no one knows what, if anything, is
- * connected to the other end, or if it becomes disconnected. Even
- * worse, we have to wait some indeterminate time for qemu to create
- * the socket and connect to it (which happens very early in qemu's
- * start-up), so any code that uses vmchannel is inherently racy.
- * Hence this silly loop.
- */
- g->sock = socket (AF_UNIX, SOCK_STREAM, 0);
- if (g->sock == -1) {
- perrorf (g, "socket");
- goto cleanup1;
- }
+ if (null_vmchannel_sock) {
+ int sock = -1;
+ uid_t uid;
- if (fcntl (g->sock, F_SETFL, O_NONBLOCK) == -1) {
- perrorf (g, "fcntl");
- goto cleanup2;
- }
+    /* Null vmchannel implementation: We listen on g->sock for a
+     * connection.  The connection could come from any local process,
+     * so for security we must check that it comes from the appliance
+     * (or at least from a process running under our effective UID).
+     */
+ while (sock == -1) {
+ sock = accept_from_daemon (g);
+ if (sock == -1)
+ goto cleanup1;
+
+ if (check_peer_euid (g, sock, &uid) == -1)
+ goto cleanup1;
+ if (uid != geteuid ()) {
+        fprintf (stderr,
+                 "libguestfs: warning: unexpected connection from UID %d to port %d\n",
+                 (int) uid, null_vmchannel_sock);
+        close (sock);
+        sock = -1;    /* Not the appliance: wait for another connection. */
+        continue;
+ }
+ }
- addr.sun_family = AF_UNIX;
- strncpy (addr.sun_path, unixsock, UNIX_PATH_MAX);
- addr.sun_path[UNIX_PATH_MAX-1] = '\0';
+ if (fcntl (sock, F_SETFL, O_NONBLOCK) == -1) {
+ perrorf (g, "fcntl");
+ goto cleanup1;
+ }
- tries = 100;
- /* Always sleep at least once to give qemu a small chance to start up. */
- usleep (10000);
- while (tries > 0) {
- r = connect (g->sock, (struct sockaddr *) &addr, sizeof addr);
- if ((r == -1 && errno == EINPROGRESS) || r == 0)
- goto connected;
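+    /* Replace the listening socket with the accepted connection:
+     * from here on g->sock talks directly to the daemon.
+     */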
+ close (g->sock);
+ g->sock = sock;
+ } else {
+ /* Other vmchannel. Open the Unix socket.
+ *
+ * The vmchannel implementation that got merged with qemu sucks in
+ * a number of ways. Both ends do connect(2), which means that no
+ * one knows what, if anything, is connected to the other end, or
+ * if it becomes disconnected. Even worse, we have to wait some
+ * indeterminate time for qemu to create the socket and connect to
+ * it (which happens very early in qemu's start-up), so any code
+ * that uses vmchannel is inherently racy. Hence this silly loop.
+ */
+ g->sock = socket (AF_UNIX, SOCK_STREAM, 0);
+ if (g->sock == -1) {
+ perrorf (g, "socket");
+ goto cleanup1;
+ }
- if (errno != ENOENT)
- perrorf (g, "connect");
- tries--;
- usleep (100000);
- }
+ if (fcntl (g->sock, F_SETFL, O_NONBLOCK) == -1) {
+ perrorf (g, "fcntl");
+ goto cleanup1;
+ }
- error (g, _("failed to connect to vmchannel socket"));
- goto cleanup2;
+ addr.sun_family = AF_UNIX;
+ strncpy (addr.sun_path, unixsock, UNIX_PATH_MAX);
+ addr.sun_path[UNIX_PATH_MAX-1] = '\0';
+
+ tries = 100;
+ /* Always sleep at least once to give qemu a small chance to start up. */
+ usleep (10000);
+ while (tries > 0) {
+ r = connect (g->sock, (struct sockaddr *) &addr, sizeof addr);
+ if ((r == -1 && errno == EINPROGRESS) || r == 0)
+ goto connected;
+
+ if (errno != ENOENT)
+ perrorf (g, "connect");
+ tries--;
+ usleep (100000);
+ }
+
+ error (g, _("failed to connect to vmchannel socket"));
+ goto cleanup1;
+
+ connected: ;
+ }
- connected:
g->state = LAUNCHING;
/* Wait for qemu to start and to connect back to us via vmchannel and
if (size != GUESTFS_LAUNCH_FLAG) {
error (g, _("guestfs_launch failed, see earlier error messages"));
- goto cleanup2;
+ goto cleanup1;
}
/* This is possible in some really strange situations, such as
*/
if (g->state != READY) {
error (g, _("qemu launched and contacted daemon, but state != READY"));
- goto cleanup2;
+ goto cleanup1;
}
return 0;
- cleanup2:
- close (g->sock);
-
cleanup1:
close (wfd[1]);
close (rfd[0]);
if (g->recoverypid > 0) waitpid (g->recoverypid, NULL, 0);
g->fd[0] = -1;
g->fd[1] = -1;
- g->sock = -1;
g->pid = 0;
g->recoverypid = 0;
g->start_t = 0;
cleanup0:
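+  /* Close the socket if we opened one, and return the handle to
+   * the (pre-launch) configuration state.
+   */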
+ if (g->sock >= 0) {
+ close (g->sock);
+ g->sock = -1;
+ }
+ g->state = CONFIG;
free (kernel);
free (initrd);
return -1;
return g->qemu_help && strstr (g->qemu_help, option) != NULL;
}
+/* Check the peer effective UID for a TCP socket.  Ideally we'd like
+ * SO_PEERCRED for a loopback TCP socket, but on Linux that only
+ * works for Unix domain sockets (Solaris can do it, via
+ * getpeerucred(3C)), so we read /proc/net/tcp instead.
+ */
+static int
+check_peer_euid (guestfs_h *g, int sock, uid_t *rtn)
+{
+ struct sockaddr_in peer;
+ socklen_t addrlen = sizeof peer;
+
+ if (getpeername (sock, (struct sockaddr *) &peer, &addrlen) == -1) {
+ perrorf (g, "getpeername");
+ return -1;
+ }
+
+ if (peer.sin_family != AF_INET ||
+ ntohl (peer.sin_addr.s_addr) != INADDR_LOOPBACK) {
+ error (g, "check_peer_euid: unexpected connection from non-IPv4, non-loopback peer (family = %d, addr = %s)",
+ peer.sin_family, inet_ntoa (peer.sin_addr));
+ return -1;
+ }
+
+ struct sockaddr_in our;
+ addrlen = sizeof our;
+ if (getsockname (sock, (struct sockaddr *) &our, &addrlen) == -1) {
+ perrorf (g, "getsockname");
+ return -1;
+ }
+
+ FILE *fp = fopen ("/proc/net/tcp", "r");
+ if (fp == NULL) {
+ perrorf (g, "/proc/net/tcp");
+ return -1;
+ }
+
+ char line[256];
+ if (fgets (line, sizeof line, fp) == NULL) { /* Drop first line. */
+ error (g, "unexpected end of file in /proc/net/tcp");
+ fclose (fp);
+ return -1;
+ }
+
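+  /* A typical line (field values illustrative, addresses shown as
+   * they appear on a little-endian host) looks like:
+   *
+   *   0: 0100007F:9406 0100007F:8001 01 00000000:00000000 00:00000000 00000000  1000 ...
+   *
+   * ie. hex local and remote address:port pairs, followed by state,
+   * queue, timer and retransmit fields, then the owning UID.
+   */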
+ while (fgets (line, sizeof line, fp) != NULL) {
+ unsigned line_our_addr, line_our_port, line_peer_addr, line_peer_port;
+    int dummy0;
+    unsigned dummy1, dummy2, dummy3, dummy4, dummy5, dummy6; /* %X needs unsigned */
+ int line_uid;
+
+ if (sscanf (line, "%d:%08X:%04X %08X:%04X %02X %08X:%08X %02X:%08X %08X %d",
+ &dummy0,
+ &line_our_addr, &line_our_port,
+ &line_peer_addr, &line_peer_port,
+ &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, &dummy6,
+ &line_uid) == 12) {
+ /* Note about /proc/net/tcp: local_address and rem_address are
+ * always in network byte order. However the port part is
+ * always in host byte order.
+ *
+ * The sockname and peername that we got above are in network
+ * byte order. So we have to byte swap the port but not the
+ * address part.
+ */
+ if (line_our_addr == our.sin_addr.s_addr &&
+ line_our_port == ntohs (our.sin_port) &&
+ line_peer_addr == peer.sin_addr.s_addr &&
+ line_peer_port == ntohs (peer.sin_port)) {
+ *rtn = line_uid;
+ fclose (fp);
+ return 0;
+ }
+ }
+ }
+
+ error (g, "check_peer_euid: no matching TCP connection found in /proc/net/tcp");
+ fclose (fp);
+ return -1;
+}
+
/* You had to call this function after launch in versions <= 1.0.70,
* but it is now a no-op.
*/
FD_SET (g->sock, &rset); /* Read socket for cancellation & EOF. */
FD_SET (g->sock, &wset); /* Write to socket to send the data. */
- int max_fd = g->sock > g->fd[1] ? g->sock : g->fd[1];
+ int max_fd = MAX (g->sock, g->fd[1]);
while (n > 0) {
rset2 = rset;
FD_SET (g->fd[1], &rset); /* Read qemu stdout for log messages & EOF. */
FD_SET (g->sock, &rset); /* Read socket for data & EOF. */
- int max_fd = g->sock > g->fd[1] ? g->sock : g->fd[1];
+ int max_fd = MAX (g->sock, g->fd[1]);
*size_rtn = 0;
*buf_rtn = NULL;
return 0;
}
+/* This is very much like recv_from_daemon above, but g->sock is
+ * a listening socket and we are accepting a new connection on
+ * that socket instead of reading anything. Returns the newly
+ * accepted socket.
+ */
+static int
+accept_from_daemon (guestfs_h *g)
+{
+ fd_set rset, rset2;
+
+ if (g->verbose)
+ fprintf (stderr,
+ "accept_from_daemon: %p g->state = %d\n", g, g->state);
+
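+  /* We cannot simply block in accept(2): if qemu fails early it
+   * writes to its stdout and exits, and we would hang forever.  So
+   * select on both qemu's stdout and the listening socket.
+   */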
+ FD_ZERO (&rset);
+
+ FD_SET (g->fd[1], &rset); /* Read qemu stdout for log messages & EOF. */
+ FD_SET (g->sock, &rset); /* Read socket for accept. */
+
+ int max_fd = MAX (g->sock, g->fd[1]);
+ int sock = -1;
+
+ while (sock == -1) {
+ rset2 = rset;
+ int r = select (max_fd+1, &rset2, NULL, NULL, NULL);
+ if (r == -1) {
+ if (errno == EINTR || errno == EAGAIN)
+ continue;
+ perrorf (g, "select");
+ return -1;
+ }
+
+ if (FD_ISSET (g->fd[1], &rset2)) {
+ if (read_log_message_or_eof (g, g->fd[1]) == -1)
+ return -1;
+ }
+ if (FD_ISSET (g->sock, &rset2)) {
+ sock = accept (g->sock, NULL, NULL);
+ if (sock == -1) {
+ if (errno == EINTR || errno == EAGAIN)
+ continue;
+ perrorf (g, "accept");
+ return -1;
+ }
+ }
+ }
+
+ return sock;
+}
+
int
guestfs___send (guestfs_h *g, int proc_nr, xdrproc_t xdrp, char *args)
{