- Entirely command line driven.
- - No required dependencies except libvirt and ssh.
-
- - Only designed for tiny clusters (up to around 10 hosts).
+ - No required dependencies except ansible (on the client only),
+ libvirt and ssh.
- Nothing to install on the nodes except libvirtd and sshd.
- - Single, simple configuration file.
+ - Designed only for small clusters (up to around 10-20 hosts).
+
+ - Simple configuration.
Example commands
----------------------------------------------------------------------
- Edit the configuration file (mclu.conf).
+ - Add the hosts group (usually called '[cluster]') to /etc/ansible/hosts.
+ You can set $ANSIBLE_HOSTS to change the location of this file.
+
- Run commands such as:
./run status
- Edit the configuration file (/etc/mclu/mclu.conf).
+ - Add the hosts group (usually called '[cluster]') to /etc/ansible/hosts.
+
- Run commands such as:
mclu status
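+
+ The hosts group looks like this (host names and MAC addresses are
+ examples; the mac= fields are optional, used only for wake-on-LAN):
+
+     [cluster]
+     ham0 mac=74:d4:35:55:85:3f
+     ham1 mac=74:d4:35:51:ab:86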
AC_MSG_ERROR([SSH client is required])
fi
+dnl Ansible client (required).
+dnl Actually the ansible command line tool is not used, but we do use
+dnl the Python library. XXX Should check for that instead.
+AC_PATH_PROG([ANSIBLE],[ansible],[no])
+if test "x$ANSIBLE" = "xno"; then
+ AC_MSG_ERROR([ansible is required])
+fi
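+dnl A sketch of such a check (assuming a $PYTHON variable, eg. set by
+dnl AM_PATH_PYTHON):
+dnl   AC_MSG_CHECKING([for the ansible Python library])
+dnl   AS_IF([$PYTHON -c 'import ansible.runner' >/dev/null 2>&1],
+dnl     [AC_MSG_RESULT([yes])],
+dnl     [AC_MSG_ERROR([the ansible Python library is required])])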
+
dnl Wake-on-LAN client (optional).
AC_PATH_PROG([WOL],[wol],[no])
import sys
import os
import re
-import libvirt
-
-# Helper function to ping all the nodes.
-def ping_nodes (nodes):
- for node in nodes.values():
- node.ping ()
-# Helper function to get node objects from a list of node names.
-def get_nodes_by_name (all_nodes, names, all):
- if not all:
- return map (lambda name : get_node_by_name (all_nodes, name), names)
- else:
- return all_nodes.values()
-
-def get_node_by_name (nodes, name):
- if name in nodes:
- return nodes[name]
- else:
- sys.exit ("error: node does not exist: %s" % name)
+import libvirt
+import ansible.runner
+import ansible.inventory
# Get separate list of running and inactive guests.
-def get_all_guests (c, nodes):
+def get_all_guests (c):
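+    # Returns two dicts (shapes shown with illustrative names):
+    #   running  = { 'vm1' : { 'vm' : 'vm1', 'node' : 'ham0' } }
+    #   inactive = { 'vm2' : 'vm2' }
+    # ie. running maps each running guest to its node; inactive maps a
+    # defined but not running guest name to itself.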
running = {}
inactive = {}
# Find running guests.
- for node in nodes:
+ runner = ansible.runner.Runner (
+ remote_user = 'root',
+ module_name = 'virt',
+ module_args = 'command=list_vms', # XXX ignore state=shutdown
+ pattern = c['nodes_group'],
+ )
+ data = runner.run ()
+
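+    # runner.run() returns a dict with two keys: 'contacted' maps each
+    # node that answered to the module's result, and 'dark' lists the
+    # nodes that could not be reached.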
+ for node in data['contacted'].keys():
# Get the flat list of guests from this node:
- doms = node.guests ()
+ vms = data['contacted'][node]['list_vms']
# Store which node the guest is running on, and turn list into a map.
- for dom in doms:
- name = dom.name()
- if name in running:
- sys.exit ("error: virtual machine %s is running on two nodes!" % name)
- running[name] = { 'dom' : dom, 'node' : node }
+ for vm in vms:
+        if vm in running:
+            sys.exit ("error: virtual machine %s is running on two nodes!" % vm)
+ running[vm] = { 'vm' : vm, 'node' : node }
# Find inactive guests (XML configuration files).
- for name in get_guest_configs (c, nodes):
+ for name in get_guest_configs (c):
if name not in running:
inactive[name] = name
return running, inactive
# Get the names of guests from the XML configuration files in xmls_dir.
-def get_guest_configs (c, nodes):
+def get_guest_configs (c):
names = []
for filename in sorted (os.listdir (c['xmls_dir'])):
m = re.search (r'^(.*)\.xml$', filename)
-        names.append (m.group (1))
+        if m:
+            names.append (m.group (1))
return names
-# Convert virDomainState to string.
-# Copied from virt-manager.
-def pretty_run_status (status):
- if status == libvirt.VIR_DOMAIN_RUNNING:
- return "running"
- elif status == libvirt.VIR_DOMAIN_PAUSED:
- return "paused"
- elif status == libvirt.VIR_DOMAIN_SHUTDOWN:
- return "shutting down"
- elif status == libvirt.VIR_DOMAIN_SHUTOFF:
- return "shutoff"
- elif status == libvirt.VIR_DOMAIN_CRASHED:
- return "crashed"
- elif status == libvirt.VIR_DOMAIN_PMSUSPENDED:
- return "suspended"
-
-# Start a guest running on a given node. The node must not be
+# Start a guest running on a given node. The guest must not be
# running anywhere already.
-def start_guest (c, node, guest_name):
+def start_guest (c, node_name, guest_name):
fp = open ("%s/%s.xml" % (c['xmls_dir'], guest_name), "r")
xml = fp.read ()
fp.close ()
- conn = node.get_connection()
+ conn = libvirt.open (uri_of_node (node_name))
+    if conn is None:
+ sys.exit ("error: could not open a libvirt connection to %s" %
+ node_name)
conn.createXML (xml)
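+    # Note that createXML starts a transient guest: the persistent
+    # definition is the XML file kept in xmls_dir.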
-def pick_any_node_which_is_up (nodes):
- node = None
- for n in nodes.values():
- if n.ping ():
- node = n
- break
- if not node:
- sys.exit ("error: no nodes are up, use mclu on [node|--all]")
- return node
+def pick_any_node_which_is_up (c):
+ inventory = ansible.inventory.Inventory ()
+ runner = ansible.runner.Runner (
+ remote_user = 'root',
+ module_name = 'ping',
+ inventory = inventory,
+ pattern = c['nodes_group'],
+ )
+ data = runner.run ()
+ if len (data['contacted']) == 0:
+ sys.exit ("error: no nodes are up, use mclu on")
+ return data['contacted'].keys()[0]
+
+# XXX Make this configurable.
+def uri_of_node (node_name):
+ return "qemu+ssh://root@%s/system" % node_name
# nodes. But it must be available on the machine running 'mclu'.
xmls_dir = %(config_dir)s/xmls/
-# The nodes section lists all nodes. The keys don't need to be
-# sequential, but must start with 'node'. The values are the short
-# names of the nodes. If a node goes out of service permanently, you
-# can just comment it out here.
-[nodes]
-node0 = ham0
-node1 = ham1
-node2 = ham2
-node3 = ham3
-
-# You need one section per node listed in [nodes].
-# Possible fields are:
-# host
-# Hostname or IP address of the node, if omitted it uses the
-# node name as the hostname
-# mac
-# MAC (ethernet) address (only used for wake-on-LAN)
-# uri
-# Libvirt URI used to access the remote libvirt daemon running
-# on the node. The default is: qemu+ssh://root@%(host)s/system
-# Note that you must allow passwordless root ssh access (eg.
-# using ssh-agent).
-[ham0]
-host = ham0.home.annexia.org
-mac = 74:d4:35:55:85:3f
-#uri = qemu+ssh://root@%(host)s/system
-[ham1]
-host = ham1.home.annexia.org
-mac = 74:d4:35:51:ab:86
-#uri = qemu+ssh://root@%(host)s/system
-[ham2]
-host = ham2.home.annexia.org
-mac = 74:d4:35:55:82:96
-#uri = qemu+ssh://root@%(host)s/system
-[ham3]
-host = ham3.home.annexia.org
-mac = 74:d4:35:55:84:b4
-#uri = qemu+ssh://root@%(host)s/system
+# The name of the ansible group which contains the list of nodes
+# in the cluster. You have to edit /etc/ansible/hosts and add a
+# section:
+#
+# [cluster]
+# ham0 mac=74:d4:35:55:85:3f
+# ham1 mac=74:d4:35:51:ab:86
+# ham2 mac=74:d4:35:55:82:96
+# ham3 mac=74:d4:35:55:84:b4
+#
+# The mac=... (MAC address) fields are optional, used only for wake-on-LAN.
+#
+# If you don't want to edit /etc/ansible/hosts then you can export
+# $ANSIBLE_HOSTS to name another file instead.
+nodes_group = cluster
import sys
import config
-from node import Node
+import mclu_build
+import mclu_console
+import mclu_import
+import mclu_info
+import mclu_list
+import mclu_migrate
+import mclu_off
+import mclu_on
+import mclu_reboot
+import mclu_start
+import mclu_status
+import mclu_stop
+import mclu_viewer
parser = argparse.ArgumentParser (
prog='mclu',
# Add subcommands.
subparsers = parser.add_subparsers ()
-import mclu_build
mclu_build.cmdline (subparsers)
-import mclu_console
mclu_console.cmdline (subparsers)
-import mclu_import
mclu_import.cmdline (subparsers)
-import mclu_info
mclu_info.cmdline (subparsers)
-import mclu_list
mclu_list.cmdline (subparsers)
-import mclu_migrate
mclu_migrate.cmdline (subparsers)
-import mclu_off
mclu_off.cmdline (subparsers)
-import mclu_on
mclu_on.cmdline (subparsers)
-import mclu_reboot
mclu_reboot.cmdline (subparsers)
-import mclu_start
mclu_start.cmdline (subparsers)
-import mclu_status
mclu_status.cmdline (subparsers)
-import mclu_stop
mclu_stop.cmdline (subparsers)
-import mclu_viewer
mclu_viewer.cmdline (subparsers)
args = parser.parse_args()
# section, so we have to rely on setting names not overlapping.
conf_defaults = {
"home" : os.getenv ("HOME"),
- "host" : "SET.THIS.IN.MCLU.CONF",
"config_dir" : config_dir,
- "uri" : "qemu+ssh://root@%(host)s/system",
}
# Read the configuration file.
xmls_dir = conf.get ("global", "xmls_dir")
if not os.path.isdir (xmls_dir):
sys.exit ("configuration error: [globals] 'xmls_dir' (%s) directory does not exist", xmls_dir)
-
-# Get the list of node names.
-node_names = conf.items ("nodes")
-node_names = filter (lambda (x, _) : re.search (r'^node', x), node_names)
-node_names = [ value for _, value in node_names ]
-if not node_names:
- sys.exit ("configuration error: [nodes] section in configuration file is empty")
-
-# Get information about each node.
-nodes = {}
-for node_name in node_names:
- host = conf.get (node_name, "host")
- if not host:
- host = node_name
- mac = conf.get (node_name, "mac")
- uri = conf.get (node_name, "uri")
- node = Node (node_name, host, mac, uri)
- nodes[node_name] = node
+nodes_group = conf.get ("global", "nodes_group")
# A config dict with less-used configuration settings.
c = {
"config_file" : args.f.name,
"config_dir" : config_dir,
"images_dir" : images_dir,
- "node_names" : node_names,
+ "nodes_group" : nodes_group,
"xmls_dir" : xmls_dir,
"conf" : conf,
}
# Run the subcommand.
-args.run (c, args, nodes)
+args.run (c, args)
BuildRequires: python-devel
BuildRequires: libvirt-python
BuildRequires: /usr/bin/ssh
+BuildRequires: ansible
Requires: libvirt-python
Requires: /usr/bin/ssh
+Requires: ansible
# These are optional: comment them out to get a less functional mclu.
BuildRequires: /usr/bin/wol
)
p.set_defaults (run=run)
-def run (c, args, nodes):
+def run (c, args):
# Did the user request a particular node? If not, we'll run it
# on any node which is up.
m = re.match (r'^(.*):(.*)$', args.name)
if m:
node_name = m.group (1)
vm_name = m.group (2)
- if node_name in nodes:
- node = nodes[node_name]
- if not node.ping ():
- sys.exit ("error: requested node (%s) is not up, use mclu on %s" %
- (node_name, node_name))
- else:
- sys.exit ("error: requested node (%s) does not exist" % node_name)
else:
+        node_name = lib.pick_any_node_which_is_up (c)
+ node = lib.pick_any_node_which_is_up (c)
vm_name = args.name
# Get all the guests, so we can tell if the name is a duplicate.
- running, inactive = lib.get_all_guests (c, nodes.values ())
+ running, inactive = lib.get_all_guests (c)
if vm_name in running or vm_name in inactive:
sys.exit ("error: node name (%s) already exists" % vm_name)
fp.close ()
# Start the guest.
- lib.start_guest (c, node, vm_name)
- print "guest built and started on node %s" % node.name
+ lib.start_guest (c, node_name, vm_name)
+ print "guest built and started on node %s" % node_name
)
p.set_defaults (run=run)
-def run (c, args, nodes):
- running, _ = lib.get_all_guests (c, nodes.values ())
+def run (c, args):
+ running, _ = lib.get_all_guests (c)
m = re.match (r'^(.*):(.*)$', args.vm)
- node_name = None
if m:
# We don't actually care about the node, but we check it
# is the expected one below.
- node_name = m.group (1)
+ node_name_check = m.group (1)
vm_name = m.group (2)
else:
+ node_name_check = None
vm_name = args.vm
if vm_name not in running:
sys.exit ("error: vm %s not found or not running" % vm_name)
- dom = running[vm_name]['dom']
- node = running[vm_name]['node']
+ vm = running[vm_name]['vm']
+ node_name = running[vm_name]['node']
- if node_name and node.name != node_name:
+ if node_name_check and node_name != node_name_check:
sys.exit ("error: vm %s is not running on node %s, did you mean %s:%s ?" %
- (vm_name, node_name, node.name, vm_name))
+ (vm_name, node_name_check, node_name, vm_name))
# Run the virsh console command.
- subprocess.call (["virsh", "-c", node.uri, "console", vm_name])
+ subprocess.call (["virsh",
+ "-c", lib.uri_of_node (node_name),
+ "console",
+ vm_name])
)
p.set_defaults (run=run)
-def run (c, args, nodes):
+def run (c, args):
# Did the user request a particular node? If not, we'll run it
# on any node which is up.
m = re.match (r'^(.*):(.*)$', args.name)
if m:
node_name = m.group (1)
vm_name = m.group (2)
- if node_name in nodes:
- node = nodes[node_name]
- if not node.ping ():
- sys.exit ("error: requested node (%s) is not up, use mclu on %s" %
- (node_name, node_name))
- else:
- sys.exit ("error: requested node (%s) does not exist" % node_name)
else:
+        node_name = lib.pick_any_node_which_is_up (c)
+ node = lib.pick_any_node_which_is_up (c)
vm_name = args.name
# Get all the guests, so we can tell if the name is a duplicate.
- running, inactive = lib.get_all_guests (c, nodes.values ())
+ running, inactive = lib.get_all_guests (c)
if vm_name in running or vm_name in inactive:
sys.exit ("error: node name (%s) already exists" % vm_name)
fp.close ()
# Start the guest.
- lib.start_guest (c, node, vm_name)
- print "guest imported and started on node %s" % node.name
+ lib.start_guest (c, node_name, vm_name)
+ print "guest imported and started on node %s" % node_name
)
p.set_defaults (run=run)
-def run (c, args, nodes):
+def run (c, args):
show_running = True
show_inactive = True
-    if not args.running or not args.inactive:
-        show_running = False
-        show_inactive = True
+    if args.running and not args.inactive:
+        show_inactive = False
+    elif args.inactive and not args.running:
+        show_running = False
- running, inactive = lib.get_all_guests (c, nodes.values ())
+ running, inactive = lib.get_all_guests (c)
if show_running:
for guest in running.values():
- node_name = guest['node'].name
- dom_name = guest['dom'].name()
- dom_state = lib.pretty_run_status (guest['dom'].state()[0])
- print "%s:%s\t%s" % (node_name, dom_name, dom_state)
+ node_name = guest['node']
+ dom_name = guest['vm']
+ print "%s:%s\trunning" % (node_name, dom_name)
if show_inactive:
for name in inactive.values():
)
p.set_defaults (run=run)
-def run (c, args, nodes):
- running, _ = lib.get_all_guests (c, nodes.values ())
+def run (c, args):
+ running, _ = lib.get_all_guests (c)
# Identify the VMs to be migrated.
migrate_vms = []
for vm in running.values():
- node = vm['node']
- dom = vm['dom']
+ node_name = vm['node']
+ vm_name = vm['vm']
# Form the name of this VM (eg. "ham0:vm") so we can match it
# against the wildcards (eg. "ham0:*")
- name = node.name + ":" + dom.name()
+ name = node_name + ":" + vm_name
for wc in args.wildcards:
- if fnmatch.fnmatch (name, wc) or fnmatch.fnmatch (dom.name(), wc):
+ if fnmatch.fnmatch (name, wc) or fnmatch.fnmatch (vm_name, wc):
migrate_vms.append (vm)
if not migrate_vms:
# Get destination node. It can be written either 'dest' or 'dest:'
m = re.match (r'(.*):$', args.dest)
if m:
- args.dest = m.group (1)
+ dest = m.group (1)
+ else:
+ dest = args.dest
- if args.dest not in nodes:
- sys.exit ("error: destination node (%s) does not exist" % args.dest)
- dest = nodes[args.dest]
-
- dconn = libvirt.open (dest.uri)
+ dconn = libvirt.open (lib.uri_of_node (dest))
-    if dconn == None:
+    if dconn is None:
- sys.exit ("error: could not open a libvirt connection to %s (URI: %s)" %
- (dest.host, dest.uri))
+ sys.exit ("error: could not open a libvirt connection to %s" % dest)
for vm in migrate_vms:
- dom = vm['dom']
+ sconn = libvirt.open (lib.uri_of_node (vm['node']))
+    if sconn is None:
+ sys.exit ("error: could not open a libvirt connection to %s" %
+ vm['node'])
+ dom = sconn.lookupByName (vm['vm'])
dom.migrate (dconn, libvirt.VIR_MIGRATE_LIVE)
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import sys
+import argparse
import os
import subprocess
+import sys
+import time
+
+import ansible.inventory
+import ansible.runner
-import argparse
import lib
def cmdline (subparsers):
help='power off a node (or nodes)'
)
p.add_argument (
- '--all', action='store_const', const=True,
- help='power off all nodes'
- )
- p.add_argument (
- 'nodes', nargs='*',
- help='node name to be powered off'
+ 'wildcard',
+ help='node name(s) to be powered off (wildcard may be used)'
)
p.set_defaults (run=run)
-def run (c, args, all_nodes):
- nodes = lib.get_nodes_by_name (all_nodes, args.nodes, args.all)
+def run (c, args):
+ inventory = ansible.inventory.Inventory ()
+ inventory.subset (subset_pattern = args.wildcard)
+ nodes = inventory.get_hosts (c['nodes_group'])
# Check the nodes have no guests.
+ running, _ = lib.get_all_guests (c)
for node in nodes:
- guests = node.guests ()
- if guests != []:
- names = map (lambda x : x.name(), guests)
- sys.exit ("error: node %s has running guests %s, migrate them off or stop them first" % (node.name, names))
+ for vm in running.values():
+ if vm['node'] == node.name:
+ sys.exit ("error: node %s has running guests, migrate them off or stop them first" % node.name)
# Power them off.
- up = 0
- for node in nodes:
- if node.ping (force=True):
- node.shutdown ()
- up += 1
+ runner = ansible.runner.Runner (
+ remote_user = 'root',
+ module_name = 'command',
+ module_args = '/sbin/poweroff',
+ inventory = inventory,
+ pattern = c['nodes_group'],
+ )
+ data = runner.run ()
+ # Wait for them to power off.
pings = 60
- while pings > 0 and up > 0:
- if not node.up:
- if not node.ping (force=True):
- up -= 1
+ runner = ansible.runner.Runner (
+ remote_user = 'root',
+ module_name = 'ping',
+ inventory = inventory,
+ pattern = c['nodes_group'],
+ )
+ while pings > 0:
+ data = runner.run ()
+ if len (data['contacted']) == 0:
+ break
pings -= 1
+ time.sleep (1)
if pings == 0:
sys.exit ('warning: some nodes did not power off')
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-import sys
+import argparse
import os
+import sys
+import time
import subprocess
-import argparse
+import ansible.inventory
+import ansible.runner
+
+import config
import lib
def cmdline (subparsers):
help='power on a node (or nodes)'
)
p.add_argument (
- '--all', action='store_const', const=True,
- help='wake up all nodes'
- )
- p.add_argument (
- 'nodes', nargs='*',
- help='node name to be woken up'
+ 'wildcard',
+ help='node name(s) to be woken up (wildcard may be used)'
)
p.set_defaults (run=run)
-def run (c, args, all_nodes):
- nodes = lib.get_nodes_by_name (all_nodes, args.nodes, args.all)
+def run (c, args):
+ inventory = ansible.inventory.Inventory ()
+ inventory.subset (subset_pattern = args.wildcard)
+ nodes = inventory.get_hosts (c['nodes_group'])
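+    # inventory.subset() restricts the inventory to hosts matching the
+    # wildcard; get_hosts() then returns the matching hosts from the
+    # cluster group.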
# Wake them up.
- up = 0
for node in nodes:
- if node.ping (force=True):
- up += 1
+ vars = node.get_variables()
+ if 'mac' in vars:
+ devnull = open (os.devnull, "w")
+ subprocess.check_call ([config.WOL, vars['mac']],
+ stdout = devnull, stderr = devnull)
else:
- node.wake()
+ print "warning: no mac= line in ansible hosts file for %s (ignored)" % node.name
# Wait for them to come up.
pings = 30
- while pings > 0 and up < len (nodes):
- for node in nodes:
- if not node.up:
- if node.ping (force=True):
- up += 1
+ runner = ansible.runner.Runner (
+ remote_user = 'root',
+ module_name = 'ping',
+ inventory = inventory,
+ pattern = c['nodes_group'],
+ )
+ while pings > 0:
+ data = runner.run ()
+ if len (data['dark']) == 0:
+ break
pings -= 1
+ time.sleep (1)
if pings == 0:
sys.exit ('warning: some nodes did not wake up')
import argparse
import fnmatch
+import subprocess
+
import libvirt
import lib
)
p.set_defaults (run=run)
-def run (c, args, nodes):
- running, _ = lib.get_all_guests (c, nodes.values ())
+def run (c, args):
+ running, _ = lib.get_all_guests (c)
for vm in running.values():
- node = vm['node']
- dom = vm['dom']
+ node_name = vm['node']
+ vm_name = vm['vm']
# Form the name of this VM (eg. "ham0:vm") so we can match it
# against the wildcards (eg. "ham0:*")
- name = node.name + ":" + dom.name()
+ name = node_name + ":" + vm_name
for wc in args.wildcards:
- if fnmatch.fnmatch (name, wc) or fnmatch.fnmatch (dom.name(), wc):
+ if fnmatch.fnmatch (name, wc) or fnmatch.fnmatch (vm_name, wc):
if args.force:
- dom.reset(0)
+ subprocess.check_call (["virsh",
+ "-c", lib.uri_of_node (node_name),
+ "reset", vm_name])
else:
- dom.reboot(0)
+ subprocess.check_call (["virsh",
+ "-c", lib.uri_of_node (node_name),
+ "reboot", vm_name])
)
p.set_defaults (run=run)
-def run (c, args, nodes):
- _, inactive = lib.get_all_guests (c, nodes.values ())
+def run (c, args):
+ _, inactive = lib.get_all_guests (c)
# User supplied a list of node:VMs.
for a in args.vms:
if m:
node_name = m.group (1)
wc = m.group (2)
- if node_name not in nodes:
- sys.exit ("error: node %s does not exist" % node_name)
- node = nodes[node_name]
else:
wc = a
- node = lib.pick_any_node_which_is_up (nodes)
+ node_name = lib.pick_any_node_which_is_up (c)
started = []
for vm_name in inactive:
if fnmatch.fnmatch (vm_name, wc):
if args.viewer:
- subprocess.Popen ([config.VIRT_VIEWER, "-c",
- node.uri, vm_name],
+ subprocess.Popen ([config.VIRT_VIEWER,
+ "-c", lib.uri_of_node (node_name),
+ vm_name],
close_fds=True)
- lib.start_guest (c, node, vm_name)
+ lib.start_guest (c, node_name, vm_name)
started.append (vm_name)
if not started:
import argparse
+import ansible.runner
+
import lib
def cmdline (subparsers):
)
p.set_defaults (run=run)
-def run (c, args, nodes):
- for node_name in sorted (nodes.keys ()):
- node = nodes[node_name]
- print "%s (%s)" % (node_name, node.host),
- if node.ping():
- print "\tup",
- if node.ssh_ping():
- print "\tssh: OK",
- if node.libvirt_ping():
- print "\tlibvirt: OK"
- else:
- print "\tlibvirt: dead"
- else:
- print "\tssh: dead"
- else:
- print "\tdown"
+def run (c, args):
+ runner = ansible.runner.Runner (
+ remote_user = 'root',
+ module_name = 'ping',
+ pattern = c['nodes_group'],
+ )
+ data = runner.run ()
+ for name in sorted (data['contacted']):
+ print "%s\tup" % name
+ for name in sorted (data['dark']):
+ print "%s\tdown" % name
import argparse
import fnmatch
+import subprocess
+
import libvirt
import lib
)
p.set_defaults (run=run)
-def run (c, args, nodes):
- running, _ = lib.get_all_guests (c, nodes.values ())
+def run (c, args):
+ running, _ = lib.get_all_guests (c)
for vm in running.values():
- node = vm['node']
- dom = vm['dom']
+ node_name = vm['node']
+ vm_name = vm['vm']
# Form the name of this VM (eg. "ham0:vm") so we can match it
# against the wildcards (eg. "ham0:*")
- name = node.name + ":" + dom.name()
+ name = node_name + ":" + vm_name
for wc in args.wildcards:
- if fnmatch.fnmatch (name, wc) or fnmatch.fnmatch (dom.name(), wc):
+ if fnmatch.fnmatch (name, wc) or fnmatch.fnmatch (vm_name, wc):
if args.force:
- dom.destroy()
+ subprocess.check_call (["virsh",
+ "-c", lib.uri_of_node (node_name),
+ "destroy", vm_name])
else:
- dom.shutdown()
+ subprocess.check_call (["virsh",
+ "-c", lib.uri_of_node (node_name),
+ "shutdown", vm_name])
)
p.set_defaults (run=run)
-def run (c, args, nodes):
- running, _ = lib.get_all_guests (c, nodes.values ())
+def run (c, args):
+ running, _ = lib.get_all_guests (c)
m = re.match (r'^(.*):(.*)$', args.vm)
- node_name = None
if m:
# We don't actually care about the node, but we check it
# is the expected one below.
- node_name = m.group (1)
+ node_name_check = m.group (1)
vm_name = m.group (2)
else:
+ node_name_check = None
vm_name = args.vm
if vm_name not in running:
sys.exit ("error: vm %s not found or not running" % vm_name)
- dom = running[vm_name]['dom']
- node = running[vm_name]['node']
+ vm = running[vm_name]['vm']
+ node_name = running[vm_name]['node']
- if node_name and node.name != node_name:
+ if node_name_check and node_name != node_name_check:
sys.exit ("error: vm %s is not running on node %s, did you mean %s:%s ?" %
- (vm_name, node_name, node.name, vm_name))
+ (vm_name, node_name_check, node_name, vm_name))
-    # Run the virsh console command.
+    # Run the virt-viewer command.
- subprocess.call ([config.VIRT_VIEWER, "-c", node.uri, vm_name])
+ subprocess.call ([config.VIRT_VIEWER,
+ "-c", lib.uri_of_node (node_name),
+ vm_name])