Skip to content

Commit

Permalink
Merge pull request #2731 from zli/CA-209401
Browse files Browse the repository at this point in the history
CA-209401: add static-vdis detach feature for HA
  • Loading branch information
robhoes authored Aug 18, 2016
2 parents c062888 + 5667bef commit 106a0d2
Show file tree
Hide file tree
Showing 4 changed files with 82 additions and 5 deletions.
10 changes: 8 additions & 2 deletions ocaml/xapi/static_vdis.ml
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,12 @@ let permanent_vdi_detach_by_uuid ~__context ~uuid =
end;
ignore(Helpers.call_script !Xapi_globs.static_vdis [ "del"; uuid ])

(** [detach_only vdi] detaches the static VDI [vdi] from dom0 via the
    static-vdis script, without updating the xapi database.  Added for
    CA-209401 so HA can release its statefile VDIs on clean shutdown.
    No-op when the VDI is not currently attached. *)
let detach_only vdi =
  if vdi.currently_attached then begin
    (* Was mislabelled "vdi_detach_by_uuid" — a copy-paste from the
       by-uuid variant above; log under the actual function name. *)
    info "detach_only: vdi-uuid = %s" vdi.uuid;
    ignore (Helpers.call_script !Xapi_globs.static_vdis ["detach"; vdi.uuid])
  end

(** Added for CA-48539. Deactivates a vdi. You should probably follow
this call with one of the previous vdi_detach functions. *)
let permanent_vdi_deactivate_by_uuid ~__context ~uuid =
Expand Down Expand Up @@ -86,6 +92,6 @@ let gc () =
(** Re-run the boot-time attachment of static VDIs by starting the
    [attach-static-vdis] service.  Failure is logged but not fatal:
    attachment is best-effort here and will be retried on next boot. *)
let reattempt_on_boot_attach () =
  (* NOTE(review): the scraped diff showed both the old "restart" and the
     new "start" invocations; this is the post-commit "start" variant. *)
  let script = "attach-static-vdis" in
  try
    ignore (Helpers.call_script "/sbin/service" [ script; "start" ])
  with e ->
    warn "Attempt to reattach static VDIs via '%s start' failed: %s"
      script (ExnHelper.string_of_exn e)
4 changes: 3 additions & 1 deletion ocaml/xapi/xapi_ha.ml
Original file line number Diff line number Diff line change
Expand Up @@ -1554,7 +1554,7 @@ let before_clean_shutdown_or_reboot ~__context ~host =
then raise (Api_errors.Server_error(Api_errors.ha_lost_statefile, []));

(* From this point we will fence ourselves if any unexpected error occurs *)
try
begin try
begin
try ha_disarm_fencing __context host
with Xha_error Xha_errno.Mtc_exit_daemon_is_not_present ->
Expand Down Expand Up @@ -1584,4 +1584,6 @@ let before_clean_shutdown_or_reboot ~__context ~host =
Thread.delay 300.;
info "Still waiting to reboot after %.2f seconds" (Unix.gettimeofday () -. start)
done
end;
List.iter Static_vdis.detach_only (Static_vdis.list())
end
30 changes: 28 additions & 2 deletions scripts/init.d-attach-static-vdis
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
#! /bin/bash
#
# attach-static-vdis Attaches any statically-configured VDIs to dom0
# attach-static-vdis Attaches/detaches any statically-configured VDIs to dom0
#
# chkconfig: 2345 20 78
# description: attaches any statically-configured VDIs to dom0
# description: attaches/detaches any statically-configured VDIs to dom0

STATE_DIR=/etc/xensource/static-vdis

Expand Down Expand Up @@ -32,23 +32,48 @@ clear_stale_state(){
}

attach_all(){
    # Attach every statically-configured VDI listed under ${STATE_DIR}.
    # Individual failures are logged and skipped; returns 1 if any VDI
    # failed to attach, 0 if all succeeded.
    RC=0
    ALL=$(ls -1 "${STATE_DIR}")

    for i in ${ALL}; do
        # Quote expansions: uuids/paths must survive word splitting.
        UUID=$(cat "${STATE_DIR}/${i}/vdi-uuid")
        logger "Attempting to attach VDI: ${UUID}"
        # $? after OUTPUT=$(...) is the command substitution's exit status.
        OUTPUT=$(/opt/xensource/bin/static-vdis attach "${UUID}" 2>&1)
        if [ $? -ne 0 ]; then
            RC=1
            logger "Attempt to attach VDI: ${UUID} failed -- skipping (Error was: ${OUTPUT})"
        fi
    done
    return $RC
}

detach_all(){
    # Best-effort detach of every statically-configured VDI under
    # ${STATE_DIR}.  Failures are logged and skipped; always returns 0
    # (mirrors the original, which reported no aggregate status).
    ALL=$(ls -1 "${STATE_DIR}")

    for i in ${ALL}; do
        # Quote expansions: uuids/paths must survive word splitting.
        UUID=$(cat "${STATE_DIR}/${i}/vdi-uuid")
        logger "Attempting to detach VDI: ${UUID}"
        OUTPUT=$(/opt/xensource/bin/static-vdis detach "${UUID}" 2>&1)
        if [ $? -ne 0 ]; then
            logger "Attempt to detach VDI: ${UUID} failed -- skipping (Error was: ${OUTPUT})"
        fi
    done
}

start() {
    # Init "start" action: clear stale attach state left by a previous
    # boot, then attach everything; propagate attach_all's status.
    local rc
    echo -n $"Attempting to attach all statically-configured VDIs"
    clear_stale_state
    attach_all
    rc=$?
    echo
    return $rc
}


stop() {
    # Init "stop" action: best-effort detach of all statically-configured
    # VDIs.  Always returns 0 -- detach_all logs and skips failures.
    echo -n $"Attempting to detach all statically-configured VDIs"
    detach_all
    echo
    return 0
}

Expand All @@ -57,6 +82,7 @@ start)
start
;;
stop)
stop
;;
restart)
start
Expand Down
43 changes: 43 additions & 0 deletions scripts/static-vdis
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,20 @@ def call_backend_attach(driver, config):
path = xmlrpc[0][0]
return path

def call_backend_detach(driver, config):
    # Invoke the SM backend driver's vdi_detach_from_config command.
    # 'config' is the XML-RPC-encoded parameter blob that was saved to
    # disk at attach time; we rewrite its 'command' field and replay it.
    params = xmlrpclib.loads(config)[0][0]
    params['command'] = 'vdi_detach_from_config'
    config = xmlrpclib.dumps(tuple([params]), params['command'])
    xml = doexec([ driver, config ])
    # (was "<>": the deprecated spelling of "!=")
    if xml[0] != 0:
        raise Exception("SM_BACKEND_FAILURE(%d, %s, %s)" % xml)
    xmlrpc = xmlrpclib.loads(xml[1])
    # Some drivers wrap the result in a 'params' key, others return it
    # directly.  Catch only the lookup failures -- a bare "except:" would
    # also swallow KeyboardInterrupt/SystemExit.
    try:
        res = xmlrpc[0][0]['params']
    except (KeyError, IndexError, TypeError):
        res = xmlrpc[0][0]
    return res

def attach(vdi_uuid):
found = False
for existing in list():
Expand Down Expand Up @@ -230,13 +244,40 @@ def attach(vdi_uuid):
return d + "/disk"
if not found:
raise Exception("Disk configuration not found")

def detach(vdi_uuid):
    # Detach the statically-configured VDI <vdi_uuid> immediately.
    # Silently returns if the VDI is configured but not attached (no
    # 'disk' entry); raises if <vdi_uuid> is not configured at all.
    found = False
    for existing in list():
        if existing['vdi-uuid'] == vdi_uuid:
            if not (existing.has_key('disk')):
                # Configured but not attached -- nothing to do.
                return
            found = True
            d = main_dir + "/" + existing['id']
            if not (os.path.exists(d + "/sr-uri")):
                # SMAPIv1
                # Replay the attach-time config through the backend driver.
                config = read_whole_file(d + "/config")
                driver = read_whole_file(d + "/driver")
                call_backend_detach(driver, config)
            else:
                # Volume-plugin path (sr-uri present): deactivate the
                # datapath, then detach it.  "0" is passed as the domain
                # argument -- presumably dom0; confirm against the plugin.
                volume_plugin = read_whole_file(d + "/volume-plugin")
                vol_key = read_whole_file(d + "/volume-key")
                vol_uri = read_whole_file(d + "/volume-uri")
                scheme = urlparse.urlparse(vol_uri).scheme
                call_datapath_plugin(scheme, "Datapath.deactivate", [ vol_uri, "0" ])
                call_datapath_plugin(scheme, "Datapath.detach", [ vol_uri, "0" ])
            # Remove the 'disk' entry so list() reports it detached.
            os.unlink(d + "/disk")
            return
    if not found:
        raise Exception("Disk configuration not found")


def usage():
print "Usage:"
print " %s list -- print a list of VDIs which will be attached on host boot" % sys.argv[0]
print " %s add <uuid> <reason> -- make the VDI <uuid> available on host boot" % sys.argv[0]
print " %s del <uuid> -- cease making the VDI <uuid> available on host boot" % sys.argv[0]
print " %s attach <uuid> -- attach the VDI immediately" % sys.argv[0]
print " %s detach <uuid> -- detach the VDI immediately" % sys.argv[0]
sys.exit(1)

if __name__ == "__main__":
Expand All @@ -260,6 +301,8 @@ if __name__ == "__main__":
elif sys.argv[1] == "attach" and len(sys.argv) == 3:
path = attach(sys.argv[2])
print path
elif sys.argv[1] == "detach" and len(sys.argv) == 3:
detach(sys.argv[2])
else:
usage()

0 comments on commit 106a0d2

Please sign in to comment.